< prev index next >

src/hotspot/share/opto/graphKit.cpp

Print this page
*** 21,10 ***
--- 21,12 ---
   * questions.
   *
   */
  
  #include "precompiled.hpp"
+ #include "ci/ciFlatArrayKlass.hpp"
+ #include "ci/ciInlineKlass.hpp"
  #include "ci/ciUtilities.hpp"
  #include "classfile/javaClasses.hpp"
  #include "ci/ciNativeEntryPoint.hpp"
  #include "ci/ciObjArray.hpp"
  #include "asm/register.hpp"

*** 36,13 ***
--- 38,15 ---
  #include "opto/addnode.hpp"
  #include "opto/castnode.hpp"
  #include "opto/convertnode.hpp"
  #include "opto/graphKit.hpp"
  #include "opto/idealKit.hpp"
+ #include "opto/inlinetypenode.hpp"
  #include "opto/intrinsicnode.hpp"
  #include "opto/locknode.hpp"
  #include "opto/machnode.hpp"
+ #include "opto/narrowptrnode.hpp"
  #include "opto/opaquenode.hpp"
  #include "opto/parse.hpp"
  #include "opto/rootnode.hpp"
  #include "opto/runtime.hpp"
  #include "opto/subtypenode.hpp"

*** 52,19 ***
  #include "utilities/powerOfTwo.hpp"
  #include "utilities/growableArray.hpp"
  
  //----------------------------GraphKit-----------------------------------------
  // Main utility constructor.
! GraphKit::GraphKit(JVMState* jvms)
    : Phase(Phase::Parser),
      _env(C->env()),
!     _gvn(*C->initial_gvn()),
      _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
  {
    _exceptions = jvms->map()->next_exception();
    if (_exceptions != NULL)  jvms->map()->set_next_exception(NULL);
    set_jvms(jvms);
  }
  
  // Private constructor for parser.
  GraphKit::GraphKit()
    : Phase(Phase::Parser),
--- 56,27 ---
  #include "utilities/powerOfTwo.hpp"
  #include "utilities/growableArray.hpp"
  
  //----------------------------GraphKit-----------------------------------------
  // Main utility constructor.
! GraphKit::GraphKit(JVMState* jvms, PhaseGVN* gvn)
    : Phase(Phase::Parser),
      _env(C->env()),
!     _gvn((gvn != NULL) ? *gvn : *C->initial_gvn()),
      _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
  {
+   assert(gvn == NULL || !gvn->is_IterGVN() || gvn->is_IterGVN()->delay_transform(), "delay transform should be enabled");
    _exceptions = jvms->map()->next_exception();
    if (_exceptions != NULL)  jvms->map()->set_next_exception(NULL);
    set_jvms(jvms);
+ #ifdef ASSERT
+   if (_gvn.is_IterGVN() != NULL) {
+     assert(_gvn.is_IterGVN()->delay_transform(), "Transformation must be delayed if IterGVN is used");
+     // Save the initial size of _for_igvn worklist for verification (see ~GraphKit)
+     _worklist_size = _gvn.C->for_igvn()->size();
+   }
+ #endif
  }
  
  // Private constructor for parser.
  GraphKit::GraphKit()
    : Phase(Phase::Parser),

*** 828,11 ***
    ciMethod* cur_method = jvms->method();
    int       cur_bci   = jvms->bci();
    if (cur_method != NULL && cur_bci != InvocationEntryBci) {
      Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
      return Interpreter::bytecode_should_reexecute(code) ||
!            (is_anewarray && code == Bytecodes::_multianewarray);
      // Reexecute _multianewarray bytecode which was replaced with
      // sequence of [a]newarray. See Parse::do_multianewarray().
      //
      // Note: interpreter should not have it set since this optimization
      // is limited by dimensions and guarded by flag so in some cases
--- 840,11 ---
    ciMethod* cur_method = jvms->method();
    int       cur_bci   = jvms->bci();
    if (cur_method != NULL && cur_bci != InvocationEntryBci) {
      Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
      return Interpreter::bytecode_should_reexecute(code) ||
!            (is_anewarray && (code == Bytecodes::_multianewarray));
      // Reexecute _multianewarray bytecode which was replaced with
      // sequence of [a]newarray. See Parse::do_multianewarray().
      //
      // Note: interpreter should not have it set since this optimization
      // is limited by dimensions and guarded by flag so in some cases

*** 1088,10 ***
--- 1100,19 ---
        assert(rsize == 1, "");
        depth = rsize - inputs;
      }
      break;
  
+   case Bytecodes::_withfield: {
+     bool ignored_will_link;
+     ciField* field = method()->get_field_at_bci(bci(), ignored_will_link);
+     int      size  = field->type()->size();
+     inputs = size+1;
+     depth = rsize - inputs;
+     break;
+   }
+ 
    case Bytecodes::_ireturn:
    case Bytecodes::_lreturn:
    case Bytecodes::_freturn:
    case Bytecodes::_dreturn:
    case Bytecodes::_areturn:

*** 1170,11 ***
  Node* GraphKit::load_object_klass(Node* obj) {
    // Special-case a fresh allocation to avoid building nodes:
    Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
    if (akls != NULL)  return akls;
    Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
!   return _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS));
  }
  
  //-------------------------load_array_length-----------------------------------
  Node* GraphKit::load_array_length(Node* array) {
    // Special-case a fresh allocation to avoid building nodes:
--- 1191,11 ---
  Node* GraphKit::load_object_klass(Node* obj) {
    // Special-case a fresh allocation to avoid building nodes:
    Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
    if (akls != NULL)  return akls;
    Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
!   return _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
  }
  
  //-------------------------load_array_length-----------------------------------
  Node* GraphKit::load_array_length(Node* array) {
    // Special-case a fresh allocation to avoid building nodes:

*** 1219,20 ***
  #endif
  Node* GraphKit::null_check_common(Node* value, BasicType type,
                                    // optional arguments for variations:
                                    bool assert_null,
                                    Node* *null_control,
!                                   bool speculative) {
    assert(!assert_null || null_control == NULL, "not both at once");
    if (stopped())  return top();
    NOT_PRODUCT(explicit_null_checks_inserted++);
  
    // Construct NULL check
    Node *chk = NULL;
    switch(type) {
      case T_LONG   : chk = new CmpLNode(value, _gvn.zerocon(T_LONG)); break;
      case T_INT    : chk = new CmpINode(value, _gvn.intcon(0)); break;
      case T_ARRAY  : // fall through
        type = T_OBJECT;  // simplify further tests
      case T_OBJECT : {
        const Type *t = _gvn.type( value );
  
--- 1240,37 ---
  #endif
  Node* GraphKit::null_check_common(Node* value, BasicType type,
                                    // optional arguments for variations:
                                    bool assert_null,
                                    Node* *null_control,
!                                   bool speculative,
+                                   bool is_init_check) {
    assert(!assert_null || null_control == NULL, "not both at once");
    if (stopped())  return top();
    NOT_PRODUCT(explicit_null_checks_inserted++);
  
+   if (value->is_InlineTypePtr()) {
+     // Null checking a scalarized but nullable inline type. Check the is_init
+     // input instead of the oop input to avoid keeping buffer allocations alive.
+     InlineTypePtrNode* vtptr = value->as_InlineTypePtr();
+     while (vtptr->get_oop()->is_InlineTypePtr()) {
+       vtptr = vtptr->get_oop()->as_InlineTypePtr();
+     }
+     null_check_common(vtptr->get_is_init(), T_INT, assert_null, null_control, speculative, true);
+     if (stopped()) {
+       return top();
+     }
+     bool do_replace_in_map = (null_control == NULL || (*null_control) == top());
+     return cast_not_null(value, do_replace_in_map);
+   }
+ 
    // Construct NULL check
    Node *chk = NULL;
    switch(type) {
      case T_LONG   : chk = new CmpLNode(value, _gvn.zerocon(T_LONG)); break;
      case T_INT    : chk = new CmpINode(value, _gvn.intcon(0)); break;
+     case T_INLINE_TYPE : // fall through
      case T_ARRAY  : // fall through
        type = T_OBJECT;  // simplify further tests
      case T_OBJECT : {
        const Type *t = _gvn.type( value );
  

*** 1326,11 ***
    // Branch to failure if null
    float ok_prob = PROB_MAX;  // a priori estimate:  nulls never happen
    Deoptimization::DeoptReason reason;
    if (assert_null) {
      reason = Deoptimization::reason_null_assert(speculative);
!   } else if (type == T_OBJECT) {
      reason = Deoptimization::reason_null_check(speculative);
    } else {
      reason = Deoptimization::Reason_div0_check;
    }
    // %%% Since Reason_unhandled is not recorded on a per-bytecode basis,
--- 1364,11 ---
    // Branch to failure if null
    float ok_prob = PROB_MAX;  // a priori estimate:  nulls never happen
    Deoptimization::DeoptReason reason;
    if (assert_null) {
      reason = Deoptimization::reason_null_assert(speculative);
!   } else if (type == T_OBJECT || is_init_check) {
      reason = Deoptimization::reason_null_check(speculative);
    } else {
      reason = Deoptimization::Reason_div0_check;
    }
    // %%% Since Reason_unhandled is not recorded on a per-bytecode basis,

*** 1400,14 ***
    }
  
    return value;
  }
  
- 
  //------------------------------cast_not_null----------------------------------
  // Cast obj to not-null on this path
  Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
    const Type *t = _gvn.type(obj);
    const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL);
    // Object is already not-null?
    if( t == t_not_null ) return obj;
  
--- 1438,32 ---
    }
  
    return value;
  }
  
  //------------------------------cast_not_null----------------------------------
  // Cast obj to not-null on this path
  Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
+   if (obj->is_InlineType()) {
+     return obj;
+   } else if (obj->is_InlineTypePtr()) {
+     // Cast oop input instead
+     Node* cast = cast_not_null(obj->as_InlineTypePtr()->get_oop(), do_replace_in_map);
+     if (cast->is_top()) {
+       // Always null
+       return top();
+     }
+     // Create a new node with the casted oop input and is_init set
+     InlineTypeBaseNode* vt = obj->clone()->as_InlineTypePtr();
+     vt->set_oop(cast);
+     vt->set_is_init(_gvn);
+     vt = _gvn.transform(vt)->as_InlineTypePtr();
+     if (do_replace_in_map) {
+       replace_in_map(obj, vt);
+     }
+     return vt;
+   }
    const Type *t = _gvn.type(obj);
    const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL);
    // Object is already not-null?
    if( t == t_not_null ) return obj;
  

*** 1537,11 ***
      ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
    } else {
      ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
    }
    ld = _gvn.transform(ld);
!   if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
      // Improve graph before escape analysis and boxing elimination.
      record_for_igvn(ld);
    }
    return ld;
  }
--- 1593,12 ---
      ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
    } else {
      ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
    }
    ld = _gvn.transform(ld);
! 
+   if (((bt == T_OBJECT || bt == T_INLINE_TYPE) && C->do_escape_analysis()) || C->eliminate_boxing()) {
      // Improve graph before escape analysis and boxing elimination.
      record_for_igvn(ld);
    }
    return ld;
  }

*** 1588,11 ***
                                  Node* adr,
                                  const TypePtr* adr_type,
                                  Node* val,
                                  const Type* val_type,
                                  BasicType bt,
!                                 DecoratorSet decorators) {
    // Transformation of a value which could be NULL pointer (CastPP #NULL)
    // could be delayed during Parse (for example, in adjust_map_after_if()).
    // Execute transformation here to avoid barrier generation in such case.
    if (_gvn.type(val) == TypePtr::NULL_PTR) {
      val = _gvn.makecon(TypePtr::NULL_PTR);
--- 1645,12 ---
                                  Node* adr,
                                  const TypePtr* adr_type,
                                  Node* val,
                                  const Type* val_type,
                                  BasicType bt,
!                                 DecoratorSet decorators,
+                                 bool safe_for_replace) {
    // Transformation of a value which could be NULL pointer (CastPP #NULL)
    // could be delayed during Parse (for example, in adjust_map_after_if()).
    // Execute transformation here to avoid barrier generation in such case.
    if (_gvn.type(val) == TypePtr::NULL_PTR) {
      val = _gvn.makecon(TypePtr::NULL_PTR);

*** 1601,10 ***
--- 1659,17 ---
    if (stopped()) {
      return top(); // Dead path ?
    }
  
    assert(val != NULL, "not dead path");
+   if (val->is_InlineType()) {
+     // Store to non-flattened field. Buffer the inline type and make sure
+     // the store is re-executed if the allocation triggers deoptimization.
+     PreserveReexecuteState preexecs(this);
+     jvms()->set_should_reexecute(true);
+     val = val->as_InlineType()->buffer(this, safe_for_replace);
+   }
  
    C2AccessValuePtr addr(adr, adr_type);
    C2AccessValue value(val, val_type);
    C2ParseAccess access(this, decorators | C2_WRITE_ACCESS, bt, obj, addr);
    if (access.is_raw()) {

*** 1617,17 ***
  Node* GraphKit::access_load_at(Node* obj,   // containing obj
                                 Node* adr,   // actual address to load from
                                 const TypePtr* adr_type,
                                 const Type* val_type,
                                 BasicType bt,
!                                DecoratorSet decorators) {
    if (stopped()) {
      return top(); // Dead path ?
    }
  
    C2AccessValuePtr addr(adr, adr_type);
!   C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, obj, addr);
    if (access.is_raw()) {
      return _barrier_set->BarrierSetC2::load_at(access, val_type);
    } else {
      return _barrier_set->load_at(access, val_type);
    }
--- 1682,18 ---
  Node* GraphKit::access_load_at(Node* obj,   // containing obj
                                 Node* adr,   // actual address to load from
                                 const TypePtr* adr_type,
                                 const Type* val_type,
                                 BasicType bt,
!                                DecoratorSet decorators,
+                                Node* ctl) {
    if (stopped()) {
      return top(); // Dead path ?
    }
  
    C2AccessValuePtr addr(adr, adr_type);
!   C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, obj, addr, ctl);
    if (access.is_raw()) {
      return _barrier_set->BarrierSetC2::load_at(access, val_type);
    } else {
      return _barrier_set->load_at(access, val_type);
    }

*** 1729,10 ***
--- 1795,15 ---
  
  //-------------------------array_element_address-------------------------
  Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
                                        const TypeInt* sizetype, Node* ctrl) {
    uint shift  = exact_log2(type2aelembytes(elembt));
+   ciKlass* arytype_klass = _gvn.type(ary)->is_aryptr()->klass();
+   if (arytype_klass != NULL && arytype_klass->is_flat_array_klass()) {
+     ciFlatArrayKlass* vak = arytype_klass->as_flat_array_klass();
+     shift = vak->log2_element_size();
+   }
    uint header = arrayOopDesc::base_offset_in_bytes(elembt);
  
    // short-circuit a common case (saves lots of confusing waste motion)
    jint idx_con = find_int_con(idx, -1);
    if (idx_con >= 0) {

*** 1749,10 ***
--- 1820,11 ---
  
  //-------------------------load_array_element-------------------------
  Node* GraphKit::load_array_element(Node* ary, Node* idx, const TypeAryPtr* arytype, bool set_ctrl) {
    const Type* elemtype = arytype->elem();
    BasicType elembt = elemtype->array_element_basic_type();
+   assert(elembt != T_INLINE_TYPE, "inline types are not supported by this method");
    Node* adr = array_element_address(ary, idx, elembt, arytype->size());
    if (elembt == T_NARROWOOP) {
      elembt = T_OBJECT; // To satisfy switch in LoadNode::make()
    }
    Node* ld = access_load_at(ary, adr, arytype, elemtype, elembt,

*** 1760,16 ***
    return ld;
  }
  
  //-------------------------set_arguments_for_java_call-------------------------
  // Arguments (pre-popped from the stack) are taken from the JVMS.
! void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
!   // Add the call arguments:
!   uint nargs = call->method()->arg_size();
!   for (uint i = 0; i < nargs; i++) {
!     Node* arg = argument(i);
!     call->init_req(i + TypeFunc::Parms, arg);
    }
  }
  
  //---------------------------set_edges_for_java_call---------------------------
  // Connect a newly created call into the current JVMS.
--- 1832,42 ---
    return ld;
  }
  
  //-------------------------set_arguments_for_java_call-------------------------
  // Arguments (pre-popped from the stack) are taken from the JVMS.
! void GraphKit::set_arguments_for_java_call(CallJavaNode* call, bool is_late_inline) {
!   PreserveReexecuteState preexecs(this);
!   if (EnableValhalla) {
!     // Make sure the call is "re-executed", if buffering of inline type arguments triggers deoptimization.
!     // At this point, the call hasn't been executed yet, so we will only ever execute the call once.
!     jvms()->set_should_reexecute(true);
+     int arg_size = method()->get_declared_signature_at_bci(bci())->arg_size_for_bc(java_bc());
+     inc_sp(arg_size);
+   }
+   // Add the call arguments
+   const TypeTuple* domain = call->tf()->domain_sig();
+   uint nargs = domain->cnt();
+   for (uint i = TypeFunc::Parms, idx = TypeFunc::Parms; i < nargs; i++) {
+     Node* arg = argument(i-TypeFunc::Parms);
+     const Type* t = domain->field_at(i);
+     if (call->method()->has_scalarized_args() && t->is_inlinetypeptr() && !t->maybe_null() && t->inline_klass()->can_be_passed_as_fields()) {
+       // We don't pass inline type arguments by reference but instead pass each field of the inline type
+       InlineTypeBaseNode* vt = arg->as_InlineTypeBase();
+       vt->pass_fields(this, call, idx);
+       // If an inline type argument is passed as fields, attach the Method* to the call site
+       // to be able to access the extended signature later via attached_method_before_pc().
+       // For example, see CompiledMethod::preserve_callee_argument_oops().
+       call->set_override_symbolic_info(true);
+       continue;
+     } else if (arg->is_InlineType()) {
+       // Pass inline type argument via oop to callee
+       arg = arg->as_InlineType()->buffer(this);
+       if (!is_late_inline) {
+         arg = arg->as_InlineTypePtr()->get_oop();
+       }
+     }
+     call->init_req(idx++, arg);
    }
  }
  
  //---------------------------set_edges_for_java_call---------------------------
  // Connect a newly created call into the current JVMS.

*** 1803,17 ***
  }
  
  Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj, bool deoptimize) {
    if (stopped())  return top();  // maybe the call folded up?
  
-   // Capture the return value, if any.
-   Node* ret;
-   if (call->method() == NULL ||
-       call->method()->return_type()->basic_type() == T_VOID)
-         ret = top();
-   else  ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
- 
    // Note:  Since any out-of-line call can produce an exception,
    // we always insert an I_O projection from the call into the result.
  
    make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj, deoptimize);
  
--- 1901,10 ---

*** 1822,10 ***
--- 1913,31 ---
      // through and exceptional paths, so replace the projections for
      // the fall through path.
      set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) ));
      set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));
    }
+ 
+   // Capture the return value, if any.
+   Node* ret;
+   if (call->method() == NULL || call->method()->return_type()->basic_type() == T_VOID) {
+     ret = top();
+   } else if (call->method()->return_type()->is_inlinetype()) {
+     const Type* ret_type = call->tf()->range_sig()->field_at(TypeFunc::Parms);
+     if (call->tf()->returns_inline_type_as_fields()) {
+       // Return of multiple values (inline type fields): we create a
+       // InlineType node, each field is a projection from the call.
+       ciInlineKlass* vk = call->method()->return_type()->as_inline_klass();
+       uint base_input = TypeFunc::Parms;
+       ret = InlineTypeNode::make_from_multi(this, call, ret_type->inline_klass(), base_input, false);
+     } else {
+       ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
+       ret = _gvn.transform(InlineTypeNode::make_from_oop(this, ret, ret_type->inline_klass(), !ret_type->maybe_null()));
+     }
+   } else {
+     ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
+   }
+ 
    return ret;
  }
  
  //--------------------set_predefined_input_for_runtime_call--------------------
  // Reading and setting the memory state is way conservative here.

*** 1912,80 ***
    Node* ex_ctl = top();
  
    SafePointNode* final_state = stop();
  
    // Find all the needed outputs of this call
!   CallProjections callprojs;
-   call->extract_projections(&callprojs, true);
  
    Unique_Node_List wl;
    Node* init_mem = call->in(TypeFunc::Memory);
    Node* final_mem = final_state->in(TypeFunc::Memory);
    Node* final_ctl = final_state->in(TypeFunc::Control);
    Node* final_io = final_state->in(TypeFunc::I_O);
  
    // Replace all the old call edges with the edges from the inlining result
!   if (callprojs.fallthrough_catchproj != NULL) {
!     C->gvn_replace_by(callprojs.fallthrough_catchproj, final_ctl);
    }
!   if (callprojs.fallthrough_memproj != NULL) {
      if (final_mem->is_MergeMem()) {
        // Parser's exits MergeMem was not transformed but may be optimized
        final_mem = _gvn.transform(final_mem);
      }
!     C->gvn_replace_by(callprojs.fallthrough_memproj,   final_mem);
      add_mergemem_users_to_worklist(wl, final_mem);
    }
!   if (callprojs.fallthrough_ioproj != NULL) {
!     C->gvn_replace_by(callprojs.fallthrough_ioproj,    final_io);
    }
  
    // Replace the result with the new result if it exists and is used
!   if (callprojs.resproj != NULL && result != NULL) {
!     C->gvn_replace_by(callprojs.resproj, result);
    }
  
    if (ejvms == NULL) {
      // No exception edges to simply kill off those paths
!     if (callprojs.catchall_catchproj != NULL) {
!       C->gvn_replace_by(callprojs.catchall_catchproj, C->top());
      }
!     if (callprojs.catchall_memproj != NULL) {
!       C->gvn_replace_by(callprojs.catchall_memproj,   C->top());
      }
!     if (callprojs.catchall_ioproj != NULL) {
!       C->gvn_replace_by(callprojs.catchall_ioproj,    C->top());
      }
      // Replace the old exception object with top
!     if (callprojs.exobj != NULL) {
!       C->gvn_replace_by(callprojs.exobj, C->top());
      }
    } else {
      GraphKit ekit(ejvms);
  
      // Load my combined exception state into the kit, with all phis transformed:
      SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states();
      replaced_nodes_exception = ex_map->replaced_nodes();
  
      Node* ex_oop = ekit.use_exception_state(ex_map);
  
!     if (callprojs.catchall_catchproj != NULL) {
!       C->gvn_replace_by(callprojs.catchall_catchproj, ekit.control());
        ex_ctl = ekit.control();
      }
!     if (callprojs.catchall_memproj != NULL) {
        Node* ex_mem = ekit.reset_memory();
!       C->gvn_replace_by(callprojs.catchall_memproj,   ex_mem);
        add_mergemem_users_to_worklist(wl, ex_mem);
      }
!     if (callprojs.catchall_ioproj != NULL) {
!       C->gvn_replace_by(callprojs.catchall_ioproj,    ekit.i_o());
      }
  
      // Replace the old exception object with the newly created one
!     if (callprojs.exobj != NULL) {
!       C->gvn_replace_by(callprojs.exobj, ex_oop);
      }
    }
  
    // Disconnect the call from the graph
    call->disconnect_inputs(C);
--- 2024,83 ---
    Node* ex_ctl = top();
  
    SafePointNode* final_state = stop();
  
    // Find all the needed outputs of this call
!   CallProjections* callprojs = call->extract_projections(true);
  
    Unique_Node_List wl;
    Node* init_mem = call->in(TypeFunc::Memory);
    Node* final_mem = final_state->in(TypeFunc::Memory);
    Node* final_ctl = final_state->in(TypeFunc::Control);
    Node* final_io = final_state->in(TypeFunc::I_O);
  
    // Replace all the old call edges with the edges from the inlining result
!   if (callprojs->fallthrough_catchproj != NULL) {
!     C->gvn_replace_by(callprojs->fallthrough_catchproj, final_ctl);
    }
!   if (callprojs->fallthrough_memproj != NULL) {
      if (final_mem->is_MergeMem()) {
        // Parser's exits MergeMem was not transformed but may be optimized
        final_mem = _gvn.transform(final_mem);
      }
!     C->gvn_replace_by(callprojs->fallthrough_memproj,   final_mem);
      add_mergemem_users_to_worklist(wl, final_mem);
    }
!   if (callprojs->fallthrough_ioproj != NULL) {
!     C->gvn_replace_by(callprojs->fallthrough_ioproj,    final_io);
    }
  
    // Replace the result with the new result if it exists and is used
!   if (callprojs->resproj[0] != NULL && result != NULL) {
!     // If the inlined code is dead, the result projections for an inline type returned as
+     // fields have not been replaced. They will go away once the call is replaced by TOP below.
+     assert(callprojs->nb_resproj == 1 || (call->tf()->returns_inline_type_as_fields() && stopped()),
+            "unexpected number of results");
+     C->gvn_replace_by(callprojs->resproj[0], result);
    }
  
    if (ejvms == NULL) {
      // No exception edges to simply kill off those paths
!     if (callprojs->catchall_catchproj != NULL) {
!       C->gvn_replace_by(callprojs->catchall_catchproj, C->top());
      }
!     if (callprojs->catchall_memproj != NULL) {
!       C->gvn_replace_by(callprojs->catchall_memproj,   C->top());
      }
!     if (callprojs->catchall_ioproj != NULL) {
!       C->gvn_replace_by(callprojs->catchall_ioproj,    C->top());
      }
      // Replace the old exception object with top
!     if (callprojs->exobj != NULL) {
!       C->gvn_replace_by(callprojs->exobj, C->top());
      }
    } else {
      GraphKit ekit(ejvms);
  
      // Load my combined exception state into the kit, with all phis transformed:
      SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states();
      replaced_nodes_exception = ex_map->replaced_nodes();
  
      Node* ex_oop = ekit.use_exception_state(ex_map);
  
!     if (callprojs->catchall_catchproj != NULL) {
!       C->gvn_replace_by(callprojs->catchall_catchproj, ekit.control());
        ex_ctl = ekit.control();
      }
!     if (callprojs->catchall_memproj != NULL) {
        Node* ex_mem = ekit.reset_memory();
!       C->gvn_replace_by(callprojs->catchall_memproj,   ex_mem);
        add_mergemem_users_to_worklist(wl, ex_mem);
      }
!     if (callprojs->catchall_ioproj != NULL) {
!       C->gvn_replace_by(callprojs->catchall_ioproj,    ekit.i_o());
      }
  
      // Replace the old exception object with the newly created one
!     if (callprojs->exobj != NULL) {
!       C->gvn_replace_by(callprojs->exobj, ex_oop);
      }
    }
  
    // Disconnect the call from the graph
    call->disconnect_inputs(C);

*** 1995,11 ***
    // optimizer doesn't like that.
    while (wl.size() > 0) {
      _gvn.transform(wl.pop());
    }
  
!   if (callprojs.fallthrough_catchproj != NULL && !final_ctl->is_top() && do_replaced_nodes) {
      replaced_nodes.apply(C, final_ctl);
    }
    if (!ex_ctl->is_top() && do_replaced_nodes) {
      replaced_nodes_exception.apply(C, ex_ctl);
    }
--- 2110,11 ---
    // optimizer doesn't like that.
    while (wl.size() > 0) {
      _gvn.transform(wl.pop());
    }
  
!   if (callprojs->fallthrough_catchproj != NULL && !final_ctl->is_top() && do_replaced_nodes) {
      replaced_nodes.apply(C, final_ctl);
    }
    if (!ex_ctl->is_top() && do_replaced_nodes) {
      replaced_nodes_exception.apply(C, ex_ctl);
    }

*** 2193,11 ***
      const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls);
      const TypeOopPtr* xtype = tklass->as_instance_type();
      assert(xtype->klass_is_exact(), "Should be exact");
      // Any reason to believe n is not null (from this profiling or a previous one)?
      assert(ptr_kind != ProfileAlwaysNull, "impossible here");
!     const TypePtr* ptr = (ptr_kind == ProfileMaybeNull && current_type->speculative_maybe_null()) ? TypePtr::BOTTOM : TypePtr::NOTNULL;
      // record the new speculative type's depth
      speculative = xtype->cast_to_ptr_type(ptr->ptr())->is_ptr();
      speculative = speculative->with_inline_depth(jvms()->depth());
    } else if (current_type->would_improve_ptr(ptr_kind)) {
      // Profiling report that null was never seen so we can change the
--- 2308,11 ---
      const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls);
      const TypeOopPtr* xtype = tklass->as_instance_type();
      assert(xtype->klass_is_exact(), "Should be exact");
      // Any reason to believe n is not null (from this profiling or a previous one)?
      assert(ptr_kind != ProfileAlwaysNull, "impossible here");
!     const TypePtr* ptr = (ptr_kind != ProfileNeverNull && current_type->speculative_maybe_null()) ? TypePtr::BOTTOM : TypePtr::NOTNULL;
      // record the new speculative type's depth
      speculative = xtype->cast_to_ptr_type(ptr->ptr())->is_ptr();
      speculative = speculative->with_inline_depth(jvms()->depth());
    } else if (current_type->would_improve_ptr(ptr_kind)) {
      // Profiling report that null was never seen so we can change the

*** 2216,11 ***
    }
  
    if (speculative != current_type->speculative()) {
      // Build a type with a speculative type (what we think we know
      // about the type but will need a guard when we use it)
!     const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::OffsetBot, TypeOopPtr::InstanceBot, speculative);
      // We're changing the type, we need a new CheckCast node to carry
      // the new type. The new type depends on the control: what
      // profiling tells us is only valid from here as far as we can
      // tell.
      Node* cast = new CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type));
--- 2331,11 ---
    }
  
    if (speculative != current_type->speculative()) {
      // Build a type with a speculative type (what we think we know
      // about the type but will need a guard when we use it)
!     const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::Offset::bottom, TypeOopPtr::InstanceBot, speculative);
      // We're changing the type, we need a new CheckCast node to carry
      // the new type. The new type depends on the control: what
      // profiling tells us is only valid from here as far as we can
      // tell.
      Node* cast = new CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type));

*** 2250,23 ***
         java_bc() == Bytecodes::_instanceof ||
         java_bc() == Bytecodes::_aastore) &&
        method()->method_data()->is_mature()) {
      ciProfileData* data = method()->method_data()->bci_to_data(bci());
      if (data != NULL) {
!       if (!data->as_BitData()->null_seen()) {
!         ptr_kind = ProfileNeverNull;
        } else {
!         assert(data->is_ReceiverTypeData(), "bad profile data type");
!         ciReceiverTypeData* call = (ciReceiverTypeData*)data->as_ReceiverTypeData();
!         uint i = 0;
!         for (; i < call->row_limit(); i++) {
!           ciKlass* receiver = call->receiver(i);
!           if (receiver != NULL) {
!             break;
            }
          }
-         ptr_kind = (i == call->row_limit()) ? ProfileAlwaysNull : ProfileMaybeNull;
        }
      }
    }
    return record_profile_for_speculation(n, exact_kls, ptr_kind);
  }
--- 2365,34 ---
         java_bc() == Bytecodes::_instanceof ||
         java_bc() == Bytecodes::_aastore) &&
        method()->method_data()->is_mature()) {
      ciProfileData* data = method()->method_data()->bci_to_data(bci());
      if (data != NULL) {
!       if (java_bc() == Bytecodes::_aastore) {
!         ciKlass* array_type = NULL;
+         ciKlass* element_type = NULL;
+         ProfilePtrKind element_ptr = ProfileMaybeNull;
+         bool flat_array = true;
+         bool null_free_array = true;
+         method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
+         exact_kls = element_type;
+         ptr_kind = element_ptr;
        } else {
!         if (!data->as_BitData()->null_seen()) {
!           ptr_kind = ProfileNeverNull;
!         } else {
!           assert(data->is_ReceiverTypeData(), "bad profile data type");
!           ciReceiverTypeData* call = (ciReceiverTypeData*)data->as_ReceiverTypeData();
!           uint i = 0;
!           for (; i < call->row_limit(); i++) {
+             ciKlass* receiver = call->receiver(i);
+             if (receiver != NULL) {
+               break;
+             }
            }
+           ptr_kind = (i == call->row_limit()) ? ProfileAlwaysNull : ProfileMaybeNull;
          }
        }
      }
    }
    return record_profile_for_speculation(n, exact_kls, ptr_kind);
  }

*** 2281,14 ***
  void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc) {
    if (!UseTypeSpeculation) {
      return;
    }
    const TypeFunc* tf    = TypeFunc::make(dest_method);
!   int             nargs = tf->domain()->cnt() - TypeFunc::Parms;
    int skip = Bytecodes::has_receiver(bc) ? 1 : 0;
    for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
!     const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms);
      if (is_reference_type(targ->basic_type())) {
        ProfilePtrKind ptr_kind = ProfileMaybeNull;
        ciKlass* better_type = NULL;
        if (method()->argument_profiled_type(bci(), i, better_type, ptr_kind)) {
          record_profile_for_speculation(argument(j), better_type, ptr_kind);
--- 2407,14 ---
  void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc) {
    if (!UseTypeSpeculation) {
      return;
    }
    const TypeFunc* tf    = TypeFunc::make(dest_method);
!   int             nargs = tf->domain_sig()->cnt() - TypeFunc::Parms;
    int skip = Bytecodes::has_receiver(bc) ? 1 : 0;
    for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
!     const Type *targ = tf->domain_sig()->field_at(j + TypeFunc::Parms);
      if (is_reference_type(targ->basic_type())) {
        ProfilePtrKind ptr_kind = ProfileMaybeNull;
        ciKlass* better_type = NULL;
        if (method()->argument_profiled_type(bci(), i, better_type, ptr_kind)) {
          record_profile_for_speculation(argument(j), better_type, ptr_kind);

*** 2338,13 ***
  
  void GraphKit::round_double_arguments(ciMethod* dest_method) {
    if (Matcher::strict_fp_requires_explicit_rounding) {
      // (Note:  TypeFunc::make has a cache that makes this fast.)
      const TypeFunc* tf    = TypeFunc::make(dest_method);
!     int             nargs = tf->domain()->cnt() - TypeFunc::Parms;
      for (int j = 0; j < nargs; j++) {
!       const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms);
        if (targ->basic_type() == T_DOUBLE) {
          // If any parameters are doubles, they must be rounded before
          // the call, dstore_rounding does gvn.transform
          Node *arg = argument(j);
          arg = dstore_rounding(arg);
--- 2464,13 ---
  
  void GraphKit::round_double_arguments(ciMethod* dest_method) {
    if (Matcher::strict_fp_requires_explicit_rounding) {
      // (Note:  TypeFunc::make has a cache that makes this fast.)
      const TypeFunc* tf    = TypeFunc::make(dest_method);
!     int             nargs = tf->domain_sig()->cnt() - TypeFunc::Parms;
      for (int j = 0; j < nargs; j++) {
!       const Type *targ = tf->domain_sig()->field_at(j + TypeFunc::Parms);
        if (targ->basic_type() == T_DOUBLE) {
          // If any parameters are doubles, they must be rounded before
          // the call, dstore_rounding does gvn.transform
          Node *arg = argument(j);
          arg = dstore_rounding(arg);

*** 2504,11 ***
    if (!is_leaf) {
      call = new CallStaticJavaNode(call_type, call_addr, call_name, adr_type);
    } else if (flags & RC_NO_FP) {
      call = new CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);
    } else  if (flags & RC_VECTOR){
!     uint num_bits = call_type->range()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte;
      call = new CallLeafVectorNode(call_type, call_addr, call_name, adr_type, num_bits);
    } else {
      call = new CallLeafNode(call_type, call_addr, call_name, adr_type);
    }
  
--- 2630,11 ---
    if (!is_leaf) {
      call = new CallStaticJavaNode(call_type, call_addr, call_name, adr_type);
    } else if (flags & RC_NO_FP) {
      call = new CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);
    } else  if (flags & RC_VECTOR){
!     uint num_bits = call_type->range_sig()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte;
      call = new CallLeafVectorNode(call_type, call_addr, call_name, adr_type, num_bits);
    } else {
      call = new CallLeafNode(call_type, call_addr, call_name, adr_type);
    }
  

*** 2604,30 ***
    {
      for (uint vm_arg_pos = 0, java_arg_read_pos = 0;
          vm_arg_pos < n_filtered_args; vm_arg_pos++) {
        uint vm_unfiltered_arg_pos = vm_arg_pos + 3; // +3 to skip fallback handle argument and addr (2 since long)
        Node* node = argument(vm_unfiltered_arg_pos);
!       const Type* type = call_type->domain()->field_at(TypeFunc::Parms + vm_unfiltered_arg_pos);
        VMReg reg = type == Type::HALF
          ? VMRegImpl::Bad()
          : argRegs[java_arg_read_pos++];
  
        argument_nodes[vm_arg_pos] = node;
        arg_types[TypeFunc::Parms + vm_arg_pos] = type;
        arg_regs.at_put(vm_arg_pos, reg);
      }
    }
  
!   uint n_returns = call_type->range()->cnt() - TypeFunc::Parms;
    GrowableArray<VMReg> ret_regs(C->comp_arena(), n_returns, n_returns, VMRegImpl::Bad());
    const Type** ret_types = TypeTuple::fields(n_returns);
  
    VMReg* retRegs = nep->returnMoves();
    {
      for (uint vm_ret_pos = 0, java_ret_read_pos = 0;
          vm_ret_pos < n_returns; vm_ret_pos++) { // 0 or 1
!       const Type* type = call_type->range()->field_at(TypeFunc::Parms + vm_ret_pos);
        VMReg reg = type == Type::HALF
          ? VMRegImpl::Bad()
          : retRegs[java_ret_read_pos++];
  
        ret_regs.at_put(vm_ret_pos, reg);
--- 2730,30 ---
    {
      for (uint vm_arg_pos = 0, java_arg_read_pos = 0;
          vm_arg_pos < n_filtered_args; vm_arg_pos++) {
        uint vm_unfiltered_arg_pos = vm_arg_pos + 3; // +3 to skip fallback handle argument and addr (2 since long)
        Node* node = argument(vm_unfiltered_arg_pos);
!       const Type* type = call_type->domain_sig()->field_at(TypeFunc::Parms + vm_unfiltered_arg_pos);
        VMReg reg = type == Type::HALF
          ? VMRegImpl::Bad()
          : argRegs[java_arg_read_pos++];
  
        argument_nodes[vm_arg_pos] = node;
        arg_types[TypeFunc::Parms + vm_arg_pos] = type;
        arg_regs.at_put(vm_arg_pos, reg);
      }
    }
  
!   uint n_returns = call_type->range_sig()->cnt() - TypeFunc::Parms;
    GrowableArray<VMReg> ret_regs(C->comp_arena(), n_returns, n_returns, VMRegImpl::Bad());
    const Type** ret_types = TypeTuple::fields(n_returns);
  
    VMReg* retRegs = nep->returnMoves();
    {
      for (uint vm_ret_pos = 0, java_ret_read_pos = 0;
          vm_ret_pos < n_returns; vm_ret_pos++) { // 0 or 1
!       const Type* type = call_type->range_sig()->field_at(TypeFunc::Parms + vm_ret_pos);
        VMReg reg = type == Type::HALF
          ? VMRegImpl::Bad()
          : retRegs[java_ret_read_pos++];
  
        ret_regs.at_put(vm_ret_pos, reg);

*** 2950,20 ***
    *ctrl = gvn.transform(r_ok_subtype);
    return gvn.transform(r_not_subtype);
  }
  
  Node* GraphKit::gen_subtype_check(Node* obj_or_subklass, Node* superklass) {
    bool expand_subtype_check = C->post_loop_opts_phase() ||   // macro node expansion is over
                                ExpandSubTypeCheckAtParseTime; // forced expansion
    if (expand_subtype_check) {
      MergeMemNode* mem = merged_memory();
      Node* ctrl = control();
      Node* subklass = obj_or_subklass;
!     if (!_gvn.type(obj_or_subklass)->isa_klassptr()) {
        subklass = load_object_klass(obj_or_subklass);
      }
- 
      Node* n = Phase::gen_subtype_check(subklass, superklass, &ctrl, mem, _gvn);
      set_control(ctrl);
      return n;
    }
  
--- 3076,23 ---
    *ctrl = gvn.transform(r_ok_subtype);
    return gvn.transform(r_not_subtype);
  }
  
  Node* GraphKit::gen_subtype_check(Node* obj_or_subklass, Node* superklass) {
+   const Type* sub_t = _gvn.type(obj_or_subklass);
+   if (sub_t->isa_inlinetype()) {
+     obj_or_subklass = makecon(TypeKlassPtr::make(sub_t->inline_klass()));
+   }
    bool expand_subtype_check = C->post_loop_opts_phase() ||   // macro node expansion is over
                                ExpandSubTypeCheckAtParseTime; // forced expansion
    if (expand_subtype_check) {
      MergeMemNode* mem = merged_memory();
      Node* ctrl = control();
      Node* subklass = obj_or_subklass;
!     if (!sub_t->isa_klassptr() && !sub_t->isa_inlinetype()) {
        subklass = load_object_klass(obj_or_subklass);
      }
      Node* n = Phase::gen_subtype_check(subklass, superklass, &ctrl, mem, _gvn);
      set_control(ctrl);
      return n;
    }
  

*** 2974,40 ***
    return _gvn.transform(new IfFalseNode(iff));
  }
  
  // Profile-driven exact type check:
  Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass,
!                                     float prob,
-                                     Node* *casted_receiver) {
    assert(!klass->is_interface(), "no exact type check on interfaces");
! 
    const TypeKlassPtr* tklass = TypeKlassPtr::make(klass);
    Node* recv_klass = load_object_klass(receiver);
!   Node* want_klass = makecon(tklass);
-   Node* cmp = _gvn.transform(new CmpPNode(recv_klass, want_klass));
-   Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
-   IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN);
-   set_control( _gvn.transform(new IfTrueNode (iff)));
-   Node* fail = _gvn.transform(new IfFalseNode(iff));
  
    if (!stopped()) {
      const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
!     const TypeOopPtr* recvx_type = tklass->as_instance_type();
!     assert(recvx_type->klass_is_exact(), "");
  
!     if (!receiver_type->higher_equal(recvx_type)) { // ignore redundant casts
        // Subsume downstream occurrences of receiver with a cast to
        // recv_xtype, since now we know what the type will be.
!       Node* cast = new CheckCastPPNode(control(), receiver, recvx_type);
!       (*casted_receiver) = _gvn.transform(cast);
        // (User must make the replace_in_map call.)
      }
    }
  
    return fail;
  }
  
  //------------------------------subtype_check_receiver-------------------------
  Node* GraphKit::subtype_check_receiver(Node* receiver, ciKlass* klass,
                                         Node** casted_receiver) {
    const TypeKlassPtr* tklass = TypeKlassPtr::make(klass);
    Node* want_klass = makecon(tklass);
--- 3103,61 ---
    return _gvn.transform(new IfFalseNode(iff));
  }
  
  // Profile-driven exact type check:
  Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass,
!                                     float prob, Node* *casted_receiver) {
    assert(!klass->is_interface(), "no exact type check on interfaces");
!   Node* fail = top();
+   const Type* rec_t = _gvn.type(receiver);
+   if (rec_t->isa_inlinetype()) {
+     if (klass->equals(rec_t->inline_klass())) {
+       (*casted_receiver) = receiver; // Always passes
+     } else {
+       (*casted_receiver) = top();    // Always fails
+       fail = control();
+       set_control(top());
+     }
+     return fail;
+   }
    const TypeKlassPtr* tklass = TypeKlassPtr::make(klass);
    Node* recv_klass = load_object_klass(receiver);
!   fail = type_check(recv_klass, tklass, prob);
  
    if (!stopped()) {
      const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
!     const TypeOopPtr* recv_xtype = tklass->as_instance_type();
!     assert(recv_xtype->klass_is_exact(), "");
  
!     if (!receiver_type->higher_equal(recv_xtype)) { // ignore redundant casts
        // Subsume downstream occurrences of receiver with a cast to
        // recv_xtype, since now we know what the type will be.
!       Node* cast = new CheckCastPPNode(control(), receiver, recv_xtype);
!       Node* res = _gvn.transform(cast);
+       if (recv_xtype->is_inlinetypeptr()) {
+         assert(!gvn().type(res)->maybe_null(), "receiver should never be null");
+         res = InlineTypeNode::make_from_oop(this, res, recv_xtype->inline_klass())->as_InlineTypeBase()->as_ptr(&gvn());
+       }
+       (*casted_receiver) = res;
        // (User must make the replace_in_map call.)
      }
    }
  
    return fail;
  }
  
+ Node* GraphKit::type_check(Node* recv_klass, const TypeKlassPtr* tklass,
+                            float prob) {
+   Node* want_klass = makecon(tklass);
+   Node* cmp = _gvn.transform(new CmpPNode(recv_klass, want_klass));
+   Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
+   IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN);
+   set_control(_gvn.transform(new IfTrueNode (iff)));
+   Node* fail = _gvn.transform(new IfFalseNode(iff));
+   return fail;
+ }
+ 
  //------------------------------subtype_check_receiver-------------------------
  Node* GraphKit::subtype_check_receiver(Node* receiver, ciKlass* klass,
                                         Node** casted_receiver) {
    const TypeKlassPtr* tklass = TypeKlassPtr::make(klass);
    Node* want_klass = makecon(tklass);

*** 3016,11 ***
  
    // Ignore interface type information until interface types are properly tracked.
    if (!stopped() && !klass->is_interface()) {
      const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
      const TypeOopPtr* recv_type = tklass->cast_to_exactness(false)->is_klassptr()->as_instance_type();
!     if (!receiver_type->higher_equal(recv_type)) { // ignore redundant casts
        Node* cast = new CheckCastPPNode(control(), receiver, recv_type);
        (*casted_receiver) = _gvn.transform(cast);
      }
    }
  
--- 3166,11 ---
  
    // Ignore interface type information until interface types are properly tracked.
    if (!stopped() && !klass->is_interface()) {
      const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
      const TypeOopPtr* recv_type = tklass->cast_to_exactness(false)->is_klassptr()->as_instance_type();
!     if (receiver_type != NULL && !receiver_type->higher_equal(recv_type)) { // ignore redundant casts
        Node* cast = new CheckCastPPNode(control(), receiver, recv_type);
        (*casted_receiver) = _gvn.transform(cast);
      }
    }
  

*** 3048,10 ***
--- 3198,13 ---
        return true;
      // If the profile has not seen a null, assume it won't happen.
      assert(java_bc() == Bytecodes::_checkcast ||
             java_bc() == Bytecodes::_instanceof ||
             java_bc() == Bytecodes::_aastore, "MDO must collect null_seen bit here");
+     if (java_bc() == Bytecodes::_aastore) {
+       return ((ciArrayLoadStoreData*)data->as_ArrayLoadStoreData())->element()->ptr_kind() == ProfileNeverNull;
+     }
      return !data->as_BitData()->null_seen();
    }
    speculating = false;
    return false;
  }

*** 3127,11 ***
  
    // (No, this isn't a call, but it's enough like a virtual call
    // to use the same ciMethod accessor to get the profile info...)
    // If we have a speculative type use it instead of profiling (which
    // may not help us)
!   ciKlass* exact_kls = spec_klass == NULL ? profile_has_unique_klass() : spec_klass;
    if (exact_kls != NULL) {// no cast failures here
      if (require_klass == NULL ||
          C->static_subtype_check(require_klass, exact_kls) == Compile::SSC_always_true) {
        // If we narrow the type to match what the type profile sees or
        // the speculative type, we can then remove the rest of the
--- 3280,24 ---
  
    // (No, this isn't a call, but it's enough like a virtual call
    // to use the same ciMethod accessor to get the profile info...)
    // If we have a speculative type use it instead of profiling (which
    // may not help us)
!   ciKlass* exact_kls = spec_klass;
+   if (exact_kls == NULL) {
+     if (java_bc() == Bytecodes::_aastore) {
+       ciKlass* array_type = NULL;
+       ciKlass* element_type = NULL;
+       ProfilePtrKind element_ptr = ProfileMaybeNull;
+       bool flat_array = true;
+       bool null_free_array = true;
+       method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
+       exact_kls = element_type;
+     } else {
+       exact_kls = profile_has_unique_klass();
+     }
+   }
    if (exact_kls != NULL) {// no cast failures here
      if (require_klass == NULL ||
          C->static_subtype_check(require_klass, exact_kls) == Compile::SSC_always_true) {
        // If we narrow the type to match what the type profile sees or
        // the speculative type, we can then remove the rest of the

*** 3232,14 ***
      data = method()->method_data()->bci_to_data(bci());
    }
    bool speculative_not_null = false;
    bool never_see_null = (ProfileDynamicTypes  // aggressive use of profile
                           && seems_never_null(obj, data, speculative_not_null));
  
    // Null check; get casted pointer; set region slot 3
    Node* null_ctl = top();
!   Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
  
    // If not_null_obj is dead, only null-path is taken
    if (stopped()) {              // Doing instance-of on a NULL?
      set_control(null_ctl);
      return intcon(0);
--- 3398,15 ---
      data = method()->method_data()->bci_to_data(bci());
    }
    bool speculative_not_null = false;
    bool never_see_null = (ProfileDynamicTypes  // aggressive use of profile
                           && seems_never_null(obj, data, speculative_not_null));
+   bool is_value = obj->is_InlineType();
  
    // Null check; get casted pointer; set region slot 3
    Node* null_ctl = top();
!   Node* not_null_obj = is_value ? obj : null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
  
    // If not_null_obj is dead, only null-path is taken
    if (stopped()) {              // Doing instance-of on a NULL?
      set_control(null_ctl);
      return intcon(0);

*** 3253,33 ***
      region->del_req(_null_path);
      phi   ->del_req(_null_path);
    }
  
    // Do we know the type check always succeeds?
!   bool known_statically = false;
!   if (_gvn.type(superklass)->singleton()) {
!     ciKlass* superk = _gvn.type(superklass)->is_klassptr()->klass();
!     ciKlass* subk = _gvn.type(obj)->is_oopptr()->klass();
!     if (subk != NULL && subk->is_loaded()) {
!       int static_res = C->static_subtype_check(superk, subk);
!       known_statically = (static_res == Compile::SSC_always_true || static_res == Compile::SSC_always_false);
      }
-   }
  
!   if (!known_statically) {
!     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
!     // We may not have profiling here or it may not help us. If we
!     // have a speculative type use it to perform an exact cast.
!     ciKlass* spec_obj_type = obj_type->speculative_type();
!     if (spec_obj_type != NULL || (ProfileDynamicTypes && data != NULL)) {
!       Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, NULL, spec_obj_type, safe_for_replace);
!       if (stopped()) {            // Profile disagrees with this path.
!         set_control(null_ctl);    // Null is the only remaining possibility.
!         return intcon(0);
!       }
!       if (cast_obj != NULL) {
!         not_null_obj = cast_obj;
        }
      }
    }
  
    // Generate the subtype check
--- 3420,38 ---
      region->del_req(_null_path);
      phi   ->del_req(_null_path);
    }
  
    // Do we know the type check always succeeds?
!   if (!is_value) {
!     bool known_statically = false;
!     if (_gvn.type(superklass)->singleton()) {
!       ciKlass* superk = _gvn.type(superklass)->is_klassptr()->klass();
!       ciKlass* subk = _gvn.type(obj)->is_oopptr()->klass();
!       if (subk != NULL && subk->is_loaded()) {
!         int static_res = C->static_subtype_check(superk, subk);
+         known_statically = (static_res == Compile::SSC_always_true || static_res == Compile::SSC_always_false);
+       }
      }
  
!     if (!known_statically) {
!       const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
!       // We may not have profiling here or it may not help us. If we
!       // have a speculative type use it to perform an exact cast.
!       ciKlass* spec_obj_type = obj_type->speculative_type();
!       if (spec_obj_type != NULL || (ProfileDynamicTypes && data != NULL)) {
!         Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, NULL, spec_obj_type, safe_for_replace);
!         if (stopped()) {            // Profile disagrees with this path.
!           set_control(null_ctl);    // Null is the only remaining possibility.
!           return intcon(0);
!         }
!         if (cast_obj != NULL &&
!             // A value that's sometimes null is not something we can optimize well
+             !(cast_obj->is_InlineType() && null_ctl != top())) {
+           not_null_obj = cast_obj;
+           is_value = not_null_obj->is_InlineType();
+         }
        }
      }
    }
  
    // Generate the subtype check

*** 3298,11 ***
    record_for_igvn(region);
  
    // If we know the type check always succeeds then we don't use the
    // profiling data at this bytecode. Don't lose it, feed it to the
    // type system as a speculative type.
!   if (safe_for_replace) {
      Node* casted_obj = record_profiled_receiver_for_speculation(obj);
      replace_in_map(obj, casted_obj);
    }
  
    return _gvn.transform(phi);
--- 3470,11 ---
    record_for_igvn(region);
  
    // If we know the type check always succeeds then we don't use the
    // profiling data at this bytecode. Don't lose it, feed it to the
    // type system as a speculative type.
!   if (safe_for_replace && !is_value) {
      Node* casted_obj = record_profiled_receiver_for_speculation(obj);
      replace_in_map(obj, casted_obj);
    }
  
    return _gvn.transform(phi);

*** 3313,69 ***
  // array store bytecode.  Stack must be as-if BEFORE doing the bytecode so the
  // uncommon-trap paths work.  Adjust stack after this call.
  // If failure_control is supplied and not null, it is filled in with
  // the control edge for the cast failure.  Otherwise, an appropriate
  // uncommon trap or exception is thrown.
! Node* GraphKit::gen_checkcast(Node *obj, Node* superklass,
-                               Node* *failure_control) {
    kill_dead_locals();           // Benefit all the uncommon traps
!   const TypeKlassPtr *tk = _gvn.type(superklass)->is_klassptr();
!   const Type *toop = TypeOopPtr::make_from_klass(tk->klass());
  
    // Fast cutout:  Check the case that the cast is vacuously true.
    // This detects the common cases where the test will short-circuit
    // away completely.  We do this before we perform the null check,
    // because if the test is going to turn into zero code, we don't
    // want a residual null check left around.  (Causes a slowdown,
    // for example, in some objArray manipulations, such as a[i]=a[j].)
    if (tk->singleton()) {
!     const TypeOopPtr* objtp = _gvn.type(obj)->isa_oopptr();
!     if (objtp != NULL && objtp->klass() != NULL) {
!       switch (C->static_subtype_check(tk->klass(), objtp->klass())) {
        case Compile::SSC_always_true:
        // If we know the type check always succeeds then we don't use
          // the profiling data at this bytecode. Don't lose it, feed it
          // to the type system as a speculative type.
!         return record_profiled_receiver_for_speculation(obj);
        case Compile::SSC_always_false:
!         // It needs a null check because a null will *pass* the cast check.
!         // A non-null value will always produce an exception.
!         if (!objtp->maybe_null()) {
!           builtin_throw(Deoptimization::Reason_class_check, makecon(TypeKlassPtr::make(objtp->klass())));
            return top();
!         } else if (!too_many_traps_or_recompiles(Deoptimization::Reason_null_assert)) {
!           return null_assert(obj);
          }
-         break; // Fall through to full check
        }
      }
    }
  
    ciProfileData* data = NULL;
-   bool safe_for_replace = false;
    if (failure_control == NULL) {        // use MDO in regular case only
      assert(java_bc() == Bytecodes::_aastore ||
             java_bc() == Bytecodes::_checkcast,
             "interpreter profiles type checks only for these BCs");
!     data = method()->method_data()->bci_to_data(bci());
!     safe_for_replace = true;
    }
  
    // Make the merge point
    enum { _obj_path = 1, _null_path, PATH_LIMIT };
    RegionNode* region = new RegionNode(PATH_LIMIT);
    Node*       phi    = new PhiNode(region, toop);
    C->set_has_split_ifs(true); // Has chance for split-if optimization
  
    // Use null-cast information if it is available
    bool speculative_not_null = false;
    bool never_see_null = ((failure_control == NULL)  // regular case only
                           && seems_never_null(obj, data, speculative_not_null));
  
    // Null check; get casted pointer; set region slot 3
    Node* null_ctl = top();
!   Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
  
    // If not_null_obj is dead, only null-path is taken
    if (stopped()) {              // Doing instance-of on a NULL?
      set_control(null_ctl);
      return null();
--- 3485,109 ---
  // array store bytecode.  Stack must be as-if BEFORE doing the bytecode so the
  // uncommon-trap paths work.  Adjust stack after this call.
  // If failure_control is supplied and not null, it is filled in with
  // the control edge for the cast failure.  Otherwise, an appropriate
  // uncommon trap or exception is thrown.
! Node* GraphKit::gen_checkcast(Node *obj, Node* superklass, Node* *failure_control, bool null_free) {
    kill_dead_locals();           // Benefit all the uncommon traps
!   const TypeKlassPtr* tk = _gvn.type(superklass)->is_klassptr();
!   const TypeOopPtr* toop = TypeOopPtr::make_from_klass(tk->klass());
+   bool safe_for_replace = (failure_control == NULL);
+   bool from_inline = obj->is_InlineType();
+   assert(!null_free || toop->is_inlinetypeptr(), "must be an inline type pointer");
  
    // Fast cutout:  Check the case that the cast is vacuously true.
    // This detects the common cases where the test will short-circuit
    // away completely.  We do this before we perform the null check,
    // because if the test is going to turn into zero code, we don't
    // want a residual null check left around.  (Causes a slowdown,
    // for example, in some objArray manipulations, such as a[i]=a[j].)
    if (tk->singleton()) {
!     ciKlass* klass = NULL;
!     if (obj->is_InlineTypeBase()) {
!       klass = _gvn.type(obj)->inline_klass();
+     } else {
+       const TypeOopPtr* objtp = _gvn.type(obj)->isa_oopptr();
+       if (objtp != NULL) {
+         klass = objtp->klass();
+       }
+     }
+     if (klass != NULL) {
+       switch (C->static_subtype_check(tk->klass(), klass)) {
        case Compile::SSC_always_true:
        // If we know the type check always succeeds then we don't use
          // the profiling data at this bytecode. Don't lose it, feed it
          // to the type system as a speculative type.
!         if (!from_inline) {
+           obj = record_profiled_receiver_for_speculation(obj);
+           if (null_free) {
+             assert(safe_for_replace, "must be");
+             obj = null_check(obj);
+           }
+           assert(stopped() || !toop->is_inlinetypeptr() ||
+                  obj->is_InlineTypeBase(), "should have been scalarized");
+         }
+         return obj;
        case Compile::SSC_always_false:
!         if (from_inline || null_free) {
!           if (!from_inline) {
!             assert(safe_for_replace, "must be");
!             null_check(obj);
+           }
+           // Inline type is null-free. Always throw an exception.
+           builtin_throw(Deoptimization::Reason_class_check, makecon(TypeKlassPtr::make(klass)));
            return top();
!         } else {
!           // It needs a null check because a null will *pass* the cast check.
+           const TypeOopPtr* objtp = _gvn.type(obj)->isa_oopptr();
+           if (!objtp->maybe_null()) {
+             builtin_throw(Deoptimization::Reason_class_check, makecon(TypeKlassPtr::make(objtp->klass())));
+             return top();
+           } else if (!too_many_traps_or_recompiles(Deoptimization::Reason_null_assert)) {
+             return null_assert(obj);
+           }
+           break; // Fall through to full check
          }
        }
      }
    }
  
    ciProfileData* data = NULL;
    if (failure_control == NULL) {        // use MDO in regular case only
      assert(java_bc() == Bytecodes::_aastore ||
             java_bc() == Bytecodes::_checkcast,
             "interpreter profiles type checks only for these BCs");
!     if (method()->method_data()->is_mature()) {
!       data = method()->method_data()->bci_to_data(bci());
+     }
    }
  
    // Make the merge point
    enum { _obj_path = 1, _null_path, PATH_LIMIT };
    RegionNode* region = new RegionNode(PATH_LIMIT);
    Node*       phi    = new PhiNode(region, toop);
+   _gvn.set_type(region, Type::CONTROL);
+   _gvn.set_type(phi, toop);
+ 
    C->set_has_split_ifs(true); // Has chance for split-if optimization
  
    // Use null-cast information if it is available
    bool speculative_not_null = false;
    bool never_see_null = ((failure_control == NULL)  // regular case only
                           && seems_never_null(obj, data, speculative_not_null));
  
    // Null check; get casted pointer; set region slot 3
    Node* null_ctl = top();
!   Node* not_null_obj = NULL;
+   if (from_inline) {
+     not_null_obj = obj;
+   } else if (null_free) {
+     assert(safe_for_replace, "must be");
+     not_null_obj = null_check(obj);
+   } else {
+     not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
+   }
  
    // If not_null_obj is dead, only null-path is taken
    if (stopped()) {              // Doing instance-of on a NULL?
      set_control(null_ctl);
      return null();

*** 3389,21 ***
      region->del_req(_null_path);
      phi   ->del_req(_null_path);
    }
  
    Node* cast_obj = NULL;
!   if (tk->klass_is_exact()) {
      // The following optimization tries to statically cast the speculative type of the object
      // (for example obtained during profiling) to the type of the superklass and then do a
      // dynamic check that the type of the object is what we expect. To work correctly
      // for checkcast and aastore the type of superklass should be exact.
      const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
      // We may not have profiling here or it may not help us. If we have
      // a speculative type use it to perform an exact cast.
      ciKlass* spec_obj_type = obj_type->speculative_type();
      if (spec_obj_type != NULL || data != NULL) {
        cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk->klass(), spec_obj_type, safe_for_replace);
        if (cast_obj != NULL) {
          if (failure_control != NULL) // failure is now impossible
            (*failure_control) = top();
          // adjust the type of the phi to the exact klass:
          phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR));
--- 3601,28 ---
      region->del_req(_null_path);
      phi   ->del_req(_null_path);
    }
  
    Node* cast_obj = NULL;
!   if (!from_inline && tk->klass_is_exact()) {
      // The following optimization tries to statically cast the speculative type of the object
      // (for example obtained during profiling) to the type of the superklass and then do a
      // dynamic check that the type of the object is what we expect. To work correctly
      // for checkcast and aastore the type of superklass should be exact.
      const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
      // We may not have profiling here or it may not help us. If we have
      // a speculative type use it to perform an exact cast.
      ciKlass* spec_obj_type = obj_type->speculative_type();
      if (spec_obj_type != NULL || data != NULL) {
        cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk->klass(), spec_obj_type, safe_for_replace);
+       if (cast_obj != NULL && cast_obj->is_InlineType()) {
+         if (null_ctl != top()) {
+           cast_obj = NULL; // A value that's sometimes null is not something we can optimize well
+         } else {
+           return cast_obj;
+         }
+       }
        if (cast_obj != NULL) {
          if (failure_control != NULL) // failure is now impossible
            (*failure_control) = top();
          // adjust the type of the phi to the exact klass:
          phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR));

*** 3411,20 ***
      }
    }
  
    if (cast_obj == NULL) {
      // Generate the subtype check
!     Node* not_subtype_ctrl = gen_subtype_check(not_null_obj, superklass );
  
      // Plug in success path into the merge
!     cast_obj = _gvn.transform(new CheckCastPPNode(control(), not_null_obj, toop));
      // Failure path ends in uncommon trap (or may be dead - failure impossible)
      if (failure_control == NULL) {
        if (not_subtype_ctrl != top()) { // If failure is possible
          PreserveJVMState pjvms(this);
          set_control(not_subtype_ctrl);
!         builtin_throw(Deoptimization::Reason_class_check, load_object_klass(not_null_obj));
        }
      } else {
        (*failure_control) = not_subtype_ctrl;
      }
    }
--- 3630,26 ---
      }
    }
  
    if (cast_obj == NULL) {
      // Generate the subtype check
!     Node* not_subtype_ctrl = gen_subtype_check(not_null_obj, superklass);
  
      // Plug in success path into the merge
!     cast_obj = from_inline ? not_null_obj : _gvn.transform(new CheckCastPPNode(control(), not_null_obj, toop));
      // Failure path ends in uncommon trap (or may be dead - failure impossible)
      if (failure_control == NULL) {
        if (not_subtype_ctrl != top()) { // If failure is possible
          PreserveJVMState pjvms(this);
          set_control(not_subtype_ctrl);
!         Node* obj_klass = NULL;
+         if (not_null_obj->is_InlineTypeBase()) {
+           obj_klass = makecon(TypeKlassPtr::make(_gvn.type(not_null_obj)->inline_klass()));
+         } else {
+           obj_klass = load_object_klass(not_null_obj);
+         }
+         builtin_throw(Deoptimization::Reason_class_check, obj_klass);
        }
      } else {
        (*failure_control) = not_subtype_ctrl;
      }
    }

*** 3447,11 ***
  
    // Return final merged results
    set_control( _gvn.transform(region) );
    record_for_igvn(region);
  
!   return record_profiled_receiver_for_speculation(res);
  }
  
  //------------------------------next_monitor-----------------------------------
  // What number should be given to the next monitor?
  int GraphKit::next_monitor() {
--- 3672,139 ---
  
    // Return final merged results
    set_control( _gvn.transform(region) );
    record_for_igvn(region);
  
!   bool not_inline = !toop->can_be_inline_type();
+   bool not_flattened = !UseFlatArray || not_inline || (toop->is_inlinetypeptr() && !toop->inline_klass()->flatten_array());
+   if (EnableValhalla && not_flattened) {
+     // Check if obj has been loaded from an array
+     obj = obj->isa_DecodeN() ? obj->in(1) : obj;
+     Node* array = NULL;
+     if (obj->isa_Load()) {
+       Node* address = obj->in(MemNode::Address);
+       if (address->isa_AddP()) {
+         array = address->as_AddP()->in(AddPNode::Base);
+       }
+     } else if (obj->is_Phi()) {
+       Node* region = obj->in(0);
+       // TODO make this more robust (see JDK-8231346)
+       if (region->req() == 3 && region->in(2) != NULL && region->in(2)->in(0) != NULL) {
+         IfNode* iff = region->in(2)->in(0)->isa_If();
+         if (iff != NULL) {
+           iff->is_flat_array_check(&_gvn, &array);
+         }
+       }
+     }
+     if (array != NULL) {
+       const TypeAryPtr* ary_t = _gvn.type(array)->isa_aryptr();
+       if (ary_t != NULL) {
+         if (!ary_t->is_not_null_free() && not_inline) {
+           // Casting array element to a non-inline-type, mark array as not null-free.
+           Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, ary_t->cast_to_not_null_free()));
+           replace_in_map(array, cast);
+         } else if (!ary_t->is_not_flat()) {
+           // Casting array element to a non-flattened type, mark array as not flat.
+           Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, ary_t->cast_to_not_flat()));
+           replace_in_map(array, cast);
+         }
+       }
+     }
+   }
+ 
+   if (!stopped() && !res->is_InlineTypeBase()) {
+     res = record_profiled_receiver_for_speculation(res);
+     if (toop->is_inlinetypeptr()) {
+       Node* vt = InlineTypeNode::make_from_oop(this, res, toop->inline_klass(), !gvn().type(res)->maybe_null());
+       res = vt;
+       if (safe_for_replace) {
+         if (vt->isa_InlineType() && C->inlining_incrementally()) {
+           vt = vt->as_InlineType()->as_ptr(&_gvn);
+         }
+         replace_in_map(obj, vt);
+         replace_in_map(not_null_obj, vt);
+         replace_in_map(res, vt);
+       }
+     }
+   }
+   return res;
+ }
+ 
+ Node* GraphKit::inline_type_test(Node* obj, bool is_inline) {
+   Node* mark_adr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
+   Node* mark = make_load(NULL, mark_adr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
+   Node* mask = MakeConX(markWord::inline_type_pattern);
+   Node* masked = _gvn.transform(new AndXNode(mark, mask));
+   Node* cmp = _gvn.transform(new CmpXNode(masked, mask));
+   return _gvn.transform(new BoolNode(cmp, is_inline ? BoolTest::eq : BoolTest::ne));
+ }
+ 
+ Node* GraphKit::is_val_mirror(Node* mirror) {
+   Node* p = basic_plus_adr(mirror, java_lang_Class::secondary_mirror_offset());
+   Node* secondary_mirror = access_load_at(mirror, p, _gvn.type(p)->is_ptr(), TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR), T_OBJECT, IN_HEAP);
+   Node* cmp = _gvn.transform(new CmpPNode(mirror, secondary_mirror));
+   return _gvn.transform(new BoolNode(cmp, BoolTest::eq));
+ }
+ 
+ Node* GraphKit::array_lh_test(Node* klass, jint mask, jint val, bool eq) {
+   Node* lh_adr = basic_plus_adr(klass, in_bytes(Klass::layout_helper_offset()));
+   // Make sure to use immutable memory here to enable hoisting the check out of loops
+   Node* lh_val = _gvn.transform(LoadNode::make(_gvn, NULL, immutable_memory(), lh_adr, lh_adr->bottom_type()->is_ptr(), TypeInt::INT, T_INT, MemNode::unordered));
+   Node* masked = _gvn.transform(new AndINode(lh_val, intcon(mask)));
+   Node* cmp = _gvn.transform(new CmpINode(masked, intcon(val)));
+   return _gvn.transform(new BoolNode(cmp, eq ? BoolTest::eq : BoolTest::ne));
+ }
+ 
+ Node* GraphKit::flat_array_test(Node* ary, bool flat) {
+   // We can't use immutable memory here because the mark word is mutable.
+   // PhaseIdealLoop::move_flat_array_check_out_of_loop will make sure the
+   // check is moved out of loops (mainly to enable loop unswitching).
+   Node* mem = UseArrayMarkWordCheck ? memory(Compile::AliasIdxRaw) : immutable_memory();
+   Node* cmp = _gvn.transform(new FlatArrayCheckNode(C, mem, ary));
+   record_for_igvn(cmp); // Give it a chance to be optimized out by IGVN
+   return _gvn.transform(new BoolNode(cmp, flat ? BoolTest::eq : BoolTest::ne));
+ }
+ 
+ Node* GraphKit::null_free_array_test(Node* klass, bool null_free) {
+   return array_lh_test(klass, Klass::_lh_null_free_bit_inplace, 0, !null_free);
+ }
+ 
+ // Deoptimize if 'ary' is a null-free inline type array and 'val' is null
+ Node* GraphKit::inline_array_null_guard(Node* ary, Node* val, int nargs, bool safe_for_replace) {
+   const Type* val_t = _gvn.type(val);
+   if (val->is_InlineType() || !TypePtr::NULL_PTR->higher_equal(val_t)) {
+     return ary; // Never null
+   }
+   RegionNode* region = new RegionNode(3);
+   Node* null_ctl = top();
+   null_check_oop(val, &null_ctl);
+   if (null_ctl != top()) {
+     PreserveJVMState pjvms(this);
+     set_control(null_ctl);
+     {
+       // Deoptimize if null-free array
+       BuildCutout unless(this, null_free_array_test(load_object_klass(ary), /* null_free = */ false), PROB_MAX);
+       inc_sp(nargs);
+       uncommon_trap(Deoptimization::Reason_null_check,
+                     Deoptimization::Action_none);
+     }
+     region->init_req(1, control());
+   }
+   region->init_req(2, control());
+   set_control(_gvn.transform(region));
+   record_for_igvn(region);
+   if (val_t == TypePtr::NULL_PTR) {
+     // Since we were just successfully storing null, the array can't be null free.
+     const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
+     ary_t = ary_t->cast_to_not_null_free();
+     Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, ary_t));
+     if (safe_for_replace) {
+       replace_in_map(ary, cast);
+     }
+     ary = cast;
+   }
+   return ary;
  }
  
  //------------------------------next_monitor-----------------------------------
  // What number should be given to the next monitor?
  int GraphKit::next_monitor() {

*** 3515,10 ***
--- 3868,11 ---
    // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
    assert(SynchronizationEntryBCI == InvocationEntryBci, "");
  
    if( !GenerateSynchronizationCode )
      return NULL;                // Not locking things?
+ 
    if (stopped())                // Dead monitor?
      return NULL;
  
    assert(dead_locals_are_killed(), "should kill locals before sync. point");
  

*** 3583,10 ***
--- 3937,11 ---
      return;
    if (stopped()) {               // Dead monitor?
      map()->pop_monitor();        // Kill monitor from debug info
      return;
    }
+   assert(!obj->is_InlineTypeBase(), "should not unlock on inline type");
  
    // Memory barrier to avoid floating things down past the locked region
    insert_mem_bar(Op_MemBarReleaseLock);
  
    const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type();

*** 3623,12 ***
  // almost always feature constant types.
  Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
    const TypeKlassPtr* inst_klass = _gvn.type(klass_node)->isa_klassptr();
    if (!StressReflectiveCode && inst_klass != NULL) {
      ciKlass* klass = inst_klass->klass();
!     bool    xklass = inst_klass->klass_is_exact();
!     if (xklass || klass->is_array_klass()) {
        jint lhelper = klass->layout_helper();
        if (lhelper != Klass::_lh_neutral_value) {
          constant_value = lhelper;
          return (Node*) NULL;
        }
--- 3978,19 ---
  // almost always feature constant types.
  Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
    const TypeKlassPtr* inst_klass = _gvn.type(klass_node)->isa_klassptr();
    if (!StressReflectiveCode && inst_klass != NULL) {
      ciKlass* klass = inst_klass->klass();
!     assert(klass != NULL, "klass should not be NULL");
!     bool xklass = inst_klass->klass_is_exact();
+     bool can_be_flattened = false;
+     if (UseFlatArray && klass->is_obj_array_klass() && !klass->as_obj_array_klass()->is_elem_null_free()) {
+       // The runtime type of [LMyValue might be [QMyValue due to [QMyValue <: [LMyValue.
+       ciKlass* elem = klass->as_obj_array_klass()->element_klass();
+       can_be_flattened = elem->can_be_inline_klass() && (!elem->is_inlinetype() || elem->flatten_array());
+     }
+     if (!can_be_flattened && (xklass || klass->is_array_klass())) {
        jint lhelper = klass->layout_helper();
        if (lhelper != Klass::_lh_neutral_value) {
          constant_value = lhelper;
          return (Node*) NULL;
        }

*** 3647,11 ***
    DEBUG_ONLY(Node* init_in_raw = init_in_merge->base_memory());
    assert(init_in_merge->memory_at(alias_idx) == init_in_raw, "");
  
    Node* prevmem = kit.memory(alias_idx);
    init_in_merge->set_memory_at(alias_idx, prevmem);
!   kit.set_memory(init_out_raw, alias_idx);
  }
  
  //---------------------------set_output_for_allocation-------------------------
  Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
                                            const TypeOopPtr* oop_type,
--- 4009,13 ---
    DEBUG_ONLY(Node* init_in_raw = init_in_merge->base_memory());
    assert(init_in_merge->memory_at(alias_idx) == init_in_raw, "");
  
    Node* prevmem = kit.memory(alias_idx);
    init_in_merge->set_memory_at(alias_idx, prevmem);
!   if (init_out_raw != NULL) {
+     kit.set_memory(init_out_raw, alias_idx);
+   }
  }
  
  //---------------------------set_output_for_allocation-------------------------
  Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
                                            const TypeOopPtr* oop_type,

*** 3686,21 ***
      // and link them properly (as a group) to the InitializeNode.
      assert(init->in(InitializeNode::Memory) == malloc, "");
      MergeMemNode* minit_in = MergeMemNode::make(malloc);
      init->set_req(InitializeNode::Memory, minit_in);
      record_for_igvn(minit_in); // fold it up later, if possible
      Node* minit_out = memory(rawidx);
      assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
      // Add an edge in the MergeMem for the header fields so an access
      // to one of those has correct memory state
      set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes())));
      set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes())));
      if (oop_type->isa_aryptr()) {
!       const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
!       int            elemidx  = C->get_alias_index(telemref);
!       hook_memory_on_init(*this, elemidx, minit_in, minit_out);
      } else if (oop_type->isa_instptr()) {
        ciInstanceKlass* ik = oop_type->klass()->as_instance_klass();
        for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
          ciField* field = ik->nonstatic_field_at(i);
          if (field->offset() >= TrackedInitializationLimit * HeapWordSize)
            continue;  // do not bother to track really large numbers of fields
--- 4050,49 ---
      // and link them properly (as a group) to the InitializeNode.
      assert(init->in(InitializeNode::Memory) == malloc, "");
      MergeMemNode* minit_in = MergeMemNode::make(malloc);
      init->set_req(InitializeNode::Memory, minit_in);
      record_for_igvn(minit_in); // fold it up later, if possible
+     _gvn.set_type(minit_in, Type::MEMORY);
      Node* minit_out = memory(rawidx);
      assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
      // Add an edge in the MergeMem for the header fields so an access
      // to one of those has correct memory state
      set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes())));
      set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes())));
      if (oop_type->isa_aryptr()) {
!       const TypeAryPtr* arytype = oop_type->is_aryptr();
!       if (arytype->klass()->is_flat_array_klass()) {
!         // Initially all flattened array accesses share a single slice
+         // but that changes after parsing. Prepare the memory graph so
+         // it can optimize flattened array accesses properly once they
+         // don't share a single slice.
+         assert(C->flattened_accesses_share_alias(), "should be set at parse time");
+         C->set_flattened_accesses_share_alias(false);
+         ciFlatArrayKlass* vak = arytype->klass()->as_flat_array_klass();
+         ciInlineKlass* vk = vak->element_klass()->as_inline_klass();
+         for (int i = 0, len = vk->nof_nonstatic_fields(); i < len; i++) {
+           ciField* field = vk->nonstatic_field_at(i);
+           if (field->offset() >= TrackedInitializationLimit * HeapWordSize)
+             continue;  // do not bother to track really large numbers of fields
+           int off_in_vt = field->offset() - vk->first_field_offset();
+           const TypePtr* adr_type = arytype->with_field_offset(off_in_vt)->add_offset(Type::OffsetBot);
+           int fieldidx = C->get_alias_index(adr_type, true);
+           // Pass NULL for init_out. Having per flat array element field memory edges as uses of the Initialize node
+           // can result in per flat array field Phis to be created which confuses the logic of
+           // Compile::adjust_flattened_array_access_aliases().
+           hook_memory_on_init(*this, fieldidx, minit_in, NULL);
+         }
+         C->set_flattened_accesses_share_alias(true);
+         hook_memory_on_init(*this, C->get_alias_index(TypeAryPtr::INLINES), minit_in, minit_out);
+       } else {
+         const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
+         int            elemidx  = C->get_alias_index(telemref);
+         hook_memory_on_init(*this, elemidx, minit_in, minit_out);
+       }
      } else if (oop_type->isa_instptr()) {
+       set_memory(minit_out, C->get_alias_index(oop_type)); // mark word
        ciInstanceKlass* ik = oop_type->klass()->as_instance_klass();
        for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
          ciField* field = ik->nonstatic_field_at(i);
          if (field->offset() >= TrackedInitializationLimit * HeapWordSize)
            continue;  // do not bother to track really large numbers of fields

*** 3747,18 ***
  //  - If 'return_size_val', report the the total object size to the caller.
  //  - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
  Node* GraphKit::new_instance(Node* klass_node,
                               Node* extra_slow_test,
                               Node* *return_size_val,
!                              bool deoptimize_on_exception) {
    // Compute size in doublewords
    // The size is always an integral number of doublewords, represented
    // as a positive bytewise size stored in the klass's layout_helper.
    // The layout_helper also encodes (in a low bit) the need for a slow path.
    jint  layout_con = Klass::_lh_neutral_value;
    Node* layout_val = get_layout_helper(klass_node, layout_con);
!   int   layout_is_con = (layout_val == NULL);
  
    if (extra_slow_test == NULL)  extra_slow_test = intcon(0);
    // Generate the initial go-slow test.  It's either ALWAYS (return a
    // Node for 1) or NEVER (return a NULL) or perhaps (in the reflective
    // case) a computed value derived from the layout_helper.
--- 4139,19 ---
  //  - If 'return_size_val', report the the total object size to the caller.
  //  - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
  Node* GraphKit::new_instance(Node* klass_node,
                               Node* extra_slow_test,
                               Node* *return_size_val,
!                              bool deoptimize_on_exception,
+                              InlineTypeBaseNode* inline_type_node) {
    // Compute size in doublewords
    // The size is always an integral number of doublewords, represented
    // as a positive bytewise size stored in the klass's layout_helper.
    // The layout_helper also encodes (in a low bit) the need for a slow path.
    jint  layout_con = Klass::_lh_neutral_value;
    Node* layout_val = get_layout_helper(klass_node, layout_con);
!   bool  layout_is_con = (layout_val == NULL);
  
    if (extra_slow_test == NULL)  extra_slow_test = intcon(0);
    // Generate the initial go-slow test.  It's either ALWAYS (return a
    // Node for 1) or NEVER (return a NULL) or perhaps (in the reflective
    // case) a computed value derived from the layout_helper.

*** 3805,34 ***
    const TypeOopPtr* oop_type = tklass->as_instance_type();
  
    // Now generate allocation code
  
    // The entire memory state is needed for slow path of the allocation
!   // since GC and deoptimization can happened.
    Node *mem = reset_memory();
    set_all_memory(mem); // Create new memory state
  
    AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
                                           control(), mem, i_o(),
                                           size, klass_node,
!                                          initial_slow_test);
  
    return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
  }
  
  //-------------------------------new_array-------------------------------------
! // helper for both newarray and anewarray
  // The 'length' parameter is (obviously) the length of the array.
  // See comments on new_instance for the meaning of the other arguments.
  Node* GraphKit::new_array(Node* klass_node,     // array klass (maybe variable)
                            Node* length,         // number of array elements
                            int   nargs,          // number of arguments to push back for uncommon trap
                            Node* *return_size_val,
                            bool deoptimize_on_exception) {
    jint  layout_con = Klass::_lh_neutral_value;
    Node* layout_val = get_layout_helper(klass_node, layout_con);
!   int   layout_is_con = (layout_val == NULL);
  
    if (!layout_is_con && !StressReflectiveCode &&
        !too_many_traps(Deoptimization::Reason_class_check)) {
      // This is a reflective array creation site.
      // Optimistically assume that it is a subtype of Object[],
--- 4198,34 ---
    const TypeOopPtr* oop_type = tklass->as_instance_type();
  
    // Now generate allocation code
  
    // The entire memory state is needed for slow path of the allocation
!   // since GC and deoptimization can happen.
    Node *mem = reset_memory();
    set_all_memory(mem); // Create new memory state
  
    AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
                                           control(), mem, i_o(),
                                           size, klass_node,
!                                          initial_slow_test, inline_type_node);
  
    return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
  }
  
  //-------------------------------new_array-------------------------------------
! // helper for newarray and anewarray
  // The 'length' parameter is (obviously) the length of the array.
  // See comments on new_instance for the meaning of the other arguments.
  Node* GraphKit::new_array(Node* klass_node,     // array klass (maybe variable)
                            Node* length,         // number of array elements
                            int   nargs,          // number of arguments to push back for uncommon trap
                            Node* *return_size_val,
                            bool deoptimize_on_exception) {
    jint  layout_con = Klass::_lh_neutral_value;
    Node* layout_val = get_layout_helper(klass_node, layout_con);
!   bool  layout_is_con = (layout_val == NULL);
  
    if (!layout_is_con && !StressReflectiveCode &&
        !too_many_traps(Deoptimization::Reason_class_check)) {
      // This is a reflective array creation site.
      // Optimistically assume that it is a subtype of Object[],

*** 3858,11 ***
    int fast_size_limit = FastAllocateSizeLimit;
    if (layout_is_con) {
      assert(!StressReflectiveCode, "stress mode does not use these paths");
      // Increase the size limit if we have exact knowledge of array type.
      int log2_esize = Klass::layout_helper_log2_element_size(layout_con);
!     fast_size_limit <<= (LogBytesPerLong - log2_esize);
    }
  
    Node* initial_slow_cmp  = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) );
    Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) );
  
--- 4251,11 ---
    int fast_size_limit = FastAllocateSizeLimit;
    if (layout_is_con) {
      assert(!StressReflectiveCode, "stress mode does not use these paths");
      // Increase the size limit if we have exact knowledge of array type.
      int log2_esize = Klass::layout_helper_log2_element_size(layout_con);
!     fast_size_limit <<= MAX2(LogBytesPerLong - log2_esize, 0);
    }
  
    Node* initial_slow_cmp  = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) );
    Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) );
  

*** 3876,14 ***
    int   header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
    // (T_BYTE has the weakest alignment and size restrictions...)
    if (layout_is_con) {
      int       hsize  = Klass::layout_helper_header_size(layout_con);
      int       eshift = Klass::layout_helper_log2_element_size(layout_con);
!     BasicType etype  = Klass::layout_helper_element_type(layout_con);
      if ((round_mask & ~right_n_bits(eshift)) == 0)
        round_mask = 0;  // strength-reduce it if it goes away completely
!     assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
      assert(header_size_min <= hsize, "generic minimum is smallest");
      header_size_min = hsize;
      header_size = intcon(hsize + round_mask);
    } else {
      Node* hss   = intcon(Klass::_lh_header_size_shift);
--- 4269,14 ---
    int   header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
    // (T_BYTE has the weakest alignment and size restrictions...)
    if (layout_is_con) {
      int       hsize  = Klass::layout_helper_header_size(layout_con);
      int       eshift = Klass::layout_helper_log2_element_size(layout_con);
!     bool is_flat_array = Klass::layout_helper_is_flatArray(layout_con);
      if ((round_mask & ~right_n_bits(eshift)) == 0)
        round_mask = 0;  // strength-reduce it if it goes away completely
!     assert(is_flat_array || (hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
      assert(header_size_min <= hsize, "generic minimum is smallest");
      header_size_min = hsize;
      header_size = intcon(hsize + round_mask);
    } else {
      Node* hss   = intcon(Klass::_lh_header_size_shift);

*** 3963,33 ***
    }
  
    // Now generate allocation code
  
    // The entire memory state is needed for slow path of the allocation
!   // since GC and deoptimization can happened.
    Node *mem = reset_memory();
    set_all_memory(mem); // Create new memory state
  
    if (initial_slow_test->is_Bool()) {
      // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
      initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
    }
  
    // Create the AllocateArrayNode and its result projections
!   AllocateArrayNode* alloc
!     = new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
!                             control(), mem, i_o(),
!                             size, klass_node,
!                             initial_slow_test,
!                             length);
  
    // Cast to correct type.  Note that the klass_node may be constant or not,
    // and in the latter case the actual array type will be inexact also.
    // (This happens via a non-constant argument to inline_native_newArray.)
    // In any case, the value of klass_node provides the desired array type.
    const TypeInt* length_type = _gvn.find_int_type(length);
-   const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type();
    if (ary_type->isa_aryptr() && length_type != NULL) {
      // Try to get a better type than POS for the size
      ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
    }
  
--- 4356,94 ---
    }
  
    // Now generate allocation code
  
    // The entire memory state is needed for slow path of the allocation
!   // since GC and deoptimization can happen.
    Node *mem = reset_memory();
    set_all_memory(mem); // Create new memory state
  
    if (initial_slow_test->is_Bool()) {
      // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
      initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
    }
  
+   const TypeKlassPtr* ary_klass = _gvn.type(klass_node)->isa_klassptr();
+   const TypeOopPtr* ary_type = ary_klass->as_instance_type();
+   const TypeAryPtr* ary_ptr = ary_type->isa_aryptr();
+ 
+   // Inline type array variants:
+   // - null-ok:              MyValue.ref[] (ciObjArrayKlass "[LMyValue")
+   // - null-free:            MyValue.val[] (ciObjArrayKlass "[QMyValue")
+   // - null-free, flattened: MyValue.val[] (ciFlatArrayKlass "[QMyValue")
+   // Check if array is a null-free, non-flattened inline type array
+   // that needs to be initialized with the default inline type.
+   Node* default_value = NULL;
+   Node* raw_default_value = NULL;
+   if (ary_ptr != NULL && ary_ptr->klass_is_exact()) {
+     // Array type is known
+     if (ary_ptr->klass()->as_array_klass()->is_elem_null_free()) {
+       ciInlineKlass* vk = ary_ptr->klass()->as_array_klass()->element_klass()->as_inline_klass();
+       if (!vk->flatten_array()) {
+         default_value = InlineTypeNode::default_oop(gvn(), vk);
+       }
+     }
+   } else if (ary_klass->klass()->can_be_inline_array_klass()) {
+     // Array type is not known, add runtime checks
+     assert(!ary_klass->klass_is_exact(), "unexpected exact type");
+     Node* r = new RegionNode(3);
+     default_value = new PhiNode(r, TypeInstPtr::BOTTOM);
+ 
+     Node* bol = array_lh_test(klass_node, Klass::_lh_array_tag_vt_value_bit_inplace | Klass::_lh_null_free_bit_inplace, Klass::_lh_null_free_bit_inplace);
+     IfNode* iff = create_and_map_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN);
+ 
+     // Null-free, non-flattened inline type array, initialize with the default value
+     set_control(_gvn.transform(new IfTrueNode(iff)));
+     Node* p = basic_plus_adr(klass_node, in_bytes(ArrayKlass::element_klass_offset()));
+     Node* eklass = _gvn.transform(LoadKlassNode::make(_gvn, control(), immutable_memory(), p, TypeInstPtr::KLASS));
+     Node* adr_fixed_block_addr = basic_plus_adr(eklass, in_bytes(InstanceKlass::adr_inlineklass_fixed_block_offset()));
+     Node* adr_fixed_block = make_load(control(), adr_fixed_block_addr, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
+     Node* default_value_offset_addr = basic_plus_adr(adr_fixed_block, in_bytes(InlineKlass::default_value_offset_offset()));
+     Node* default_value_offset = make_load(control(), default_value_offset_addr, TypeInt::INT, T_INT, MemNode::unordered);
+     Node* elem_mirror = load_mirror_from_klass(eklass);
+     Node* default_value_addr = basic_plus_adr(elem_mirror, ConvI2X(default_value_offset));
+     Node* val = access_load_at(elem_mirror, default_value_addr, _gvn.type(default_value_addr)->is_ptr(), TypeInstPtr::BOTTOM, T_OBJECT, IN_HEAP);
+     r->init_req(1, control());
+     default_value->init_req(1, val);
+ 
+     // Otherwise initialize with all zero
+     r->init_req(2, _gvn.transform(new IfFalseNode(iff)));
+     default_value->init_req(2, null());
+ 
+     set_control(_gvn.transform(r));
+     default_value = _gvn.transform(default_value);
+   }
+   if (default_value != NULL) {
+     if (UseCompressedOops) {
+       // With compressed oops, the 64-bit init value is built from two 32-bit compressed oops
+       default_value = _gvn.transform(new EncodePNode(default_value, default_value->bottom_type()->make_narrowoop()));
+       Node* lower = _gvn.transform(new CastP2XNode(control(), default_value));
+       Node* upper = _gvn.transform(new LShiftLNode(lower, intcon(32)));
+       raw_default_value = _gvn.transform(new OrLNode(lower, upper));
+     } else {
+       raw_default_value = _gvn.transform(new CastP2XNode(control(), default_value));
+     }
+   }
+ 
    // Create the AllocateArrayNode and its result projections
!   AllocateArrayNode* alloc = new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
!                                                    control(), mem, i_o(),
!                                                    size, klass_node,
!                                                    initial_slow_test,
!                                                    length, default_value,
!                                                    raw_default_value);
  
    // Cast to correct type.  Note that the klass_node may be constant or not,
    // and in the latter case the actual array type will be inexact also.
    // (This happens via a non-constant argument to inline_native_newArray.)
    // In any case, the value of klass_node provides the desired array type.
    const TypeInt* length_type = _gvn.find_int_type(length);
    if (ary_type->isa_aryptr() && length_type != NULL) {
      // Try to get a better type than POS for the size
      ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
    }
  

*** 4134,15 ***
  }
  
  // Load the 'value' field (a byte[]) of the java.lang.String 'str'.
  // When 'set_ctrl' is true the load is made control-dependent
  // (C2_CONTROL_DEPENDENT_LOAD) so it cannot float above the current control.
  Node* GraphKit::load_String_value(Node* str, bool set_ctrl) {
    int value_offset = java_lang_String::value_offset();
    // Not-null String type used to derive the field's address type.
    const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
!                                                      false, NULL, 0);
    const TypePtr* value_field_type = string_type->add_offset(value_offset);
    // The field holds a not-null byte[] with a non-negative length.
    const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
!                                                   TypeAry::make(TypeInt::BYTE, TypeInt::POS),
!                                                   ciTypeArrayKlass::make(T_BYTE), true, 0);
    Node* p = basic_plus_adr(str, str, value_offset);
    Node* load = access_load_at(str, p, value_field_type, value_type, T_OBJECT,
                                IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
    return load;
  }
--- 4588,15 ---
  }
  
  // Load the 'value' field (a byte[]) of the java.lang.String 'str'.
  // When 'set_ctrl' is true the load is made control-dependent
  // (C2_CONTROL_DEPENDENT_LOAD) so it cannot float above the current control.
  Node* GraphKit::load_String_value(Node* str, bool set_ctrl) {
    int value_offset = java_lang_String::value_offset();
    // Not-null String type used to derive the field's address type.
    const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
!                                                      false, NULL, Type::Offset(0));
    const TypePtr* value_field_type = string_type->add_offset(value_offset);
    // The field holds a not-null byte[] with a non-negative length.
    const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
!                                                   TypeAry::make(TypeInt::BYTE, TypeInt::POS, false, true, true),
!                                                   ciTypeArrayKlass::make(T_BYTE), true, Type::Offset(0));
    Node* p = basic_plus_adr(str, str, value_offset);
    Node* load = access_load_at(str, p, value_field_type, value_type, T_OBJECT,
                                IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
    return load;
  }

*** 4151,11 ***
    if (!CompactStrings) {
      return intcon(java_lang_String::CODER_UTF16);
    }
    int coder_offset = java_lang_String::coder_offset();
    const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
!                                                      false, NULL, 0);
    const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
  
    Node* p = basic_plus_adr(str, str, coder_offset);
    Node* load = access_load_at(str, p, coder_field_type, TypeInt::BYTE, T_BYTE,
                                IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
--- 4605,11 ---
    if (!CompactStrings) {
      return intcon(java_lang_String::CODER_UTF16);
    }
    int coder_offset = java_lang_String::coder_offset();
    const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
!                                                      false, NULL, Type::Offset(0));
    const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
  
    Node* p = basic_plus_adr(str, str, coder_offset);
    Node* load = access_load_at(str, p, coder_field_type, TypeInt::BYTE, T_BYTE,
                                IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);

*** 4163,21 ***
  }
  
  // Store 'value' (a byte[]) into the 'value' field of the java.lang.String 'str'.
  void GraphKit::store_String_value(Node* str, Node* value) {
    int value_offset = java_lang_String::value_offset();
    // Not-null String type used to derive the field's address type.
    const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
!                                                      false, NULL, 0);
    const TypePtr* value_field_type = string_type->add_offset(value_offset);
  
    access_store_at(str,  basic_plus_adr(str, value_offset), value_field_type,
                    value, TypeAryPtr::BYTES, T_OBJECT, IN_HEAP | MO_UNORDERED);
  }
  
  // Store 'value' (a byte holding the coder, e.g. LATIN1/UTF16) into the
  // 'coder' field of the java.lang.String 'str'.
  void GraphKit::store_String_coder(Node* str, Node* value) {
    int coder_offset = java_lang_String::coder_offset();
    // Not-null String type used to derive the field's address type.
    const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
!                                                      false, NULL, 0);
    const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
  
    access_store_at(str, basic_plus_adr(str, coder_offset), coder_field_type,
                    value, TypeInt::BYTE, T_BYTE, IN_HEAP | MO_UNORDERED);
  }
--- 4617,21 ---
  }
  
  // Store 'value' (a byte[]) into the 'value' field of the java.lang.String 'str'.
  void GraphKit::store_String_value(Node* str, Node* value) {
    int value_offset = java_lang_String::value_offset();
    // Not-null String type used to derive the field's address type.
    const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
!                                                      false, NULL, Type::Offset(0));
    const TypePtr* value_field_type = string_type->add_offset(value_offset);
  
    access_store_at(str,  basic_plus_adr(str, value_offset), value_field_type,
                    value, TypeAryPtr::BYTES, T_OBJECT, IN_HEAP | MO_UNORDERED);
  }
  
  // Store 'value' (a byte holding the coder, e.g. LATIN1/UTF16) into the
  // 'coder' field of the java.lang.String 'str'.
  void GraphKit::store_String_coder(Node* str, Node* value) {
    int coder_offset = java_lang_String::coder_offset();
    // Not-null String type used to derive the field's address type.
    const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
!                                                      false, NULL, Type::Offset(0));
    const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
  
    access_store_at(str, basic_plus_adr(str, coder_offset), coder_field_type,
                    value, TypeInt::BYTE, T_BYTE, IN_HEAP | MO_UNORDERED);
  }

*** 4286,9 ***
      }
    }
    const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(),
                                                          /*is_unsigned_load=*/false);
    if (con_type != NULL) {
!     return makecon(con_type);
    }
    return NULL;
  }
--- 4740,24 ---
      }
    }
    const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(),
                                                          /*is_unsigned_load=*/false);
    if (con_type != NULL) {
!     Node* con = makecon(con_type);
+     if (field->type()->is_inlinetype()) {
+       con = InlineTypeNode::make_from_oop(this, con, field->type()->as_inline_klass(), field->is_null_free());
+     } else if (con_type->is_inlinetypeptr()) {
+       con = InlineTypeNode::make_from_oop(this, con, con_type->inline_klass(), field->is_null_free());
+     }
+     return con;
    }
    return NULL;
  }
+ 
+ //---------------------------load_mirror_from_klass----------------------------
+ // Given a klass oop, load its java mirror (a java.lang.Class oop).
+ // The mirror slot holds an OopHandle, so this is a two-step load: first the
+ // raw handle pointer from the klass, then the oop it refers to (IN_NATIVE).
+ Node* GraphKit::load_mirror_from_klass(Node* klass) {
+   Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
+   // Load the OopHandle: a not-null raw pointer into native (off-heap) storage.
+   Node* load = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
+   // mirror = ((OopHandle)mirror)->resolve();
+   return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
+ }
< prev index next >