< prev index next >

src/hotspot/share/opto/parse1.cpp

Print this page
*** 29,10 ***
--- 29,11 ---
  #include "oops/method.hpp"
  #include "opto/addnode.hpp"
  #include "opto/c2compiler.hpp"
  #include "opto/castnode.hpp"
  #include "opto/idealGraphPrinter.hpp"
+ #include "opto/inlinetypenode.hpp"
  #include "opto/locknode.hpp"
  #include "opto/memnode.hpp"
  #include "opto/opaquenode.hpp"
  #include "opto/parse.hpp"
  #include "opto/rootnode.hpp"

*** 100,14 ***
  
  //------------------------------ON STACK REPLACEMENT---------------------------
  
  // Construct a node which can be used to get incoming state for
  // on stack replacement.
! Node *Parse::fetch_interpreter_state(int index,
!                                      BasicType bt,
!                                      Node *local_addrs,
!                                      Node *local_addrs_base) {
    Node *mem = memory(Compile::AliasIdxRaw);
    Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
    Node *ctl = control();
  
    // Very similar to LoadNode::make, except we handle un-aligned longs and
--- 101,20 ---
  
  //------------------------------ON STACK REPLACEMENT---------------------------
  
  // Construct a node which can be used to get incoming state for
  // on stack replacement.
! Node* Parse::fetch_interpreter_state(int index,
!                                      const Type* type,
!                                      Node* local_addrs,
!                                      Node* local_addrs_base) {
+   BasicType bt = type->basic_type();
+   if (type == TypePtr::NULL_PTR) {
+     // Ptr types are mixed together with T_ADDRESS but NULL is
+     // really for T_OBJECT types so correct it.
+     bt = T_OBJECT;
+   }
    Node *mem = memory(Compile::AliasIdxRaw);
    Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
    Node *ctl = control();
  
    // Very similar to LoadNode::make, except we handle un-aligned longs and

*** 115,10 ***
--- 122,11 ---
    Node *l = NULL;
    switch (bt) {                // Signature is flattened
    case T_INT:     l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT,        MemNode::unordered); break;
    case T_FLOAT:   l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT,         MemNode::unordered); break;
    case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM,  MemNode::unordered); break;
+   case T_INLINE_TYPE:
    case T_OBJECT:  l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
    case T_LONG:
    case T_DOUBLE: {
      // Since arguments are in reverse order, the argument address 'adr'
      // refers to the back half of the long/double.  Recompute adr.

*** 145,12 ***
  // The type is the type predicted by ciTypeFlow.  Note that it is
  // not a general type, but can only come from Type::get_typeflow_type.
  // The safepoint is a map which will feed an uncommon trap.
  Node* Parse::check_interpreter_type(Node* l, const Type* type,
                                      SafePointNode* &bad_type_exit) {
- 
    const TypeOopPtr* tp = type->isa_oopptr();
  
    // TypeFlow may assert null-ness if a type appears unloaded.
    if (type == TypePtr::NULL_PTR ||
        (tp != NULL && !tp->klass()->is_loaded())) {
      // Value must be null, not a real oop.
--- 153,16 ---
  // The type is the type predicted by ciTypeFlow.  Note that it is
  // not a general type, but can only come from Type::get_typeflow_type.
  // The safepoint is a map which will feed an uncommon trap.
  Node* Parse::check_interpreter_type(Node* l, const Type* type,
                                      SafePointNode* &bad_type_exit) {
    const TypeOopPtr* tp = type->isa_oopptr();
+   if (type->isa_inlinetype() != NULL) {
+     // The interpreter passes inline types as oops
+     tp = TypeOopPtr::make_from_klass(type->inline_klass());
+     tp = tp->join_speculative(TypePtr::NOTNULL)->is_oopptr();
+   }
  
    // TypeFlow may assert null-ness if a type appears unloaded.
    if (type == TypePtr::NULL_PTR ||
        (tp != NULL && !tp->klass()->is_loaded())) {
      // Value must be null, not a real oop.

*** 169,16 ***
    // toward more specific classes.  Make sure these specific classes
    // are still in effect.
    if (tp != NULL && tp->klass() != C->env()->Object_klass()) {
      // TypeFlow asserted a specific object type.  Value must have that type.
      Node* bad_type_ctrl = NULL;
      l = gen_checkcast(l, makecon(TypeKlassPtr::make(tp->klass())), &bad_type_ctrl);
      bad_type_exit->control()->add_req(bad_type_ctrl);
    }
- 
-   BasicType bt_l = _gvn.type(l)->basic_type();
-   BasicType bt_t = type->basic_type();
    assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
    return l;
  }
  
  // Helper routine which sets up elements of the initial parser map when
--- 181,19 ---
    // toward more specific classes.  Make sure these specific classes
    // are still in effect.
    if (tp != NULL && tp->klass() != C->env()->Object_klass()) {
      // TypeFlow asserted a specific object type.  Value must have that type.
      Node* bad_type_ctrl = NULL;
+     if (tp->is_inlinetypeptr() && !tp->maybe_null()) {
+       // Check inline types for null here to prevent checkcast from adding an
+       // exception state before the bytecode entry (use 'bad_type_ctrl' instead).
+       l = null_check_oop(l, &bad_type_ctrl);
+       bad_type_exit->control()->add_req(bad_type_ctrl);
+     }
      l = gen_checkcast(l, makecon(TypeKlassPtr::make(tp->klass())), &bad_type_ctrl);
      bad_type_exit->control()->add_req(bad_type_ctrl);
    }
    assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
    return l;
  }
  
  // Helper routine which sets up elements of the initial parser map when

*** 187,11 ***
  void Parse::load_interpreter_state(Node* osr_buf) {
    int index;
    int max_locals = jvms()->loc_size();
    int max_stack  = jvms()->stk_size();
  
- 
    // Mismatch between method and jvms can occur since map briefly held
    // an OSR entry state (which takes up one RawPtr word).
    assert(max_locals == method()->max_locals(), "sanity");
    assert(max_stack  >= method()->max_stack(),  "sanity");
    assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
--- 202,10 ---

*** 225,18 ***
    Node *monitors_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals+mcnt*2-1)*wordSize);
    for (index = 0; index < mcnt; index++) {
      // Make a BoxLockNode for the monitor.
      Node *box = _gvn.transform(new BoxLockNode(next_monitor()));
  
- 
      // Displaced headers and locked objects are interleaved in the
      // temp OSR buffer.  We only copy the locked objects out here.
      // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
!     Node *lock_object = fetch_interpreter_state(index*2, T_OBJECT, monitors_addr, osr_buf);
      // Try and copy the displaced header to the BoxNode
!     Node *displaced_hdr = fetch_interpreter_state((index*2) + 1, T_ADDRESS, monitors_addr, osr_buf);
- 
  
      store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
  
      // Build a bogus FastLockNode (no code will be generated) and push the
      // monitor into our debug info.
--- 239,16 ---
    Node *monitors_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals+mcnt*2-1)*wordSize);
    for (index = 0; index < mcnt; index++) {
      // Make a BoxLockNode for the monitor.
      Node *box = _gvn.transform(new BoxLockNode(next_monitor()));
  
      // Displaced headers and locked objects are interleaved in the
      // temp OSR buffer.  We only copy the locked objects out here.
      // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
!     Node* lock_object = fetch_interpreter_state(index*2, Type::get_const_basic_type(T_OBJECT), monitors_addr, osr_buf);
      // Try and copy the displaced header to the BoxNode
!     Node* displaced_hdr = fetch_interpreter_state((index*2) + 1, Type::get_const_basic_type(T_ADDRESS), monitors_addr, osr_buf);
  
      store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
  
      // Build a bogus FastLockNode (no code will be generated) and push the
      // monitor into our debug info.

*** 299,17 ***
      // makes it go dead.
      if (type == Type::BOTTOM) {
        continue;
      }
      // Construct code to access the appropriate local.
!     BasicType bt = type->basic_type();
-     if (type == TypePtr::NULL_PTR) {
-       // Ptr types are mixed together with T_ADDRESS but NULL is
-       // really for T_OBJECT types so correct it.
-       bt = T_OBJECT;
-     }
-     Node *value = fetch_interpreter_state(index, bt, locals_addr, osr_buf);
      set_local(index, value);
    }
  
    // Extract the needed stack entries from the interpreter frame.
    for (index = 0; index < sp(); index++) {
--- 311,11 ---
      // makes it go dead.
      if (type == Type::BOTTOM) {
        continue;
      }
      // Construct code to access the appropriate local.
!     Node* value = fetch_interpreter_state(index, type, locals_addr, osr_buf);
      set_local(index, value);
    }
  
    // Extract the needed stack entries from the interpreter frame.
    for (index = 0; index < sp(); index++) {

*** 593,10 ***
--- 599,31 ---
      if (log)  log->done("parse");
      C->set_default_node_notes(caller_nn);
      return;
    }
  
+   // Handle inline type arguments
+   int arg_size_sig = tf()->domain_sig()->cnt();
+   for (uint i = 0; i < (uint)arg_size_sig; i++) {
+     Node* parm = map()->in(i);
+     const Type* t = _gvn.type(parm);
+     if (t->is_inlinetypeptr()) {
+       // Create InlineTypeNode from the oop and replace the parameter
+       Node* vt = InlineTypeNode::make_from_oop(this, parm, t->inline_klass(), !t->maybe_null());
+       map()->replace_edge(parm, vt);
+     } else if (UseTypeSpeculation && (i == (uint)(arg_size_sig - 1)) && !is_osr_parse() &&
+                method()->has_vararg() && t->isa_aryptr() != NULL && !t->is_aryptr()->is_not_null_free()) {
+       // Speculate on varargs Object array being not null-free (and therefore also not flattened)
+       const TypePtr* spec_type = t->speculative();
+       spec_type = (spec_type != NULL && spec_type->isa_aryptr() != NULL) ? spec_type : t->is_aryptr();
+       spec_type = spec_type->remove_speculative()->is_aryptr()->cast_to_not_null_free();
+       spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::Offset::bottom, TypeOopPtr::InstanceBot, spec_type);
+       Node* cast = _gvn.transform(new CheckCastPPNode(control(), parm, t->join_speculative(spec_type)));
+       replace_in_map(parm, cast);
+     }
+   }
+ 
    entry_map = map();  // capture any changes performed by method setup code
    assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");
  
    // We begin parsing as if we have just encountered a jump to the
    // method entry.

*** 775,12 ***
    gvn().set_type_bottom(memphi);
    _exits.set_i_o(iophi);
    _exits.set_all_memory(memphi);
  
    // Add a return value to the exit state.  (Do not push it yet.)
!   if (tf()->range()->cnt() > TypeFunc::Parms) {
!     const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
      if (ret_type->isa_int()) {
        BasicType ret_bt = method()->return_type()->basic_type();
        if (ret_bt == T_BOOLEAN ||
            ret_bt == T_CHAR ||
            ret_bt == T_BYTE ||
--- 802,12 ---
    gvn().set_type_bottom(memphi);
    _exits.set_i_o(iophi);
    _exits.set_all_memory(memphi);
  
    // Add a return value to the exit state.  (Do not push it yet.)
!   if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
!     const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
      if (ret_type->isa_int()) {
        BasicType ret_bt = method()->return_type()->basic_type();
        if (ret_bt == T_BOOLEAN ||
            ret_bt == T_CHAR ||
            ret_bt == T_BYTE ||

*** 794,30 ***
      // types will not join when we transform and push in do_exits().
      const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
      if (ret_oop_type && !ret_oop_type->klass()->is_loaded()) {
        ret_type = TypeOopPtr::BOTTOM;
      }
      int         ret_size = type2size[ret_type->basic_type()];
      Node*       ret_phi  = new PhiNode(region, ret_type);
      gvn().set_type_bottom(ret_phi);
      _exits.ensure_stack(ret_size);
!     assert((int)(tf()->range()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
      assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
      _exits.set_argument(0, ret_phi);  // here is where the parser finds it
      // Note:  ret_phi is not yet pushed, until do_exits.
    }
  }
  
- 
  //----------------------------build_start_state-------------------------------
  // Construct a state which contains only the incoming arguments from an
  // unknown caller.  The method & bci will be NULL & InvocationEntryBci.
  JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
!   int        arg_size = tf->domain()->cnt();
!   int        max_size = MAX2(arg_size, (int)tf->range()->cnt());
    JVMState*  jvms     = new (this) JVMState(max_size - TypeFunc::Parms);
    SafePointNode* map  = new SafePointNode(max_size, jvms);
    record_for_igvn(map);
    assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
    Node_Notes* old_nn = default_node_notes();
    if (old_nn != NULL && has_method()) {
      Node_Notes* entry_nn = old_nn->clone(this);
--- 821,35 ---
      // types will not join when we transform and push in do_exits().
      const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
      if (ret_oop_type && !ret_oop_type->klass()->is_loaded()) {
        ret_type = TypeOopPtr::BOTTOM;
      }
+     // Scalarize inline type when returning as fields or inlining non-incrementally
+     if ((tf()->returns_inline_type_as_fields() || (_caller->has_method() && !Compile::current()->inlining_incrementally())) &&
+         ret_type->is_inlinetypeptr() && !ret_type->maybe_null()) {
+       ret_type = TypeInlineType::make(ret_type->inline_klass());
+     }
      int         ret_size = type2size[ret_type->basic_type()];
      Node*       ret_phi  = new PhiNode(region, ret_type);
      gvn().set_type_bottom(ret_phi);
      _exits.ensure_stack(ret_size);
!     assert((int)(tf()->range_sig()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
      assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
      _exits.set_argument(0, ret_phi);  // here is where the parser finds it
      // Note:  ret_phi is not yet pushed, until do_exits.
    }
  }
  
  //----------------------------build_start_state-------------------------------
  // Construct a state which contains only the incoming arguments from an
  // unknown caller.  The method & bci will be NULL & InvocationEntryBci.
  JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
!   int        arg_size = tf->domain_sig()->cnt();
!   int        max_size = MAX2(arg_size, (int)tf->range_cc()->cnt());
    JVMState*  jvms     = new (this) JVMState(max_size - TypeFunc::Parms);
    SafePointNode* map  = new SafePointNode(max_size, jvms);
+   jvms->set_map(map);
    record_for_igvn(map);
    assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
    Node_Notes* old_nn = default_node_notes();
    if (old_nn != NULL && has_method()) {
      Node_Notes* entry_nn = old_nn->clone(this);

*** 825,23 ***
      entry_jvms->set_offsets(0);
      entry_jvms->set_bci(entry_bci());
      entry_nn->set_jvms(entry_jvms);
      set_default_node_notes(entry_nn);
    }
!   uint i;
!   for (i = 0; i < (uint)arg_size; i++) {
!     Node* parm = initial_gvn()->transform(new ParmNode(start, i));
      map->init_req(i, parm);
      // Record all these guys for later GVN.
      record_for_igvn(parm);
    }
    for (; i < map->req(); i++) {
      map->init_req(i, top());
    }
    assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
    set_default_node_notes(old_nn);
-   jvms->set_map(map);
    return jvms;
  }
  
  //-----------------------------make_node_notes---------------------------------
  Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
--- 857,38 ---
      entry_jvms->set_offsets(0);
      entry_jvms->set_bci(entry_bci());
      entry_nn->set_jvms(entry_jvms);
      set_default_node_notes(entry_nn);
    }
!   PhaseGVN& gvn = *initial_gvn();
!   uint i = 0;
!   for (uint j = 0; i < (uint)arg_size; i++) {
+     const Type* t = tf->domain_sig()->field_at(i);
+     Node* parm = NULL;
+     if (has_scalarized_args() && t->is_inlinetypeptr() && !t->maybe_null() && t->inline_klass()->can_be_passed_as_fields()) {
+       // Inline type arguments are not passed by reference: we get an argument per
+       // field of the inline type. Build InlineTypeNodes from the inline type arguments.
+       GraphKit kit(jvms, &gvn);
+       kit.set_control(map->control());
+       Node* old_mem = map->memory();
+       // Use immutable memory for inline type loads and restore it below
+       kit.set_all_memory(C->immutable_memory());
+       parm = InlineTypeNode::make_from_multi(&kit, start, t->inline_klass(), j, true);
+       map->set_control(kit.control());
+       map->set_memory(old_mem);
+     } else {
+       parm = gvn.transform(new ParmNode(start, j++));
+     }
      map->init_req(i, parm);
      // Record all these guys for later GVN.
      record_for_igvn(parm);
    }
    for (; i < map->req(); i++) {
      map->init_req(i, top());
    }
    assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
    set_default_node_notes(old_nn);
    return jvms;
  }
  
  //-----------------------------make_node_notes---------------------------------
  Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {

*** 864,16 ***
                               kit.i_o(),
                               kit.reset_memory(),
                               kit.frameptr(),
                               kit.returnadr());
    // Add zero or 1 return values
!   int ret_size = tf()->range()->cnt() - TypeFunc::Parms;
    if (ret_size > 0) {
      kit.inc_sp(-ret_size);  // pop the return value(s)
      kit.sync_jvms();
!     ret->add_req(kit.argument(0));
!     // Note:  The second dummy edge is not needed by a ReturnNode.
    }
    // bind it to root
    root()->add_req(ret);
    record_for_igvn(ret);
    initial_gvn()->transform_no_reclaim(ret);
--- 911,35 ---
                               kit.i_o(),
                               kit.reset_memory(),
                               kit.frameptr(),
                               kit.returnadr());
    // Add zero or 1 return values
!   int ret_size = tf()->range_sig()->cnt() - TypeFunc::Parms;
    if (ret_size > 0) {
      kit.inc_sp(-ret_size);  // pop the return value(s)
      kit.sync_jvms();
!     Node* res = kit.argument(0);
!     if (tf()->returns_inline_type_as_fields()) {
+       // Multiple return values (inline type fields): add as many edges
+       // to the Return node as returned values.
+       InlineTypeBaseNode* vt = res->as_InlineTypeBase();
+       ret->add_req_batch(NULL, tf()->range_cc()->cnt() - TypeFunc::Parms);
+       if (vt->is_allocated(&kit.gvn()) && !StressInlineTypeReturnedAsFields) {
+         ret->init_req(TypeFunc::Parms, vt->get_oop());
+       } else {
+         ret->init_req(TypeFunc::Parms, vt->tagged_klass(kit.gvn()));
+       }
+       uint idx = TypeFunc::Parms + 1;
+       vt->pass_fields(&kit, ret, idx);
+     } else {
+       if (res->is_InlineType()) {
+         assert(res->as_InlineType()->is_allocated(&kit.gvn()), "must be allocated");
+         res = res->as_InlineType()->get_oop();
+       }
+       ret->add_req(res);
+       // Note:  The second dummy edge is not needed by a ReturnNode.
+     }
    }
    // bind it to root
    root()->add_req(ret);
    record_for_igvn(ret);
    initial_gvn()->transform_no_reclaim(ret);

*** 993,11 ***
    // "All bets are off" unless the first publication occurs after a
    // normal return from the constructor.  We do not attempt to detect
    // such unusual early publications.  But no barrier is needed on
    // exceptional returns, since they cannot publish normally.
    //
!   if (method()->is_initializer() &&
         (wrote_final() ||
           (AlwaysSafeConstructors && wrote_fields()) ||
           (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
      _exits.insert_mem_bar(Op_MemBarRelease, alloc_with_final());
  
--- 1059,11 ---
    // "All bets are off" unless the first publication occurs after a
    // normal return from the constructor.  We do not attempt to detect
    // such unusual early publications.  But no barrier is needed on
    // exceptional returns, since they cannot publish normally.
    //
!   if (method()->is_object_constructor_or_class_initializer() &&
         (wrote_final() ||
           (AlwaysSafeConstructors && wrote_fields()) ||
           (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
      _exits.insert_mem_bar(Op_MemBarRelease, alloc_with_final());
  

*** 1031,12 ***
      mms.set_memory(_gvn.transform(mms.memory()));
    }
    // Clean up input MergeMems created by transforming the slices
    _gvn.transform(_exits.merged_memory());
  
!   if (tf()->range()->cnt() > TypeFunc::Parms) {
!     const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
      Node*       ret_phi  = _gvn.transform( _exits.argument(0) );
      if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
        // If the type we set for the ret_phi in build_exits() is too optimistic and
        // the ret_phi is top now, there's an extremely small chance that it may be due to class
        // loading.  It could also be due to an error, so mark this method as not compilable because
--- 1097,12 ---
      mms.set_memory(_gvn.transform(mms.memory()));
    }
    // Clean up input MergeMems created by transforming the slices
    _gvn.transform(_exits.merged_memory());
  
!   if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
!     const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
      Node*       ret_phi  = _gvn.transform( _exits.argument(0) );
      if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
        // If the type we set for the ret_phi in build_exits() is too optimistic and
        // the ret_phi is top now, there's an extremely small chance that it may be due to class
        // loading.  It could also be due to an error, so mark this method as not compilable because

*** 1125,11 ***
    _caller->map()->delete_replaced_nodes();
  
    // If this is an inlined method, we may have to do a receiver null check.
    if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
      GraphKit kit(_caller);
!     kit.null_check_receiver_before_call(method());
      _caller = kit.transfer_exceptions_into_jvms();
      if (kit.stopped()) {
        _exits.add_exception_states_from(_caller);
        _exits.set_jvms(_caller);
        return NULL;
--- 1191,11 ---
    _caller->map()->delete_replaced_nodes();
  
    // If this is an inlined method, we may have to do a receiver null check.
    if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
      GraphKit kit(_caller);
!     kit.null_check_receiver_before_call(method(), false);
      _caller = kit.transfer_exceptions_into_jvms();
      if (kit.stopped()) {
        _exits.add_exception_states_from(_caller);
        _exits.set_jvms(_caller);
        return NULL;

*** 1163,11 ***
      set_all_memory(reset_memory());
    }
    assert(merged_memory(), "");
  
    // Now add the locals which are initially bound to arguments:
!   uint arg_size = tf()->domain()->cnt();
    ensure_stack(arg_size - TypeFunc::Parms);  // OSR methods have funny args
    for (i = TypeFunc::Parms; i < arg_size; i++) {
      map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
    }
  
--- 1229,11 ---
      set_all_memory(reset_memory());
    }
    assert(merged_memory(), "");
  
    // Now add the locals which are initially bound to arguments:
!   uint arg_size = tf()->domain_sig()->cnt();
    ensure_stack(arg_size - TypeFunc::Parms);  // OSR methods have funny args
    for (i = TypeFunc::Parms; i < arg_size; i++) {
      map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
    }
  

*** 1237,10 ***
--- 1303,11 ---
        ciInstance* mirror = _method->holder()->java_mirror();
        const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
        lock_obj = makecon(t_lock);
      } else {                  // Else pass the "this" pointer,
        lock_obj = local(0);    // which is Parm0 from StartNode
+       assert(!_gvn.type(lock_obj)->make_oopptr()->can_be_inline_type(), "can't be an inline type");
      }
      // Clear out dead values from the debug info.
      kill_dead_locals();
      // Build the FastLockNode
      _synch_lock = shared_lock(lock_obj);

*** 1648,10 ***
--- 1715,48 ---
  
    // Zap extra stack slots to top
    assert(sp() == target->start_sp(), "");
    clean_stack(sp());
  
+   // Check for merge conflicts involving inline types
+   JVMState* old_jvms = map()->jvms();
+   int old_bci = bci();
+   JVMState* tmp_jvms = old_jvms->clone_shallow(C);
+   tmp_jvms->set_should_reexecute(true);
+   tmp_jvms->bind_map(map());
+   // Execution needs to restart at the next bytecode (entry of next
+   // block)
+   if (target->is_merged() ||
+       pnum > PhiNode::Input ||
+       target->is_handler() ||
+       target->is_loop_head()) {
+     set_parse_bci(target->start());
+     for (uint j = TypeFunc::Parms; j < map()->req(); j++) {
+       Node* n = map()->in(j);                 // Incoming change to target state.
+       const Type* t = NULL;
+       if (tmp_jvms->is_loc(j)) {
+         t = target->local_type_at(j - tmp_jvms->locoff());
+       } else if (tmp_jvms->is_stk(j) && j < (uint)sp() + tmp_jvms->stkoff()) {
+         t = target->stack_type_at(j - tmp_jvms->stkoff());
+       }
+       if (t != NULL && t != Type::BOTTOM) {
+         if (n->is_InlineType() && !t->isa_inlinetype()) {
+           // TODO Currently, the implementation relies on the assumption that InlineTypePtrNodes
+           // are always buffered. We therefore need to allocate here.
+           // Allocate inline type in src block to be able to merge it with oop in target block
+           map()->set_req(j, n->as_InlineType()->buffer(this));
+         } else if (!n->is_InlineTypeBase() && t->is_inlinetypeptr()) {
+           // Scalarize null in src block to be able to merge it with inline type in target block
+           assert(gvn().type(n)->is_zero_type(), "Should have been scalarized");
+           map()->set_req(j, InlineTypePtrNode::make_null(gvn(), t->inline_klass()));
+         }
+       }
+     }
+   }
+   old_jvms->bind_map(map());
+   set_parse_bci(old_bci);
+ 
    if (!target->is_merged()) {   // No prior mapping at this bci
      if (TraceOptoParse) { tty->print(" with empty state");  }
  
      // If this path is dead, do not bother capturing it as a merge.
      // It is "as if" we had 1 fewer predecessors from the beginning.

*** 1701,10 ***
--- 1806,11 ---
  #ifdef ASSERT
      if (target->is_SEL_head()) {
        target->mark_merged_backedge(block());
      }
  #endif
+ 
      // We must not manufacture more phis if the target is already parsed.
      bool nophi = target->is_parsed();
  
      SafePointNode* newin = map();// Hang on to incoming mapping
      Block* save_block = block(); // Hang on to incoming block;

*** 1736,18 ***
      }
  
      // Update all the non-control inputs to map:
      assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
      bool check_elide_phi = target->is_SEL_backedge(save_block);
      for (uint j = 1; j < newin->req(); j++) {
        Node* m = map()->in(j);   // Current state of target.
        Node* n = newin->in(j);   // Incoming change to target state.
        PhiNode* phi;
!       if (m->is_Phi() && m->as_Phi()->region() == r)
          phi = m->as_Phi();
!       else
          phi = NULL;
        if (m != n) {             // Different; must merge
          switch (j) {
          // Frame pointer and Return Address never changes
          case TypeFunc::FramePtr:// Drop m, use the original value
          case TypeFunc::ReturnAdr:
--- 1842,22 ---
      }
  
      // Update all the non-control inputs to map:
      assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
      bool check_elide_phi = target->is_SEL_backedge(save_block);
+     bool last_merge = (pnum == PhiNode::Input);
      for (uint j = 1; j < newin->req(); j++) {
        Node* m = map()->in(j);   // Current state of target.
        Node* n = newin->in(j);   // Incoming change to target state.
        PhiNode* phi;
!       if (m->is_Phi() && m->as_Phi()->region() == r) {
          phi = m->as_Phi();
!       } else if (m->is_InlineTypeBase() && m->as_InlineTypeBase()->has_phi_inputs(r)) {
+         phi = m->as_InlineTypeBase()->get_oop()->as_Phi();
+       } else {
          phi = NULL;
+       }
        if (m != n) {             // Different; must merge
          switch (j) {
          // Frame pointer and Return Address never changes
          case TypeFunc::FramePtr:// Drop m, use the original value
          case TypeFunc::ReturnAdr:

*** 1777,15 ***
        // At this point, n might be top if:
        //  - there is no phi (because TypeFlow detected a conflict), or
        //  - the corresponding control edges is top (a dead incoming path)
        // It is a bug if we create a phi which sees a garbage value on a live path.
  
!       if (phi != NULL) {
          assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
          assert(phi->region() == r, "");
          phi->set_req(pnum, n);  // Then add 'n' to the merge
!         if (pnum == PhiNode::Input) {
            // Last merge for this Phi.
            // So far, Phis have had a reasonable type from ciTypeFlow.
            // Now _gvn will join that with the meet of current inputs.
            // BOTTOM is never permissible here, 'cause pessimistically
            // Phis of pointers cannot lose the basic pointer type.
--- 1887,38 ---
        // At this point, n might be top if:
        //  - there is no phi (because TypeFlow detected a conflict), or
        //  - the corresponding control edges is top (a dead incoming path)
        // It is a bug if we create a phi which sees a garbage value on a live path.
  
!       // Merging two inline types?
+       if (phi != NULL && phi->bottom_type()->is_inlinetypeptr()) {
+         // Reload current state because it may have been updated by ensure_phi
+         m = map()->in(j);
+         InlineTypeBaseNode* vtm = m->as_InlineTypeBase(); // Current inline type
+         InlineTypeBaseNode* vtn = n->as_InlineTypeBase(); // Incoming inline type
+         assert(vtm->get_oop() == phi, "Inline type should have Phi input");
+         if (TraceOptoParse) {
+ #ifdef ASSERT
+           tty->print_cr("\nMerging inline types");
+           tty->print_cr("Current:");
+           vtm->dump(2);
+           tty->print_cr("Incoming:");
+           vtn->dump(2);
+           tty->cr();
+ #endif
+         }
+         // Do the merge
+         vtm->merge_with(&_gvn, vtn, pnum, last_merge);
+         if (last_merge) {
+           map()->set_req(j, _gvn.transform_no_reclaim(vtm));
+           record_for_igvn(vtm);
+         }
+       } else if (phi != NULL) {
          assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
          assert(phi->region() == r, "");
          phi->set_req(pnum, n);  // Then add 'n' to the merge
!         if (last_merge) {
            // Last merge for this Phi.
            // So far, Phis have had a reasonable type from ciTypeFlow.
            // Now _gvn will join that with the meet of current inputs.
            // BOTTOM is never permissible here, 'cause pessimistically
            // Phis of pointers cannot lose the basic pointer type.

*** 1797,12 ***
            record_for_igvn(phi);
          }
        }
      } // End of for all values to be merged
  
!     if (pnum == PhiNode::Input &&
-         !r->in(0)) {         // The occasional useless Region
        assert(control() == r, "");
        set_control(r->nonnull_req());
      }
  
      map()->merge_replaced_nodes_with(newin);
--- 1930,11 ---
            record_for_igvn(phi);
          }
        }
      } // End of for all values to be merged
  
!     if (last_merge && !r->in(0)) {         // The occasional useless Region
        assert(control() == r, "");
        set_control(r->nonnull_req());
      }
  
      map()->merge_replaced_nodes_with(newin);

*** 1950,10 ***
--- 2082,12 ---
        }
      } else {
        if (n->is_Phi() && n->as_Phi()->region() == r) {
          assert(n->req() == pnum, "must be same size as region");
          n->add_req(NULL);
+       } else if (n->is_InlineTypeBase() && n->as_InlineTypeBase()->has_phi_inputs(r)) {
+         n->as_InlineTypeBase()->add_new_path(r);
        }
      }
    }
  
    return pnum;

*** 1972,10 ***
--- 2106,14 ---
    if (o == top())  return NULL; // TOP always merges into TOP
  
    if (o->is_Phi() && o->as_Phi()->region() == region) {
      return o->as_Phi();
    }
+   InlineTypeBaseNode* vt = o->isa_InlineTypeBase();
+   if (vt != NULL && vt->has_phi_inputs(region)) {
+     return vt->get_oop()->as_Phi();
+   }
  
    // Now use a Phi here for merging
    assert(!nocreate, "Cannot build a phi for a block already parsed.");
    const JVMState* jvms = map->jvms();
    const Type* t = NULL;

*** 1991,12 ***
    } else {
      assert(false, "no type information for this phi");
    }
  
    // If the type falls to bottom, then this must be a local that
!   // is mixing ints and oops or some such.  Forcing it to top
!   // makes it go dead.
    if (t == Type::BOTTOM) {
      map->set_req(idx, top());
      return NULL;
    }
  
--- 2129,12 ---
    } else {
      assert(false, "no type information for this phi");
    }
  
    // If the type falls to bottom, then this must be a local that
!   // is already dead or is mixing ints and oops or some such.
!   // Forcing it to top makes it go dead.
    if (t == Type::BOTTOM) {
      map->set_req(idx, top());
      return NULL;
    }
  

*** 2005,15 ***
    if (t == Type::TOP || t == Type::HALF) {
      map->set_req(idx, top());
      return NULL;
    }
  
!   PhiNode* phi = PhiNode::make(region, o, t);
!   gvn().set_type(phi, t);
!   if (C->do_escape_analysis()) record_for_igvn(phi);
!   map->set_req(idx, phi);
!   return phi;
  }
  
  //--------------------------ensure_memory_phi----------------------------------
  // Turn the idx'th slice of the current memory into a Phi
  PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
--- 2143,24 ---
    if (t == Type::TOP || t == Type::HALF) {
      map->set_req(idx, top());
      return NULL;
    }
  
!   if (vt != NULL && (t->is_inlinetypeptr() || t->isa_inlinetype())) {
!     // Inline types are merged by merging their field values.
!     // Create a cloned InlineTypeNode with phi inputs that
!     // represents the merged inline type and update the map.
!     vt = vt->clone_with_phis(&_gvn, region);
+     map->set_req(idx, vt);
+     return vt->get_oop()->as_Phi();
+   } else {
+     PhiNode* phi = PhiNode::make(region, o, t);
+     gvn().set_type(phi, t);
+     if (C->do_escape_analysis()) record_for_igvn(phi);
+     map->set_req(idx, phi);
+     return phi;
+   }
  }
  
  //--------------------------ensure_memory_phi----------------------------------
  // Turn the idx'th slice of the current memory into a Phi
  PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {

*** 2202,64 ***
        method()->intrinsic_id() == vmIntrinsics::_Object_init) {
      call_register_finalizer();
    }
  
    // Do not set_parse_bci, so that return goo is credited to the return insn.
!   set_bci(InvocationEntryBci);
    if (method()->is_synchronized() && GenerateSynchronizationCode) {
      shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
    }
    if (C->env()->dtrace_method_probes()) {
      make_dtrace_method_exit(method());
    }
-   SafePointNode* exit_return = _exits.map();
-   exit_return->in( TypeFunc::Control  )->add_req( control() );
-   exit_return->in( TypeFunc::I_O      )->add_req( i_o    () );
-   Node *mem = exit_return->in( TypeFunc::Memory   );
-   for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
-     if (mms.is_empty()) {
-       // get a copy of the base memory, and patch just this one input
-       const TypePtr* adr_type = mms.adr_type(C);
-       Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
-       assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
-       gvn().set_type_bottom(phi);
-       phi->del_req(phi->req()-1);  // prepare to re-patch
-       mms.set_memory(phi);
-     }
-     mms.memory()->add_req(mms.memory2());
-   }
- 
    // frame pointer is always same, already captured
    if (value != NULL) {
-     // If returning oops to an interface-return, there is a silent free
-     // cast from oop to interface allowed by the Verifier.  Make it explicit
-     // here.
      Node* phi = _exits.argument(0);
!     const TypeInstPtr *tr = phi->bottom_type()->isa_instptr();
!     if (tr && tr->klass()->is_loaded() &&
!         tr->klass()->is_interface()) {
!       const TypeInstPtr *tp = value->bottom_type()->isa_instptr();
!       if (tp && tp->klass()->is_loaded() &&
!           !tp->klass()->is_interface()) {
          // sharpen the type eagerly; this eases certain assert checking
!         if (tp->higher_equal(TypeInstPtr::NOTNULL))
            tr = tr->join_speculative(TypeInstPtr::NOTNULL)->is_instptr();
          value = _gvn.transform(new CheckCastPPNode(0, value, tr));
        }
      } else {
!       // Also handle returns of oop-arrays to an arrays-of-interface return
        const TypeInstPtr* phi_tip;
        const TypeInstPtr* val_tip;
!       Type::get_arrays_base_elements(phi->bottom_type(), value->bottom_type(), &phi_tip, &val_tip);
        if (phi_tip != NULL && phi_tip->is_loaded() && phi_tip->klass()->is_interface() &&
            val_tip != NULL && val_tip->is_loaded() && !val_tip->klass()->is_interface()) {
!         value = _gvn.transform(new CheckCastPPNode(0, value, phi->bottom_type()));
        }
      }
      phi->add_req(value);
    }
  
    if (_first_return) {
      _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
      _first_return = false;
    } else {
      _exits.map()->merge_replaced_nodes_with(map());
--- 2349,88 ---
        method()->intrinsic_id() == vmIntrinsics::_Object_init) {
      call_register_finalizer();
    }
  
    // Do not set_parse_bci, so that return goo is credited to the return insn.
!   // vreturn can trigger an allocation and can therefore throw. Setting
+   // the bci here breaks exception handling, so the set_bci call below is
+   // intentionally disabled; no regressions have been observed without it.
+   //  set_bci(InvocationEntryBci);
    if (method()->is_synchronized() && GenerateSynchronizationCode) {
      shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
    }
    if (C->env()->dtrace_method_probes()) {
      make_dtrace_method_exit(method());
    }
    // frame pointer is always same, already captured
    if (value != NULL) {
      Node* phi = _exits.argument(0);
!     const Type* return_type = phi->bottom_type();
!     const TypeOopPtr* tr = return_type->isa_oopptr();
!     // The return_type is set in Parse::build_exits().
!     if (return_type->isa_inlinetype()) {
!       // Inline type is returned as fields, make sure it is scalarized
!       if (!value->is_InlineType()) {
+         value = InlineTypeNode::make_from_oop(this, value, return_type->inline_klass());
+       }
+       if (!_caller->has_method() || Compile::current()->inlining_incrementally()) {
+         // Returning from root or an incrementally inlined method. Make sure all non-flattened
+         // fields are buffered and re-execute if allocation triggers deoptimization.
+         PreserveReexecuteState preexecs(this);
+         assert(tf()->returns_inline_type_as_fields(), "must be returned as fields");
+         jvms()->set_should_reexecute(true);
+         inc_sp(1);
+         value = value->as_InlineType()->allocate_fields(this);
+       }
+     } else if (value->is_InlineType()) {
+       // Inline type is returned as oop, make sure it is buffered and re-execute
+       // if allocation triggers deoptimization.
+       PreserveReexecuteState preexecs(this);
+       jvms()->set_should_reexecute(true);
+       inc_sp(1);
+       value = value->as_InlineType()->buffer(this);
+     } else if (tr && tr->isa_instptr() && tr->klass()->is_loaded() && tr->klass()->is_interface()) {
+       // If returning oops to an interface-return, there is a silent free
+       // cast from oop to interface allowed by the Verifier. Make it explicit here.
+       const TypeInstPtr* tp = value->bottom_type()->isa_instptr();
+       if (tp && tp->klass()->is_loaded() && !tp->klass()->is_interface()) {
          // sharpen the type eagerly; this eases certain assert checking
!         if (tp->higher_equal(TypeInstPtr::NOTNULL)) {
            tr = tr->join_speculative(TypeInstPtr::NOTNULL)->is_instptr();
+         }
          value = _gvn.transform(new CheckCastPPNode(0, value, tr));
        }
      } else {
!       // Handle returns of oop-arrays to an arrays-of-interface return
        const TypeInstPtr* phi_tip;
        const TypeInstPtr* val_tip;
!       Type::get_arrays_base_elements(return_type, value->bottom_type(), &phi_tip, &val_tip);
        if (phi_tip != NULL && phi_tip->is_loaded() && phi_tip->klass()->is_interface() &&
            val_tip != NULL && val_tip->is_loaded() && !val_tip->klass()->is_interface()) {
!         value = _gvn.transform(new CheckCastPPNode(0, value, return_type));
        }
      }
      phi->add_req(value);
    }
  
+   SafePointNode* exit_return = _exits.map();
+   exit_return->in( TypeFunc::Control  )->add_req( control() );
+   exit_return->in( TypeFunc::I_O      )->add_req( i_o    () );
+   Node *mem = exit_return->in( TypeFunc::Memory   );
+   for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
+     if (mms.is_empty()) {
+       // get a copy of the base memory, and patch just this one input
+       const TypePtr* adr_type = mms.adr_type(C);
+       Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
+       assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
+       gvn().set_type_bottom(phi);
+       phi->del_req(phi->req()-1);  // prepare to re-patch
+       mms.set_memory(phi);
+     }
+     mms.memory()->add_req(mms.memory2());
+   }
+ 
    if (_first_return) {
      _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
      _first_return = false;
    } else {
      _exits.map()->merge_replaced_nodes_with(map());
< prev index next >