
src/hotspot/share/opto/parse1.cpp

*** 28,11 ***
--- 28,13 ---
  #include "memory/resourceArea.hpp"
  #include "oops/method.hpp"
  #include "opto/addnode.hpp"
  #include "opto/c2compiler.hpp"
  #include "opto/castnode.hpp"
+ #include "opto/convertnode.hpp"
  #include "opto/idealGraphPrinter.hpp"
+ #include "opto/inlinetypenode.hpp"
  #include "opto/locknode.hpp"
  #include "opto/memnode.hpp"
  #include "opto/opaquenode.hpp"
  #include "opto/parse.hpp"
  #include "opto/rootnode.hpp"

*** 100,14 ***
  
  //------------------------------ON STACK REPLACEMENT---------------------------
  
  // Construct a node which can be used to get incoming state for
  // on stack replacement.
! Node *Parse::fetch_interpreter_state(int index,
!                                      BasicType bt,
!                                      Node *local_addrs,
!                                      Node *local_addrs_base) {
    Node *mem = memory(Compile::AliasIdxRaw);
    Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
    Node *ctl = control();
  
    // Very similar to LoadNode::make, except we handle un-aligned longs and
--- 102,20 ---
  
  //------------------------------ON STACK REPLACEMENT---------------------------
  
  // Construct a node which can be used to get incoming state for
  // on stack replacement.
! Node* Parse::fetch_interpreter_state(int index,
!                                      const Type* type,
!                                      Node* local_addrs,
!                                      Node* local_addrs_base) {
+   BasicType bt = type->basic_type();
+   if (type == TypePtr::NULL_PTR) {
+     // Ptr types are mixed together with T_ADDRESS but NULL_PTR is
+     // really for T_OBJECT types, so correct it.
+     bt = T_OBJECT;
+   }
    Node *mem = memory(Compile::AliasIdxRaw);
    Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
    Node *ctl = control();
  
    // Very similar to LoadNode::make, except we handle un-aligned longs and

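For context on the hunk above: fetch_interpreter_state reads incoming interpreter state out of the OSR temp buffer, where local slots live at descending word offsets from local_addrs. A minimal standalone sketch of that address arithmetic (plain C++, not the HotSpot API; wordSize and the layout are simplified assumptions):

    #include <cstdint>
    #include <cstdio>

    // Model of basic_plus_adr(local_addrs_base, local_addrs, -index*wordSize):
    // locals are laid out at decreasing addresses, one word per slot.
    static const int wordSize = 8;  // assumption: 64-bit VM

    uintptr_t local_slot_addr(uintptr_t locals_addr, int index) {
      // Slot 0 is at locals_addr; slot N is N words below it.
      return locals_addr - (uintptr_t)index * wordSize;
    }

    int main() {
      uintptr_t base = 0x7fff0000;
      for (int i = 0; i < 4; i++) {
        printf("local %d at 0x%llx\n", i,
               (unsigned long long)local_slot_addr(base, i));
      }
      return 0;
    }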
*** 145,11 ***
  // The type is the type predicted by ciTypeFlow.  Note that it is
  // not a general type, but can only come from Type::get_typeflow_type.
  // The safepoint is a map which will feed an uncommon trap.
  Node* Parse::check_interpreter_type(Node* l, const Type* type,
                                      SafePointNode* &bad_type_exit) {
- 
    const TypeOopPtr* tp = type->isa_oopptr();
  
    // TypeFlow may assert null-ness if a type appears unloaded.
    if (type == TypePtr::NULL_PTR ||
        (tp != nullptr && !tp->is_loaded())) {
--- 153,10 ---

*** 169,10 ***
--- 176,16 ---
    // toward more specific classes.  Make sure these specific classes
    // are still in effect.
    if (tp != nullptr && !tp->is_same_java_type_as(TypeInstPtr::BOTTOM)) {
      // TypeFlow asserted a specific object type.  Value must have that type.
      Node* bad_type_ctrl = nullptr;
+     if (tp->is_inlinetypeptr() && !tp->maybe_null()) {
+       // Check inline types for null here to prevent checkcast from adding an
+       // exception state before the bytecode entry (use 'bad_type_ctrl' instead).
+       l = null_check_oop(l, &bad_type_ctrl);
+       bad_type_exit->control()->add_req(bad_type_ctrl);
+     }
      l = gen_checkcast(l, makecon(tp->as_klass_type()->cast_to_exactness(true)), &bad_type_ctrl);
      bad_type_exit->control()->add_req(bad_type_ctrl);
    }
  
    assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");

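The ordering in the new null check above matters: the failing path of the null check must feed the same bad-type trap as the checkcast, rather than creating an exception state at a bci before the OSR entry. A toy model of how both failing control edges end up on one exit (illustrative only, not C2's Node API):

    #include <cstdio>
    #include <string>
    #include <vector>

    // Both the null check and the checkcast route their failing control
    // edges into one "bad type" exit that feeds a single uncommon trap.
    struct Region {
      std::vector<std::string> in;
      void add_req(const std::string& e) { in.push_back(e); }
    };

    int main() {
      Region bad_type_exit;
      bad_type_exit.add_req("null check failed (null-free inline type was null)");
      bad_type_exit.add_req("checkcast failed (not the asserted exact klass)");
      for (const auto& e : bad_type_exit.in) printf("-> trap edge: %s\n", e.c_str());
      return 0;
    }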
*** 185,11 ***
  void Parse::load_interpreter_state(Node* osr_buf) {
    int index;
    int max_locals = jvms()->loc_size();
    int max_stack  = jvms()->stk_size();
  
- 
    // Mismatch between method and jvms can occur since map briefly held
    // an OSR entry state (which takes up one RawPtr word).
    assert(max_locals == method()->max_locals(), "sanity");
    assert(max_stack  >= method()->max_stack(),  "sanity");
    assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
--- 198,10 ---

*** 240,14 ***
      Node* box = _gvn.transform(osr_box);
  
      // Displaced headers and locked objects are interleaved in the
      // temp OSR buffer.  We only copy the locked objects out here.
      // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
!     Node *lock_object = fetch_interpreter_state(index*2, T_OBJECT, monitors_addr, osr_buf);
      // Try and copy the displaced header to the BoxNode
!     Node *displaced_hdr = fetch_interpreter_state((index*2) + 1, T_ADDRESS, monitors_addr, osr_buf);
- 
  
      store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
  
      // Build a bogus FastLockNode (no code will be generated) and push the
      // monitor into our debug info.
--- 252,13 ---
      Node* box = _gvn.transform(osr_box);
  
      // Displaced headers and locked objects are interleaved in the
      // temp OSR buffer.  We only copy the locked objects out here.
      // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
!     Node* lock_object = fetch_interpreter_state(index*2, Type::get_const_basic_type(T_OBJECT), monitors_addr, osr_buf);
      // Try and copy the displaced header to the BoxNode
!     Node* displaced_hdr = fetch_interpreter_state((index*2) + 1, Type::get_const_basic_type(T_ADDRESS), monitors_addr, osr_buf);
  
      store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
  
      // Build a bogus FastLockNode (no code will be generated) and push the
      // monitor into our debug info.

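The index*2 / index*2+1 arithmetic above follows from the interleaved monitor layout in the OSR temp buffer. A small sketch of that layout (simplified; the slot numbering is the only point being made):

    #include <cstdio>

    // OSR temp buffer monitor area: [obj0, hdr0, obj1, hdr1, ...]
    int main() {
      const int nof_monitors = 3;
      for (int index = 0; index < nof_monitors; index++) {
        int obj_slot = index * 2;      // locked object, fetched as T_OBJECT
        int hdr_slot = index * 2 + 1;  // displaced header, fetched as T_ADDRESS
        printf("monitor %d: object in slot %d, displaced header in slot %d\n",
               index, obj_slot, hdr_slot);
      }
      return 0;
    }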
*** 311,17 ***
      // makes it go dead.
      if (type == Type::BOTTOM) {
        continue;
      }
      // Construct code to access the appropriate local.
!     BasicType bt = type->basic_type();
-     if (type == TypePtr::NULL_PTR) {
-       // Ptr types are mixed together with T_ADDRESS but null is
-       // really for T_OBJECT types so correct it.
-       bt = T_OBJECT;
-     }
-     Node *value = fetch_interpreter_state(index, bt, locals_addr, osr_buf);
      set_local(index, value);
    }
  
    // Extract the needed stack entries from the interpreter frame.
    for (index = 0; index < sp(); index++) {
--- 322,11 ---
      // makes it go dead.
      if (type == Type::BOTTOM) {
        continue;
      }
      // Construct code to access the appropriate local.
!     Node* value = fetch_interpreter_state(index, type, locals_addr, osr_buf);
      set_local(index, value);
    }
  
    // Extract the needed stack entries from the interpreter frame.
    for (index = 0; index < sp(); index++) {

*** 517,11 ***
      _entry_bci = InvocationEntryBci;
      _flow = method()->get_flow_analysis();
    }
  
    if (_flow->failing()) {
!     assert(false, "type flow analysis failed during parsing");
      C->record_method_not_compilable(_flow->failure_reason());
  #ifndef PRODUCT
        if (PrintOpto && (Verbose || WizardMode)) {
          if (is_osr_parse()) {
            tty->print_cr("OSR @%d type flow bailout: %s", _entry_bci, _flow->failure_reason());
--- 522,13 ---
      _entry_bci = InvocationEntryBci;
      _flow = method()->get_flow_analysis();
    }
  
    if (_flow->failing()) {
!     // TODO Adding a trap due to an unloaded return type in ciTypeFlow::StateVector::do_invoke
+     // can lead to this. Re-enable once 8284443 is fixed.
+     //assert(false, "type flow analysis failed during parsing");
      C->record_method_not_compilable(_flow->failure_reason());
  #ifndef PRODUCT
        if (PrintOpto && (Verbose || WizardMode)) {
          if (is_osr_parse()) {
            tty->print_cr("OSR @%d type flow bailout: %s", _entry_bci, _flow->failure_reason());

*** 608,10 ***
--- 615,32 ---
      if (log)  log->done("parse");
      C->set_default_node_notes(caller_nn);
      return;
    }
  
+   // Handle inline type arguments
+   int arg_size = method()->arg_size();
+   for (int i = 0; i < arg_size; i++) {
+     Node* parm = local(i);
+     const Type* t = _gvn.type(parm);
+     if (t->is_inlinetypeptr()) {
+       // Create InlineTypeNode from the oop and replace the parameter
+       bool is_larval = (i == 0) && method()->is_object_constructor() && !method()->holder()->is_java_lang_Object();
+       Node* vt = InlineTypeNode::make_from_oop(this, parm, t->inline_klass(), !t->maybe_null(), is_larval);
+       replace_in_map(parm, vt);
+     } else if (UseTypeSpeculation && (i == (arg_size - 1)) && !is_osr_parse() && method()->has_vararg() &&
+                t->isa_aryptr() != nullptr && !t->is_aryptr()->is_null_free() && !t->is_aryptr()->is_not_null_free()) {
+       // Speculate on varargs Object array being not null-free (and therefore also not flat)
+       const TypePtr* spec_type = t->speculative();
+       spec_type = (spec_type != nullptr && spec_type->isa_aryptr() != nullptr) ? spec_type : t->is_aryptr();
+       spec_type = spec_type->remove_speculative()->is_aryptr()->cast_to_not_null_free();
+       spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::Offset::bottom, TypeOopPtr::InstanceBot, spec_type);
+       Node* cast = _gvn.transform(new CheckCastPPNode(control(), parm, t->join_speculative(spec_type)));
+       replace_in_map(parm, cast);
+     }
+   }
+ 
    entry_map = map();  // capture any changes performed by method setup code
    assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");
  
    // We begin parsing as if we have just encountered a jump to the
    // method entry.

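In the argument loop above, each oop parameter whose static type is an inline type is replaced in the map by a scalarized InlineTypeNode, and the receiver of a value-class constructor is marked larval because it is still under initialization. A sketch of just that decision (hypothetical standalone model, not the ciMethod API):

    #include <cstdio>

    // arg 0 of a constructor is larval unless the holder is java.lang.Object,
    // whose <init> performs no field writes.
    bool larval_receiver(int i, bool is_object_constructor, bool holder_is_jl_object) {
      return i == 0 && is_object_constructor && !holder_is_jl_object;
    }

    int main() {
      bool is_inlinetypeptr[] = {true, false, true};  // hypothetical signature
      for (int i = 0; i < 3; i++) {
        if (is_inlinetypeptr[i]) {
          printf("arg %d: replace parm with InlineTypeNode, larval=%d\n",
                 i, (int)larval_receiver(i, /*ctor*/ true, /*jlObject*/ false));
        }
      }
      return 0;
    }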
*** 793,12 ***
    gvn().set_type_bottom(memphi);
    _exits.set_i_o(iophi);
    _exits.set_all_memory(memphi);
  
    // Add a return value to the exit state.  (Do not push it yet.)
!   if (tf()->range()->cnt() > TypeFunc::Parms) {
!     const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
      if (ret_type->isa_int()) {
        BasicType ret_bt = method()->return_type()->basic_type();
        if (ret_bt == T_BOOLEAN ||
            ret_bt == T_CHAR ||
            ret_bt == T_BYTE ||
--- 822,12 ---
    gvn().set_type_bottom(memphi);
    _exits.set_i_o(iophi);
    _exits.set_all_memory(memphi);
  
    // Add a return value to the exit state.  (Do not push it yet.)
!   if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
!     const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
      if (ret_type->isa_int()) {
        BasicType ret_bt = method()->return_type()->basic_type();
        if (ret_bt == T_BOOLEAN ||
            ret_bt == T_CHAR ||
            ret_bt == T_BYTE ||

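The range() -> range_sig() renames in this and the following hunks come from the Valhalla split of TypeFunc into a signature view and a calling-convention view. A hedged summary of the distinction, with hypothetical slot counts for a method returning an inline type with two int fields:

    #include <cstdio>

    int main() {
      // For 'MyValue m(MyValue v)' with MyValue { int x; int y; } (assumed):
      //   domain_sig(): one slot per declared argument (the MyValue oop)
      //   domain_cc():  one slot per passed value (field x, field y)
      //   range_sig():  the declared return type (one slot)
      //   range_cc():   tagged klass + one slot per returned field (three slots)
      // The parser sizes its stack state from the signature view, while
      // Return nodes built for the calling convention use range_cc().
      const int sig_ret_slots = 1;
      const int cc_ret_slots  = 3;
      printf("range_sig ret slots=%d, range_cc ret slots=%d\n",
             sig_ret_slots, cc_ret_slots);
      return 0;
    }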
*** 816,26 ***
      }
      int         ret_size = type2size[ret_type->basic_type()];
      Node*       ret_phi  = new PhiNode(region, ret_type);
      gvn().set_type_bottom(ret_phi);
      _exits.ensure_stack(ret_size);
!     assert((int)(tf()->range()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
      assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
      _exits.set_argument(0, ret_phi);  // here is where the parser finds it
      // Note:  ret_phi is not yet pushed, until do_exits.
    }
  }
  
- 
  //----------------------------build_start_state-------------------------------
  // Construct a state which contains only the incoming arguments from an
  // unknown caller.  The method & bci will be null & InvocationEntryBci.
  JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
!   int        arg_size = tf->domain()->cnt();
!   int        max_size = MAX2(arg_size, (int)tf->range()->cnt());
    JVMState*  jvms     = new (this) JVMState(max_size - TypeFunc::Parms);
    SafePointNode* map  = new SafePointNode(max_size, jvms);
    record_for_igvn(map);
    assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
    Node_Notes* old_nn = default_node_notes();
    if (old_nn != nullptr && has_method()) {
      Node_Notes* entry_nn = old_nn->clone(this);
--- 845,26 ---
      }
      int         ret_size = type2size[ret_type->basic_type()];
      Node*       ret_phi  = new PhiNode(region, ret_type);
      gvn().set_type_bottom(ret_phi);
      _exits.ensure_stack(ret_size);
!     assert((int)(tf()->range_sig()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
      assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
      _exits.set_argument(0, ret_phi);  // here is where the parser finds it
      // Note:  ret_phi is not yet pushed, until do_exits.
    }
  }
  
  //----------------------------build_start_state-------------------------------
  // Construct a state which contains only the incoming arguments from an
  // unknown caller.  The method & bci will be null & InvocationEntryBci.
  JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
!   int        arg_size = tf->domain_sig()->cnt();
!   int        max_size = MAX2(arg_size, (int)tf->range_cc()->cnt());
    JVMState*  jvms     = new (this) JVMState(max_size - TypeFunc::Parms);
    SafePointNode* map  = new SafePointNode(max_size, jvms);
+   jvms->set_map(map);
    record_for_igvn(map);
    assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
    Node_Notes* old_nn = default_node_notes();
    if (old_nn != nullptr && has_method()) {
      Node_Notes* entry_nn = old_nn->clone(this);

*** 843,23 ***
      entry_jvms->set_offsets(0);
      entry_jvms->set_bci(entry_bci());
      entry_nn->set_jvms(entry_jvms);
      set_default_node_notes(entry_nn);
    }
!   uint i;
!   for (i = 0; i < (uint)arg_size; i++) {
!     Node* parm = initial_gvn()->transform(new ParmNode(start, i));
      map->init_req(i, parm);
      // Record all these guys for later GVN.
      record_for_igvn(parm);
    }
    for (; i < map->req(); i++) {
      map->init_req(i, top());
    }
    assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
    set_default_node_notes(old_nn);
-   jvms->set_map(map);
    return jvms;
  }
  
  //-----------------------------make_node_notes---------------------------------
  Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
--- 872,42 ---
      entry_jvms->set_offsets(0);
      entry_jvms->set_bci(entry_bci());
      entry_nn->set_jvms(entry_jvms);
      set_default_node_notes(entry_nn);
    }
!   PhaseGVN& gvn = *initial_gvn();
!   uint i = 0;
!   int arg_num = 0;
+   for (uint j = 0; i < (uint)arg_size; i++) {
+     const Type* t = tf->domain_sig()->field_at(i);
+     Node* parm = nullptr;
+     if (t->is_inlinetypeptr() && method()->is_scalarized_arg(arg_num)) {
+       // Inline type arguments are not passed by reference: we get an argument per
+       // field of the inline type. Build InlineTypeNodes from the inline type arguments.
+       GraphKit kit(jvms, &gvn);
+       kit.set_control(map->control());
+       Node* old_mem = map->memory();
+       // Use immutable memory for inline type loads and restore it below
+       kit.set_all_memory(C->immutable_memory());
+       parm = InlineTypeNode::make_from_multi(&kit, start, t->inline_klass(), j, /* in= */ true, /* null_free= */ !t->maybe_null());
+       map->set_control(kit.control());
+       map->set_memory(old_mem);
+     } else {
+       parm = gvn.transform(new ParmNode(start, j++));
+     }
      map->init_req(i, parm);
      // Record all these guys for later GVN.
      record_for_igvn(parm);
+     if (i >= TypeFunc::Parms && t != Type::HALF) {
+       arg_num++;
+     }
    }
    for (; i < map->req(); i++) {
      map->init_req(i, top());
    }
    assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
    set_default_node_notes(old_nn);
    return jvms;
  }
  
  //-----------------------------make_node_notes---------------------------------
  Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {

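The rewritten loop in build_start_state walks three counters: i over signature slots (map inputs), j over incoming calling-convention parms, and arg_num over declared arguments (skipping the HALF slots of longs/doubles). A simplified standalone walk of that bookkeeping (the two-field inline type and the per-field parm consumption are assumptions for illustration):

    #include <cstdio>

    enum SlotKind { INT, LONG_HALF, SCALARIZED_VT2 };  // VT2: inline type, 2 fields

    int main() {
      SlotKind sig[] = {INT, SCALARIZED_VT2, LONG_HALF};  // hypothetical signature
      int j = 0, arg_num = 0;
      for (int i = 0; i < 3; i++) {
        if (sig[i] == SCALARIZED_VT2) {
          // assumption: make_from_multi consumes one incoming parm per field
          printf("slot %d: InlineTypeNode from parms %d..%d\n", i, j, j + 1);
          j += 2;
        } else {
          printf("slot %d: ParmNode %d\n", i, j);
          j++;
        }
        if (sig[i] != LONG_HALF) arg_num++;  // HALF slots are not arguments
      }
      printf("declared arguments seen: %d\n", arg_num);
      return 0;
    }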
*** 882,16 ***
                               kit.i_o(),
                               kit.reset_memory(),
                               kit.frameptr(),
                               kit.returnadr());
    // Add zero or 1 return values
!   int ret_size = tf()->range()->cnt() - TypeFunc::Parms;
    if (ret_size > 0) {
      kit.inc_sp(-ret_size);  // pop the return value(s)
      kit.sync_jvms();
!     ret->add_req(kit.argument(0));
!     // Note:  The second dummy edge is not needed by a ReturnNode.
    }
    // bind it to root
    root()->add_req(ret);
    record_for_igvn(ret);
    initial_gvn()->transform(ret);
--- 930,38 ---
                               kit.i_o(),
                               kit.reset_memory(),
                               kit.frameptr(),
                               kit.returnadr());
    // Add zero or 1 return values
!   int ret_size = tf()->range_sig()->cnt() - TypeFunc::Parms;
    if (ret_size > 0) {
      kit.inc_sp(-ret_size);  // pop the return value(s)
      kit.sync_jvms();
!     Node* res = kit.argument(0);
!     if (tf()->returns_inline_type_as_fields()) {
+       // Multiple return values (inline type fields): add as many edges
+       // to the Return node as returned values.
+       InlineTypeNode* vt = res->as_InlineType();
+       ret->add_req_batch(nullptr, tf()->range_cc()->cnt() - TypeFunc::Parms);
+       if (vt->is_allocated(&kit.gvn()) && !StressCallingConvention) {
+         ret->init_req(TypeFunc::Parms, vt);
+       } else {
+         // Return the tagged klass pointer to signal scalarization to the caller
+         Node* tagged_klass = vt->tagged_klass(kit.gvn());
+         // Return null if the inline type is null (IsInit field is not set)
+         Node* conv   = kit.gvn().transform(new ConvI2LNode(vt->get_is_init()));
+         Node* shl    = kit.gvn().transform(new LShiftLNode(conv, kit.intcon(63)));
+         Node* shr    = kit.gvn().transform(new RShiftLNode(shl, kit.intcon(63)));
+         tagged_klass = kit.gvn().transform(new AndLNode(tagged_klass, shr));
+         ret->init_req(TypeFunc::Parms, tagged_klass);
+       }
+       uint idx = TypeFunc::Parms + 1;
+       vt->pass_fields(&kit, ret, idx, false, false);
+     } else {
+       ret->add_req(res);
+       // Note:  The second dummy edge is not needed by a ReturnNode.
+     }
    }
    // bind it to root
    root()->add_req(ret);
    record_for_igvn(ret);
    initial_gvn()->transform(ret);

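The ConvI2L/LShiftL/RShiftL/AndL sequence above turns the IsInit flag (0 or 1) into an all-zeros or all-ones mask, so a null inline type returns 0 rather than the tagged klass pointer. The same trick in plain C++ (assuming arithmetic right shift, which is what the C2 RShiftL node performs):

    #include <cstdint>
    #include <cstdio>

    int64_t masked_tagged_klass(int64_t tagged_klass, int is_init) {
      uint64_t bit = (uint64_t)(is_init & 1) << 63;  // ConvI2L + LShiftL(63)
      int64_t mask = (int64_t)bit >> 63;             // RShiftL(63): 0 -> 0, 1 -> -1
      return tagged_klass & mask;                    // AndL: null out if !is_init
    }

    int main() {
      printf("%llx\n", (unsigned long long)masked_tagged_klass(0xCAFE1, 1));  // cafe1
      printf("%llx\n", (unsigned long long)masked_tagged_klass(0xCAFE1, 0));  // 0
      return 0;
    }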
*** 1011,11 ***
    // "All bets are off" unless the first publication occurs after a
    // normal return from the constructor.  We do not attempt to detect
    // such unusual early publications.  But no barrier is needed on
    // exceptional returns, since they cannot publish normally.
    //
!   if (method()->is_object_initializer() &&
         (wrote_final() || wrote_stable() ||
           (AlwaysSafeConstructors && wrote_fields()) ||
           (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
      Node* recorded_alloc = alloc_with_final_or_stable();
      _exits.insert_mem_bar(UseStoreStoreForCtor ? Op_MemBarStoreStore : Op_MemBarRelease,
--- 1081,11 ---
    // "All bets are off" unless the first publication occurs after a
    // normal return from the constructor.  We do not attempt to detect
    // such unusual early publications.  But no barrier is needed on
    // exceptional returns, since they cannot publish normally.
    //
!   if ((method()->is_object_constructor() || method()->is_class_initializer()) &&
         (wrote_final() || wrote_stable() ||
           (AlwaysSafeConstructors && wrote_fields()) ||
           (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
      Node* recorded_alloc = alloc_with_final_or_stable();
      _exits.insert_mem_bar(UseStoreStoreForCtor ? Op_MemBarStoreStore : Op_MemBarRelease,

*** 1039,12 ***
      mms.set_memory(_gvn.transform(mms.memory()));
    }
    // Clean up input MergeMems created by transforming the slices
    _gvn.transform(_exits.merged_memory());
  
!   if (tf()->range()->cnt() > TypeFunc::Parms) {
!     const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
      Node*       ret_phi  = _gvn.transform( _exits.argument(0) );
      if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
        // If the type we set for the ret_phi in build_exits() is too optimistic and
        // the ret_phi is top now, there's an extremely small chance that it may be due to class
        // loading.  It could also be due to an error, so mark this method as not compilable because
--- 1109,12 ---
      mms.set_memory(_gvn.transform(mms.memory()));
    }
    // Clean up input MergeMems created by transforming the slices
    _gvn.transform(_exits.merged_memory());
  
!   if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
!     const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
      Node*       ret_phi  = _gvn.transform( _exits.argument(0) );
      if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
        // If the type we set for the ret_phi in build_exits() is too optimistic and
        // the ret_phi is top now, there's an extremely small chance that it may be due to class
        // loading.  It could also be due to an error, so mark this method as not compilable because

*** 1134,12 ***
    _caller->map()->delete_replaced_nodes();
  
    // If this is an inlined method, we may have to do a receiver null check.
    if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
      GraphKit kit(_caller);
!     kit.null_check_receiver_before_call(method());
      _caller = kit.transfer_exceptions_into_jvms();
      if (kit.stopped()) {
        _exits.add_exception_states_from(_caller);
        _exits.set_jvms(_caller);
        return nullptr;
      }
--- 1204,18 ---
    _caller->map()->delete_replaced_nodes();
  
    // If this is an inlined method, we may have to do a receiver null check.
    if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
      GraphKit kit(_caller);
!     Node* receiver = kit.argument(0);
+     Node* null_free = kit.null_check_receiver_before_call(method());
      _caller = kit.transfer_exceptions_into_jvms();
+     if (receiver->is_InlineType() && receiver->as_InlineType()->is_larval()) {
+       // Replace the larval inline type receiver in the exit map as well to make sure that
+       // we can find and update it in Parse::do_call when we are done with the initialization.
+       _exits.map()->replace_edge(receiver, null_free);
+     }
      if (kit.stopped()) {
        _exits.add_exception_states_from(_caller);
        _exits.set_jvms(_caller);
        return nullptr;
      }

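null_check_receiver_before_call returns the null-checked receiver; when the original receiver was a larval InlineTypeNode, the exit map still points at the stale node, hence the replace_edge call. A toy model of that edge replacement (illustrative, not the SafePointNode API):

    #include <cstdio>
    #include <vector>

    // Swap every occurrence of the old (larval) receiver for the
    // null-checked one in a map's input list.
    int replace_edge(std::vector<int>& edges, int old_id, int new_id) {
      int n = 0;
      for (int& e : edges) if (e == old_id) { e = new_id; n++; }
      return n;
    }

    int main() {
      std::vector<int> exit_map = {7, 42, 42, 9};     // 42 = larval receiver
      printf("replaced %d edges\n", replace_edge(exit_map, 42, 43));
      return 0;
    }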
*** 1172,11 ***
      set_all_memory(reset_memory());
    }
    assert(merged_memory(), "");
  
    // Now add the locals which are initially bound to arguments:
!   uint arg_size = tf()->domain()->cnt();
    ensure_stack(arg_size - TypeFunc::Parms);  // OSR methods have funny args
    for (i = TypeFunc::Parms; i < arg_size; i++) {
      map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
    }
  
--- 1248,11 ---
      set_all_memory(reset_memory());
    }
    assert(merged_memory(), "");
  
    // Now add the locals which are initially bound to arguments:
!   uint arg_size = tf()->domain_sig()->cnt();
    ensure_stack(arg_size - TypeFunc::Parms);  // OSR methods have funny args
    for (i = TypeFunc::Parms; i < arg_size; i++) {
      map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
    }
  

*** 1246,10 ***
--- 1322,11 ---
        ciInstance* mirror = _method->holder()->java_mirror();
        const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
        lock_obj = makecon(t_lock);
      } else {                  // Else pass the "this" pointer,
        lock_obj = local(0);    // which is Parm0 from StartNode
+       assert(!_gvn.type(lock_obj)->make_oopptr()->can_be_inline_type(), "can't be an inline type");
      }
      // Clear out dead values from the debug info.
      kill_dead_locals();
      // Build the FastLockNode
      _synch_lock = shared_lock(lock_obj);

*** 1681,10 ***
--- 1758,46 ---
  
    // Zap extra stack slots to top
    assert(sp() == target->start_sp(), "");
    clean_stack(sp());
  
+   // Check for merge conflicts involving inline types
+   JVMState* old_jvms = map()->jvms();
+   int old_bci = bci();
+   JVMState* tmp_jvms = old_jvms->clone_shallow(C);
+   tmp_jvms->set_should_reexecute(true);
+   tmp_jvms->bind_map(map());
+   // Execution needs to restart at the next bytecode (entry of the next
+   // block)
+   if (target->is_merged() ||
+       pnum > PhiNode::Input ||
+       target->is_handler() ||
+       target->is_loop_head()) {
+     set_parse_bci(target->start());
+     for (uint j = TypeFunc::Parms; j < map()->req(); j++) {
+       Node* n = map()->in(j);                 // Incoming change to target state.
+       const Type* t = nullptr;
+       if (tmp_jvms->is_loc(j)) {
+         t = target->local_type_at(j - tmp_jvms->locoff());
+       } else if (tmp_jvms->is_stk(j) && j < (uint)sp() + tmp_jvms->stkoff()) {
+         t = target->stack_type_at(j - tmp_jvms->stkoff());
+       }
+       if (t != nullptr && t != Type::BOTTOM) {
+         if (n->is_InlineType() && !t->is_inlinetypeptr()) {
+           // Allocate inline type in src block to be able to merge it with oop in target block
+           map()->set_req(j, n->as_InlineType()->buffer(this));
+         } else if (!n->is_InlineType() && t->is_inlinetypeptr()) {
+           // Scalarize null in src block to be able to merge it with inline type in target block
+           assert(gvn().type(n)->is_zero_type(), "Should have been scalarized");
+           map()->set_req(j, InlineTypeNode::make_null(gvn(), t->inline_klass()));
+         }
+       }
+     }
+   }
+   old_jvms->bind_map(map());
+   set_parse_bci(old_bci);
+ 
    if (!target->is_merged()) {   // No prior mapping at this bci
      if (TraceOptoParse) { tty->print(" with empty state");  }
  
      // If this path is dead, do not bother capturing it as a merge.
      // It is "as if" we had 1 fewer predecessors from the beginning.

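The pre-pass above reconciles slot shapes before the actual merge: an incoming InlineTypeNode meeting an oop-typed target slot is buffered (allocated), and an incoming null meeting an inline-type-typed slot is scalarized via make_null, so both predecessors contribute the same kind of node to the phi. The decision table, as a standalone sketch:

    #include <cstdio>

    enum Shape { OOP, SCALARIZED, NULL_CONST };

    const char* reconcile(Shape incoming, bool target_is_inlinetypeptr) {
      if (incoming == SCALARIZED && !target_is_inlinetypeptr)
        return "buffer: allocate the inline type, merge as oop";
      if (incoming == NULL_CONST && target_is_inlinetypeptr)
        return "scalarize: make_null (IsInit = 0)";
      return "leave as-is";
    }

    int main() {
      printf("%s\n", reconcile(SCALARIZED, false));
      printf("%s\n", reconcile(NULL_CONST, true));
      printf("%s\n", reconcile(OOP, false));
      return 0;
    }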
*** 1735,10 ***
--- 1848,11 ---
  #ifdef ASSERT
      if (target->is_SEL_head()) {
        target->mark_merged_backedge(block());
      }
  #endif
+ 
      // We must not manufacture more phis if the target is already parsed.
      bool nophi = target->is_parsed();
  
      SafePointNode* newin = map();// Hang on to incoming mapping
      Block* save_block = block(); // Hang on to incoming block;

*** 1770,18 ***
      }
  
      // Update all the non-control inputs to map:
      assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
      bool check_elide_phi = target->is_SEL_backedge(save_block);
      for (uint j = 1; j < newin->req(); j++) {
        Node* m = map()->in(j);   // Current state of target.
        Node* n = newin->in(j);   // Incoming change to target state.
        PhiNode* phi;
!       if (m->is_Phi() && m->as_Phi()->region() == r)
          phi = m->as_Phi();
!       else
          phi = nullptr;
        if (m != n) {             // Different; must merge
          switch (j) {
          // Frame pointer and Return Address never changes
          case TypeFunc::FramePtr:// Drop m, use the original value
          case TypeFunc::ReturnAdr:
--- 1884,22 ---
      }
  
      // Update all the non-control inputs to map:
      assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
      bool check_elide_phi = target->is_SEL_backedge(save_block);
+     bool last_merge = (pnum == PhiNode::Input);
      for (uint j = 1; j < newin->req(); j++) {
        Node* m = map()->in(j);   // Current state of target.
        Node* n = newin->in(j);   // Incoming change to target state.
        PhiNode* phi;
!       if (m->is_Phi() && m->as_Phi()->region() == r) {
          phi = m->as_Phi();
!       } else if (m->is_InlineType() && m->as_InlineType()->has_phi_inputs(r)) {
+         phi = m->as_InlineType()->get_oop()->as_Phi();
+       } else {
          phi = nullptr;
+       }
        if (m != n) {             // Different; must merge
          switch (j) {
          // Frame pointer and Return Address never changes
          case TypeFunc::FramePtr:// Drop m, use the original value
          case TypeFunc::ReturnAdr:

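In the phi lookup above, a merged inline-type slot is not itself a PhiNode: the InlineTypeNode carries a phi in its oop input (and per-field phis), so get_oop()->as_Phi() stands in as the slot's phi. A toy model of that has_phi_inputs test:

    #include <cstdio>

    struct Phi { int region; };
    struct InlineTypeVal { Phi* oop_phi; };  // simplified: oop input only

    Phi* phi_for_slot(const InlineTypeVal& v, int region) {
      return (v.oop_phi != nullptr && v.oop_phi->region == region) ? v.oop_phi
                                                                   : nullptr;
    }

    int main() {
      Phi p{5};
      InlineTypeVal v{&p};
      printf("region 5: %s\n", phi_for_slot(v, 5) ? "phi found" : "no phi");
      printf("region 6: %s\n", phi_for_slot(v, 6) ? "phi found" : "no phi");
      return 0;
    }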
*** 1824,15 ***
        // At this point, n might be top if:
        //  - there is no phi (because TypeFlow detected a conflict), or
        //  - the corresponding control edges is top (a dead incoming path)
        // It is a bug if we create a phi which sees a garbage value on a live path.
  
!       if (phi != nullptr) {
          assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
          assert(phi->region() == r, "");
          phi->set_req(pnum, n);  // Then add 'n' to the merge
!         if (pnum == PhiNode::Input) {
            // Last merge for this Phi.
            // So far, Phis have had a reasonable type from ciTypeFlow.
            // Now _gvn will join that with the meet of current inputs.
            // BOTTOM is never permissible here, 'cause pessimistically
            // Phis of pointers cannot lose the basic pointer type.
--- 1942,38 ---
        // At this point, n might be top if:
        //  - there is no phi (because TypeFlow detected a conflict), or
        //  - the corresponding control edges is top (a dead incoming path)
        // It is a bug if we create a phi which sees a garbage value on a live path.
  
!       // Merging two inline types?
+       if (phi != nullptr && phi->bottom_type()->is_inlinetypeptr()) {
+         // Reload current state because it may have been updated by ensure_phi
+         m = map()->in(j);
+         InlineTypeNode* vtm = m->as_InlineType(); // Current inline type
+         InlineTypeNode* vtn = n->as_InlineType(); // Incoming inline type
+         assert(vtm->get_oop() == phi, "Inline type should have Phi input");
+         if (TraceOptoParse) {
+ #ifdef ASSERT
+           tty->print_cr("\nMerging inline types");
+           tty->print_cr("Current:");
+           vtm->dump(2);
+           tty->print_cr("Incoming:");
+           vtn->dump(2);
+           tty->cr();
+ #endif
+         }
+         // Do the merge
+         vtm->merge_with(&_gvn, vtn, pnum, last_merge);
+         if (last_merge) {
+           map()->set_req(j, _gvn.transform(vtm));
+           record_for_igvn(vtm);
+         }
+       } else if (phi != nullptr) {
          assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
          assert(phi->region() == r, "");
          phi->set_req(pnum, n);  // Then add 'n' to the merge
!         if (last_merge) {
            // Last merge for this Phi.
            // So far, Phis have had a reasonable type from ciTypeFlow.
            // Now _gvn will join that with the meet of current inputs.
            // BOTTOM is never permissible here, 'cause pessimistically
            // Phis of pointers cannot lose the basic pointer type.

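merge_with merges two scalarized values field by field: each field phi receives the incoming field at input pnum, and only on the last merge (pnum == PhiNode::Input) is the whole node transformed and recorded for IGVN. A simplified model of the per-field update (plain containers, not C2 nodes):

    #include <cstdio>
    #include <vector>

    struct FieldPhi { std::vector<int> in; };  // in[pnum] = value on path pnum

    void merge_with(std::vector<FieldPhi>& cur, const std::vector<int>& incoming,
                    int pnum, bool last_merge) {
      for (size_t f = 0; f < cur.size(); f++) {
        cur[f].in[pnum] = incoming[f];
      }
      if (last_merge) {
        // C2 would GVN-transform the merged node here and record it for IGVN.
      }
    }

    int main() {
      std::vector<FieldPhi> vtm = {{{0, -1, -1}}, {{0, -1, -1}}};  // 2 fields, 2 paths
      merge_with(vtm, {10, 20}, 2, false);
      merge_with(vtm, {30, 40}, 1, true);  // input 1 arrives last
      printf("field0: [%d,%d]  field1: [%d,%d]\n",
             vtm[0].in[1], vtm[0].in[2], vtm[1].in[1], vtm[1].in[2]);
      return 0;
    }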
*** 1844,12 ***
            record_for_igvn(phi);
          }
        }
      } // End of for all values to be merged
  
!     if (pnum == PhiNode::Input &&
-         !r->in(0)) {         // The occasional useless Region
        assert(control() == r, "");
        set_control(r->nonnull_req());
      }
  
      map()->merge_replaced_nodes_with(newin);
--- 1985,11 ---
            record_for_igvn(phi);
          }
        }
      } // End of for all values to be merged
  
!     if (last_merge && !r->in(0)) {         // The occasional useless Region
        assert(control() == r, "");
        set_control(r->nonnull_req());
      }
  
      map()->merge_replaced_nodes_with(newin);

*** 1997,10 ***
--- 2137,12 ---
        }
      } else {
        if (n->is_Phi() && n->as_Phi()->region() == r) {
          assert(n->req() == pnum, "must be same size as region");
          n->add_req(nullptr);
+       } else if (n->is_InlineType() && n->as_InlineType()->has_phi_inputs(r)) {
+         n->as_InlineType()->add_new_path(r);
        }
      }
    }
  
    return pnum;

*** 2019,10 ***
--- 2161,14 ---
    if (o == top())  return nullptr; // TOP always merges into TOP
  
    if (o->is_Phi() && o->as_Phi()->region() == region) {
      return o->as_Phi();
    }
+   InlineTypeNode* vt = o->isa_InlineType();
+   if (vt != nullptr && vt->has_phi_inputs(region)) {
+     return vt->get_oop()->as_Phi();
+   }
  
    // Now use a Phi here for merging
    assert(!nocreate, "Cannot build a phi for a block already parsed.");
    const JVMState* jvms = map->jvms();
    const Type* t = nullptr;

*** 2038,12 ***
    } else {
      assert(false, "no type information for this phi");
    }
  
    // If the type falls to bottom, then this must be a local that
!   // is mixing ints and oops or some such.  Forcing it to top
!   // makes it go dead.
    if (t == Type::BOTTOM) {
      map->set_req(idx, top());
      return nullptr;
    }
  
--- 2184,12 ---
    } else {
      assert(false, "no type information for this phi");
    }
  
    // If the type falls to bottom, then this must be a local that
!   // is already dead or is mixing ints and oops or some such.
!   // Forcing it to top makes it go dead.
    if (t == Type::BOTTOM) {
      map->set_req(idx, top());
      return nullptr;
    }
  

*** 2052,15 ***
    if (t == Type::TOP || t == Type::HALF) {
      map->set_req(idx, top());
      return nullptr;
    }
  
!   PhiNode* phi = PhiNode::make(region, o, t);
!   gvn().set_type(phi, t);
!   if (C->do_escape_analysis()) record_for_igvn(phi);
!   map->set_req(idx, phi);
!   return phi;
  }
  
  //--------------------------ensure_memory_phi----------------------------------
  // Turn the idx'th slice of the current memory into a Phi
  PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
--- 2198,24 ---
    if (t == Type::TOP || t == Type::HALF) {
      map->set_req(idx, top());
      return nullptr;
    }
  
!   if (vt != nullptr && t->is_inlinetypeptr()) {
!     // Inline types are merged by merging their field values.
!     // Create a cloned InlineTypeNode with phi inputs that
!     // represents the merged inline type and update the map.
!     vt = vt->clone_with_phis(&_gvn, region);
+     map->set_req(idx, vt);
+     return vt->get_oop()->as_Phi();
+   } else {
+     PhiNode* phi = PhiNode::make(region, o, t);
+     gvn().set_type(phi, t);
+     if (C->do_escape_analysis()) record_for_igvn(phi);
+     map->set_req(idx, phi);
+     return phi;
+   }
  }
  
  //--------------------------ensure_memory_phi----------------------------------
  // Turn the idx'th slice of the current memory into a Phi
  PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {

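For an inline-type slot, ensure_phi does not build a single phi: clone_with_phis clones the node and gives every constituent value (the oop, the IsInit bit, each field) its own phi over the merge region. A rough standalone model of that cloning step (names and the seeding of input 1 are assumptions):

    #include <cstdio>
    #include <vector>

    struct PhiModel { std::vector<int> in; };

    PhiModel make_phi(int region_preds, int seed) {
      PhiModel p;
      p.in.assign(region_preds + 1, -1);  // input 0 is the region slot
      p.in[1] = seed;                     // current value on the first path
      return p;
    }

    int main() {
      const int preds = 2;
      PhiModel oop     = make_phi(preds, 100);
      PhiModel is_init = make_phi(preds, 1);
      PhiModel field_x = make_phi(preds, 7);
      printf("oop[1]=%d is_init[1]=%d field_x[1]=%d\n",
             oop.in[1], is_init.in[1], field_x.in[1]);
      return 0;
    }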
*** 2186,18 ***
--- 2341,54 ---
  void Parse::return_current(Node* value) {
    if (method()->intrinsic_id() == vmIntrinsics::_Object_init) {
      call_register_finalizer();
    }
  
+   // The frame pointer is always the same, already captured.
+   if (value != nullptr) {
+     Node* phi = _exits.argument(0);
+     const Type* return_type = phi->bottom_type();
+     const TypeInstPtr* tr = return_type->isa_instptr();
+     assert(!value->is_InlineType() || !value->as_InlineType()->is_larval(), "returning a larval");
+     if ((tf()->returns_inline_type_as_fields() || (_caller->has_method() && !Compile::current()->inlining_incrementally())) &&
+         return_type->is_inlinetypeptr()) {
+       // Inline type is returned as fields, make sure it is scalarized
+       if (!value->is_InlineType()) {
+         value = InlineTypeNode::make_from_oop(this, value, return_type->inline_klass(), false);
+       }
+       if (!_caller->has_method() || Compile::current()->inlining_incrementally()) {
+         // Returning from root or an incrementally inlined method. Make sure all non-flat
+         // fields are buffered and re-execute if allocation triggers deoptimization.
+         PreserveReexecuteState preexecs(this);
+         assert(tf()->returns_inline_type_as_fields(), "must be returned as fields");
+         jvms()->set_should_reexecute(true);
+         inc_sp(1);
+         value = value->as_InlineType()->allocate_fields(this);
+       }
+     } else if (value->is_InlineType()) {
+       // Inline type is returned as oop, make sure it is buffered and re-execute
+       // if allocation triggers deoptimization.
+       PreserveReexecuteState preexecs(this);
+       jvms()->set_should_reexecute(true);
+       inc_sp(1);
+       value = value->as_InlineType()->buffer(this);
+     }
+     // Otherwise, if returning oops to an interface-return, there is a silent
+     // free cast from oop to interface allowed by the Verifier. Make it
+     // explicit here.
+     phi->add_req(value);
+   }
+ 
    // Do not set_parse_bci, so that return goo is credited to the return insn.
    set_bci(InvocationEntryBci);
    if (method()->is_synchronized() && GenerateSynchronizationCode) {
      shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
    }
    if (C->env()->dtrace_method_probes()) {
      make_dtrace_method_exit(method());
    }
+ 
    SafePointNode* exit_return = _exits.map();
    exit_return->in( TypeFunc::Control  )->add_req( control() );
    exit_return->in( TypeFunc::I_O      )->add_req( i_o    () );
    Node *mem = exit_return->in( TypeFunc::Memory   );
    for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {

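Around the buffering/allocation in return_current, the parser flips the JVM state to re-execute and bumps sp by one, so a deoptimizing allocation makes the interpreter re-run the return bytecode with the value back on the stack; PreserveReexecuteState then restores the old flags. An RAII model of that save/restore pattern (illustrative, not the HotSpot class):

    #include <cstdio>

    struct JState { bool reexecute = false; int sp = 0; };

    struct PreserveReexecuteState {
      JState& s;
      JState saved;
      explicit PreserveReexecuteState(JState& st) : s(st), saved(st) {}
      ~PreserveReexecuteState() { s = saved; }  // restore on scope exit
    };

    int main() {
      JState st;
      {
        PreserveReexecuteState guard(st);
        st.reexecute = true;  // a deopt here re-runs the return bytecode
        st.sp += 1;           // the return value is logically back on the stack
        printf("during allocation: reexecute=%d sp=%d\n", st.reexecute, st.sp);
      }
      printf("after: reexecute=%d sp=%d\n", st.reexecute, st.sp);
      return 0;
    }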
*** 2211,19 ***
        mms.set_memory(phi);
      }
      mms.memory()->add_req(mms.memory2());
    }
  
-   // frame pointer is always same, already captured
-   if (value != nullptr) {
-     // If returning oops to an interface-return, there is a silent free
-     // cast from oop to interface allowed by the Verifier.  Make it explicit
-     // here.
-     Node* phi = _exits.argument(0);
-     phi->add_req(value);
-   }
- 
    if (_first_return) {
      _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
      _first_return = false;
    } else {
      _exits.map()->merge_replaced_nodes_with(map());
--- 2402,10 ---