src/hotspot/share/opto/escape.cpp

*** 27,10 ***
--- 27,11 ---
  #include "compiler/compileLog.hpp"
  #include "gc/shared/barrierSet.hpp"
  #include "gc/shared/c2/barrierSetC2.hpp"
  #include "libadt/vectset.hpp"
  #include "memory/allocation.hpp"
+ #include "memory/metaspace.hpp"
  #include "memory/resourceArea.hpp"
  #include "opto/c2compiler.hpp"
  #include "opto/arraycopynode.hpp"
  #include "opto/callnode.hpp"
  #include "opto/cfgnode.hpp"

*** 159,10 ***
--- 160,20 ---
    // add the phantom_obj only once to them.
    ptnodes_worklist.append(phantom_obj);
    java_objects_worklist.append(phantom_obj);
    for( uint next = 0; next < ideal_nodes.size(); ++next ) {
      Node* n = ideal_nodes.at(next);
+     if ((n->Opcode() == Op_LoadX || n->Opcode() == Op_StoreX) &&
+         !n->in(MemNode::Address)->is_AddP() &&
+         _igvn->type(n->in(MemNode::Address))->isa_oopptr()) {
+     // Load/Store at mark word address is at offset 0 so has no AddP which confuses EA
+       Node* addp = new AddPNode(n->in(MemNode::Address), n->in(MemNode::Address), _igvn->MakeConX(0));
+       _igvn->register_new_node_with_optimizer(addp);
+       _igvn->replace_input_of(n, MemNode::Address, addp);
+       ideal_nodes.push(addp);
+       _nodes.at_put_grow(addp->_idx, nullptr, nullptr);
+     }
      // Create PointsTo nodes and add them to Connection Graph. Called
      // only once per ideal node since ideal_nodes is Unique_Node list.
      add_node_to_connection_graph(n, &delayed_worklist);
      PointsToNode* ptn = ptnode_adr(n->_idx);
      if (ptn != nullptr && ptn != phantom_obj) {

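The hunk above normalizes memory ops whose address is the bare oop itself (offset 0, i.e. a mark word access): escape analysis models every field access as base plus offset, so the bare oop gets wrapped in an explicit AddP(oop, oop, 0). A stand-alone sketch of that normalization, with toy types (ToyNode and make_zero_addp are illustrative stand-ins, not HotSpot API):

  #include <cassert>
  #include <cstdint>

  struct ToyNode {
    ToyNode* base;    // nullptr marks a bare oop used directly as an address
    intptr_t offset;  // meaningful only when base != nullptr
  };

  // Rewrite a bare-oop address into the base+offset shape EA expects,
  // mirroring the AddP(oop, oop, 0) inserted above.
  ToyNode make_zero_addp(ToyNode* oop_address) {
    return ToyNode{oop_address, 0};
  }

  int main() {
    ToyNode oop{nullptr, 0};
    ToyNode addr = make_zero_addp(&oop);  // replaces the bare oop input
    assert(addr.base == &oop && addr.offset == 0);
    return 0;
  }
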
*** 691,11 ***
        if (ptn == nullptr || !ptn->scalar_replaceable()) {
          continue;
        }
  
        AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
!       SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt);
        if (sobj == nullptr) {
          _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
          return;
        }
  
--- 702,13 ---
        if (ptn == nullptr || !ptn->scalar_replaceable()) {
          continue;
        }
  
        AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
!       Unique_Node_List value_worklist;
+       SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt, &value_worklist);
+       guarantee(value_worklist.size() == 0, "Unimplemented: Valhalla support for 8287061");
        if (sobj == nullptr) {
          _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
          return;
        }
  

*** 855,11 ***
      const char* name = call->as_CallStaticJava()->_name;
      assert(name != nullptr, "no name");
      // no arg escapes through uncommon traps
      if (strcmp(name, "uncommon_trap") != 0) {
        // process_call_arguments() assumes that all arguments escape globally
!       const TypeTuple* d = call->tf()->domain();
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          const Type* at = d->field_at(i);
          if (at->isa_oopptr() != nullptr) {
            return true;
          }
--- 868,11 ---
      const char* name = call->as_CallStaticJava()->_name;
      assert(name != nullptr, "no name");
      // no arg escapes through uncommon traps
      if (strcmp(name, "uncommon_trap") != 0) {
        // process_call_arguments() assumes that all arguments escape globally
!       const TypeTuple* d = call->tf()->domain_sig();
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          const Type* at = d->field_at(i);
          if (at->isa_oopptr() != nullptr) {
            return true;
          }

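For this and the later domain() changes: with Valhalla a TypeFunc carries two argument tuples, the declared signature (domain_sig) and the scalarized calling convention (domain_cc), and each loop has to walk the tuple that matches the call's actual input edges. A stand-alone sketch of the difference, using a toy tuple type (the field names are made up, not the real TypeFunc API):

  #include <cstdio>
  #include <string>
  #include <vector>

  struct ToyTuple {
    std::vector<std::string> fields;
    size_t cnt() const { return fields.size(); }
  };

  int main() {
    // void m(MyValue v) where MyValue holds an oop field and an int field:
    ToyTuple domain_sig{{"MyValue"}};          // one declared parameter
    ToyTuple domain_cc{{"oop f1", "int f2"}};  // scalarized at the call site
    std::printf("sig parms=%zu, cc parms=%zu\n", domain_sig.cnt(), domain_cc.cnt());
    return 0;
  }
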
*** 929,10 ***
--- 942,21 ---
        if ((n->as_Call()->returns_pointer() &&
             n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
            (n->is_CallStaticJava() &&
             n->as_CallStaticJava()->is_boxing_method())) {
          add_call_node(n->as_Call());
+       } else if (n->as_Call()->tf()->returns_inline_type_as_fields()) {
+         bool returns_oop = false;
+         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !returns_oop; i++) {
+           ProjNode* pn = n->fast_out(i)->as_Proj();
+           if (pn->_con >= TypeFunc::Parms && pn->bottom_type()->isa_ptr()) {
+             returns_oop = true;
+           }
+         }
+         if (returns_oop) {
+           add_call_node(n->as_Call());
+         }
        }
      }
      return;
    }
    // Put this check here to process call arguments since some call nodes

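A sketch of the projection scan added above: a call returning an inline type as fields has one projection per returned field, so "does this call return an oop" becomes a scan for any pointer-typed projection at or past TypeFunc::Parms rather than a check of projection Parms alone (toy types; kParms stands in for TypeFunc::Parms):

  #include <vector>

  enum ToyKind { kInt, kPtr };
  struct ToyProj { int con; ToyKind kind; };
  const int kParms = 2;  // stand-in for TypeFunc::Parms

  bool returns_oop(const std::vector<ToyProj>& projs) {
    for (const ToyProj& p : projs) {
      if (p.con >= kParms && p.kind == kPtr) return true;
    }
    return false;
  }

  int main() {
    std::vector<ToyProj> projs = {{kParms, kInt}, {kParms + 1, kPtr}};
    return returns_oop(projs) ? 0 : 1;  // exits 0: second returned field is an oop
  }
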
*** 960,10 ***
--- 984,11 ---
      }
      case Op_CastX2P: {
        map_ideal_node(n, phantom_obj);
        break;
      }
+     case Op_InlineType:
      case Op_CastPP:
      case Op_CheckCastPP:
      case Op_EncodeP:
      case Op_DecodeN:
      case Op_EncodePKlass:

*** 1031,12 ***
        }
        break;
      }
      case Op_Proj: {
        // we are only interested in the oop result projection from a call
!       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
!           n->in(0)->as_Call()->returns_pointer()) {
          add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
        }
        break;
      }
      case Op_Rethrow: // Exception object escapes
--- 1056,14 ---
        }
        break;
      }
      case Op_Proj: {
        // we are only interested in the oop result projection from a call
!       if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() &&
!           (n->in(0)->as_Call()->returns_pointer() || n->bottom_type()->isa_ptr())) {
+         assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
+                n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
          add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
        }
        break;
      }
      case Op_Rethrow: // Exception object escapes

*** 1134,10 ***
--- 1161,11 ---
        PointsToNode* ptn_base = ptnode_adr(base->_idx);
        assert(ptn_base != nullptr, "field's base should be registered");
        add_base(n_ptn->as_Field(), ptn_base);
        break;
      }
+     case Op_InlineType:
      case Op_CastPP:
      case Op_CheckCastPP:
      case Op_EncodeP:
      case Op_DecodeN:
      case Op_EncodePKlass:

*** 1188,12 ***
        }
        break;
      }
      case Op_Proj: {
        // we are only interested in the oop result projection from a call
!       assert(n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
!              n->in(0)->as_Call()->returns_pointer(), "Unexpected node type");
        add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr);
        break;
      }
      case Op_Rethrow: // Exception object escapes
      case Op_Return: {
--- 1216,12 ---
        }
        break;
      }
      case Op_Proj: {
        // we are only interested in the oop result projection from a call
!       assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
!              n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
        add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr);
        break;
      }
      case Op_Rethrow: // Exception object escapes
      case Op_Return: {

*** 1365,11 ***
  #endif
    return false;
  }
  
  void ConnectionGraph::add_call_node(CallNode* call) {
!   assert(call->returns_pointer(), "only for call which returns pointer");
    uint call_idx = call->_idx;
    if (call->is_Allocate()) {
      Node* k = call->in(AllocateNode::KlassNode);
      const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
    assert(kt != nullptr, "TypeKlassPtr required.");
--- 1393,11 ---
  #endif
    return false;
  }
  
  void ConnectionGraph::add_call_node(CallNode* call) {
!   assert(call->returns_pointer() || call->tf()->returns_inline_type_as_fields(), "only for call which returns pointer");
    uint call_idx = call->_idx;
    if (call->is_Allocate()) {
      Node* k = call->in(AllocateNode::KlassNode);
      const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
    assert(kt != nullptr, "TypeKlassPtr required.");

*** 1441,11 ***
      // For a static call, we know exactly what method is being called.
      // Use bytecode estimator to record whether the call's return value escapes.
      ciMethod* meth = call->as_CallJava()->method();
      if (meth == nullptr) {
        const char* name = call->as_CallStaticJava()->_name;
!       assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");
        // Returns a newly allocated non-escaped object.
        add_java_object(call, PointsToNode::NoEscape);
        set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
      } else if (meth->is_boxing_method()) {
        // Returns boxing object
--- 1469,12 ---
      // For a static call, we know exactly what method is being called.
      // Use bytecode estimator to record whether the call's return value escapes.
      ciMethod* meth = call->as_CallJava()->method();
      if (meth == nullptr) {
        const char* name = call->as_CallStaticJava()->_name;
!       assert(strncmp(name, "_multianewarray", 15) == 0 ||
+              strncmp(name, "_load_unknown_inline", 20) == 0, "TODO: add failed case check");
        // Returns a newly allocated non-escaped object.
        add_java_object(call, PointsToNode::NoEscape);
        set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
      } else if (meth->is_boxing_method()) {
        // Returns boxing object

*** 1472,11 ***
        // its fields will be marked as NoEscape at least.
          add_java_object(call, PointsToNode::NoEscape);
          set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
        } else {
          // Determine whether any arguments are returned.
!         const TypeTuple* d = call->tf()->domain();
          bool ret_arg = false;
          for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
            if (d->field_at(i)->isa_ptr() != nullptr &&
                call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
              ret_arg = true;
--- 1501,11 ---
        // its fields will be marked as NoEscape at least.
          add_java_object(call, PointsToNode::NoEscape);
          set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
        } else {
          // Determine whether any arguments are returned.
!         const TypeTuple* d = call->tf()->domain_cc();
          bool ret_arg = false;
          for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
            if (d->field_at(i)->isa_ptr() != nullptr &&
                call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
              ret_arg = true;

*** 1520,11 ***
        // fall through
      case Op_CallLeafVector:
      case Op_CallLeaf: {
      // Stub calls, objects do not escape but they are not scalar replaceable.
        // Adjust escape state for outgoing arguments.
!       const TypeTuple * d = call->tf()->domain();
        bool src_has_oops = false;
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          const Type* at = d->field_at(i);
          Node *arg = call->in(i);
          if (arg == nullptr) {
--- 1549,11 ---
        // fall through
      case Op_CallLeafVector:
      case Op_CallLeaf: {
      // Stub calls, objects do not escape but they are not scalar replaceable.
        // Adjust escape state for outgoing arguments.
!       const TypeTuple * d = call->tf()->domain_sig();
        bool src_has_oops = false;
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          const Type* at = d->field_at(i);
          Node *arg = call->in(i);
          if (arg == nullptr) {

*** 1551,11 ***
          if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
            assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
                 aat->isa_ptr() != nullptr, "expecting a Ptr");
            bool arg_has_oops = aat->isa_oopptr() &&
                                (aat->isa_instptr() ||
!                                (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)));
            if (i == TypeFunc::Parms) {
              src_has_oops = arg_has_oops;
            }
            //
            // src or dst could be j.l.Object when other is basic type array:
--- 1580,14 ---
          if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
            assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
                 aat->isa_ptr() != nullptr, "expecting a Ptr");
            bool arg_has_oops = aat->isa_oopptr() &&
                                (aat->isa_instptr() ||
!                                (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)) ||
+                                (aat->isa_aryptr() && aat->isa_aryptr()->elem() != nullptr &&
+                                 aat->isa_aryptr()->is_flat() &&
+                                 aat->isa_aryptr()->elem()->inline_klass()->contains_oops()));
            if (i == TypeFunc::Parms) {
              src_has_oops = arg_has_oops;
            }
            //
            // src or dst could be j.l.Object when other is basic type array:

*** 1600,10 ***
--- 1632,12 ---
                    strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
+                   strcmp(call->as_CallLeaf()->_name, "load_unknown_inline") == 0 ||
+                   strcmp(call->as_CallLeaf()->_name, "store_unknown_inline") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "arraysort_stub") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "array_partition_stub") == 0 ||

*** 1664,11 ***
        }
      BCEscapeAnalyzer* call_analyzer = (meth != nullptr) ? meth->get_bcea() : nullptr;
        // fall-through if not a Java method or no analyzer information
        if (call_analyzer != nullptr) {
          PointsToNode* call_ptn = ptnode_adr(call->_idx);
!         const TypeTuple* d = call->tf()->domain();
          for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
            const Type* at = d->field_at(i);
            int k = i - TypeFunc::Parms;
            Node* arg = call->in(i);
            PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
--- 1699,11 ---
        }
      BCEscapeAnalyzer* call_analyzer = (meth != nullptr) ? meth->get_bcea() : nullptr;
        // fall-through if not a Java method or no analyzer information
        if (call_analyzer != nullptr) {
          PointsToNode* call_ptn = ptnode_adr(call->_idx);
!         const TypeTuple* d = call->tf()->domain_cc();
          for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
            const Type* at = d->field_at(i);
            int k = i - TypeFunc::Parms;
            Node* arg = call->in(i);
            PointsToNode* arg_ptn = ptnode_adr(arg->_idx);

*** 1708,11 ***
      }
      default: {
        // Fall-through here if not a Java method or no analyzer information
        // or some other type of call, assume the worst case: all arguments
        // globally escape.
!       const TypeTuple* d = call->tf()->domain();
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          const Type* at = d->field_at(i);
          if (at->isa_oopptr() != nullptr) {
            Node* arg = call->in(i);
            if (arg->is_AddP()) {
--- 1743,11 ---
      }
      default: {
        // Fall-through here if not a Java method or no analyzer information
        // or some other type of call, assume the worst case: all arguments
        // globally escape.
!       const TypeTuple* d = call->tf()->domain_cc();
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          const Type* at = d->field_at(i);
          if (at->isa_oopptr() != nullptr) {
            Node* arg = call->in(i);
            if (arg->is_AddP()) {

*** 2121,30 ***
  }
  
  // Find fields initializing values for allocations.
  int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
    assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
    Node* alloc = pta->ideal_node();
  
    // Do nothing for Allocate nodes since their field values are
    // "known" unless they are initialized by arraycopy/clone.
    if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
!     return 0;
    }
!   assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");
  #ifdef ASSERT
!   if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == nullptr) {
      const char* name = alloc->as_CallStaticJava()->_name;
!     assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");
    }
  #endif
    // Non-escaped allocation returned from Java or runtime call has unknown values in fields.
    int new_edges = 0;
    for (EdgeIterator i(pta); i.has_next(); i.next()) {
      PointsToNode* field = i.get();
      if (field->is_Field() && field->as_Field()->is_oop()) {
!       if (add_edge(field, phantom_obj)) {
          // New edge was added
          new_edges++;
          add_field_uses_to_worklist(field->as_Field());
        }
      }
--- 2156,40 ---
  }
  
  // Find fields initializing values for allocations.
  int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
    assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
+   PointsToNode* init_val = phantom_obj;
    Node* alloc = pta->ideal_node();
  
    // Do nothing for Allocate nodes since their field values are
    // "known" unless they are initialized by arraycopy/clone.
    if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
!     if (alloc->as_Allocate()->in(AllocateNode::DefaultValue) != nullptr) {
+       // Non-flat inline type arrays are initialized with
+       // the default value instead of null. Handle them here.
+       init_val = ptnode_adr(alloc->as_Allocate()->in(AllocateNode::DefaultValue)->_idx);
+       assert(init_val != nullptr, "default value should be registered");
+     } else {
+       return 0;
+     }
    }
!   // Non-escaped allocation returned from Java or runtime call has unknown values in fields.
+   assert(pta->arraycopy_dst() || alloc->is_CallStaticJava() || init_val != phantom_obj, "sanity");
  #ifdef ASSERT
!   if (alloc->is_CallStaticJava() && alloc->as_CallStaticJava()->method() == nullptr) {
      const char* name = alloc->as_CallStaticJava()->_name;
!     assert(strncmp(name, "_multianewarray", 15) == 0 ||
+            strncmp(name, "_load_unknown_inline", 20) == 0, "sanity");
    }
  #endif
    // Non-escaped allocation returned from Java or runtime call has unknown values in fields.
    int new_edges = 0;
    for (EdgeIterator i(pta); i.has_next(); i.next()) {
      PointsToNode* field = i.get();
      if (field->is_Field() && field->as_Field()->is_oop()) {
!       if (add_edge(field, init_val)) {
          // New edge was added
          new_edges++;
          add_field_uses_to_worklist(field->as_Field());
        }
      }

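A toy model of the init_val choice above: when the Allocate carries a DefaultValue input (non-flat inline type arrays are initialized with the default value instead of null, per the comment in the hunk), field edges should target that registered node, with phantom_obj only as the fallback (the names below are illustrative, not ConnectionGraph API):

  #include <cassert>

  struct ToyPTNode { const char* label; };

  ToyPTNode phantom{"phantom_obj"};
  ToyPTNode dflt{"default value"};

  // Mirrors the branch above: prefer the registered default-value node.
  const ToyPTNode* init_value(bool has_default_value_input) {
    return has_default_value_input ? &dflt : &phantom;
  }

  int main() {
    assert(init_value(true) == &dflt);     // non-flat inline type array allocation
    assert(init_value(false) == &phantom); // unknown runtime-call result
    return 0;
  }
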
*** 2155,11 ***
  // Find fields initializing values for allocations.
  int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) {
    assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
    Node* alloc = pta->ideal_node();
    // Do nothing for Call nodes since their field values are unknown.
!   if (!alloc->is_Allocate()) {
      return 0;
    }
    InitializeNode* ini = alloc->as_Allocate()->initialization();
    bool visited_bottom_offset = false;
    GrowableArray<int> offsets_worklist;
--- 2200,11 ---
  // Find fields initializing values for allocations.
  int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) {
    assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
    Node* alloc = pta->ideal_node();
    // Do nothing for Call nodes since their field values are unknown.
!   if (!alloc->is_Allocate() || alloc->as_Allocate()->in(AllocateNode::DefaultValue) != nullptr) {
      return 0;
    }
    InitializeNode* ini = alloc->as_Allocate()->initialization();
    bool visited_bottom_offset = false;
    GrowableArray<int> offsets_worklist;

*** 2241,13 ***
                  }
                }
                if (missed_obj != nullptr) {
                  tty->print_cr("----------field---------------------------------");
                  field->dump();
!                 tty->print_cr("----------missed referernce to object-----------");
                  missed_obj->dump();
!                 tty->print_cr("----------object referernced by init store -----");
                  store->dump();
                  val->dump();
                  assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
                }
              }
--- 2286,13 ---
                  }
                }
                if (missed_obj != nullptr) {
                  tty->print_cr("----------field---------------------------------");
                  field->dump();
!                 tty->print_cr("----------missed reference to object------------");
                  missed_obj->dump();
!                 tty->print_cr("----------object referenced by init store-------");
                  store->dump();
                  val->dump();
                  assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
                }
              }

*** 2545,11 ***
      for (int i = 0; i < cnt; i++) {
        Node *n = C->macro_node(i);
        if (n->is_AbstractLock()) { // Lock and Unlock nodes
          AbstractLockNode* alock = n->as_AbstractLock();
          if (!alock->is_non_esc_obj()) {
!           if (not_global_escape(alock->obj_node())) {
              assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
              // The lock could be marked eliminated by lock coarsening
              // code during first IGVN before EA. Replace coarsened flag
              // to eliminate all associated locks/unlocks.
  #ifdef ASSERT
--- 2590,12 ---
      for (int i = 0; i < cnt; i++) {
        Node *n = C->macro_node(i);
        if (n->is_AbstractLock()) { // Lock and Unlock nodes
          AbstractLockNode* alock = n->as_AbstractLock();
          if (!alock->is_non_esc_obj()) {
!           const Type* obj_type = igvn->type(alock->obj_node());
+           if (not_global_escape(alock->obj_node()) && !obj_type->is_inlinetypeptr()) {
              assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
              // The lock could be marked eliminated by lock coarsening
              // code during first IGVN before EA. Replace coarsened flag
              // to eliminate all associated locks/unlocks.
  #ifdef ASSERT

*** 2586,15 ***
    // MemBarStoreStore node if the allocated object never escapes.
    for (int i = 0; i < storestore_worklist.length(); i++) {
      Node* storestore = storestore_worklist.at(i);
      Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
      if (alloc->is_Allocate() && not_global_escape(alloc)) {
!       MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
!       mb->init_req(TypeFunc::Memory,  storestore->in(TypeFunc::Memory));
!       mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
!       igvn->register_new_node_with_optimizer(mb);
!       igvn->replace_node(storestore, mb);
      }
    }
  }
  
  // Optimize objects compare.
--- 2632,20 ---
    // MemBarStoreStore node if the allocated object never escapes.
    for (int i = 0; i < storestore_worklist.length(); i++) {
      Node* storestore = storestore_worklist.at(i);
      Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
      if (alloc->is_Allocate() && not_global_escape(alloc)) {
!       if (alloc->in(AllocateNode::InlineType) != nullptr) {
!         // Non-escaping inline type buffer allocations don't require a membar
!         storestore->as_MemBar()->remove(_igvn);
!       } else {
!         MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
+         mb->init_req(TypeFunc::Memory,  storestore->in(TypeFunc::Memory));
+         mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
+         igvn->register_new_node_with_optimizer(mb);
+         igvn->replace_node(storestore, mb);
+       }
      }
    }
  }
  
  // Optimize objects compare.

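A sketch of the storestore decision above: a non-escaping allocation normally keeps a plain CPU-order membar in place of the StoreStore barrier, but per the comment in the hunk a non-escaping inline type buffer allocation requires no barrier at all, so it is removed outright (toy enum and function, hypothetical names):

  #include <cassert>

  enum class BarrierAction { Remove, DowngradeToCpuOrder };

  // Buffer allocations for inline types need no barrier at all; other
  // non-escaping allocations are downgraded to a CPU-order membar.
  BarrierAction barrier_for_non_escaping(bool is_inline_type_buffer) {
    return is_inline_type_buffer ? BarrierAction::Remove
                                 : BarrierAction::DowngradeToCpuOrder;
  }

  int main() {
    assert(barrier_for_non_escaping(true) == BarrierAction::Remove);
    assert(barrier_for_non_escaping(false) == BarrierAction::DowngradeToCpuOrder);
    return 0;
  }
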
*** 2744,12 ***
    dst->set_arraycopy_dst();
  }
  
  bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
    const Type* adr_type = n->as_AddP()->bottom_type();
    BasicType bt = T_INT;
!   if (offset == Type::OffsetBot) {
      // Check only oop fields.
      if (!adr_type->isa_aryptr() ||
          adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
          adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) {
        // OffsetBot is used to reference array's element. Ignore first AddP.
--- 2795,13 ---
    dst->set_arraycopy_dst();
  }
  
  bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
    const Type* adr_type = n->as_AddP()->bottom_type();
+   int field_offset = adr_type->isa_aryptr() ? adr_type->isa_aryptr()->field_offset().get() : Type::OffsetBot;
    BasicType bt = T_INT;
!   if (offset == Type::OffsetBot && field_offset == Type::OffsetBot) {
      // Check only oop fields.
      if (!adr_type->isa_aryptr() ||
          adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
          adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) {
        // OffsetBot is used to reference array's element. Ignore first AddP.

*** 2757,11 ***
          bt = T_OBJECT;
        }
      }
    } else if (offset != oopDesc::klass_offset_in_bytes()) {
      if (adr_type->isa_instptr()) {
!       ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
        if (field != nullptr) {
          bt = field->layout_type();
        } else {
          // Check for unsafe oop field access
          if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
--- 2809,11 ---
          bt = T_OBJECT;
        }
      }
    } else if (offset != oopDesc::klass_offset_in_bytes()) {
      if (adr_type->isa_instptr()) {
!       ciField* field = _compile->alias_type(adr_type->is_ptr())->field();
        if (field != nullptr) {
          bt = field->layout_type();
        } else {
          // Check for unsafe oop field access
          if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||

*** 2776,12 ***
        if (offset == arrayOopDesc::length_offset_in_bytes()) {
          // Ignore array length load.
        } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) {
          // Ignore first AddP.
        } else {
!         const Type* elemtype = adr_type->isa_aryptr()->elem();
!         bt = elemtype->array_element_basic_type();
        }
      } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
        // Allocation initialization, ThreadLocal field access, unsafe access
        if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
            n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
--- 2828,18 ---
        if (offset == arrayOopDesc::length_offset_in_bytes()) {
          // Ignore array length load.
        } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) {
          // Ignore first AddP.
        } else {
!         const Type* elemtype = adr_type->is_aryptr()->elem();
!         if (adr_type->is_aryptr()->is_flat() && field_offset != Type::OffsetBot) {
+           ciInlineKlass* vk = elemtype->inline_klass();
+           field_offset += vk->first_field_offset();
+           bt = vk->get_field_by_offset(field_offset, false)->layout_type();
+         } else {
+           bt = elemtype->array_element_basic_type();
+         }
        }
      } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
        // Allocation initialization, ThreadLocal field access, unsafe access
        if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
            n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||

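For the flat-array branch above: a flat array's element is an inline klass laid out in place, and the address type carries a field offset relative to the element start, which must be rebased by the klass's first-field offset before the field and its BasicType can be resolved. A stand-alone sketch with a made-up layout (ToyInlineKlass and all offsets are illustrative):

  #include <cassert>

  struct ToyField { int offset; char basic_type; };  // 'L' = oop, 'I' = int

  struct ToyInlineKlass {
    int first_field_offset;  // where embedded fields start in the layout
    ToyField fields[2];
    char field_type_at(int off) const {
      for (const ToyField& f : fields) {
        if (f.offset == off) return f.basic_type;
      }
      return '?';
    }
  };

  int main() {
    ToyInlineKlass vk{/*first_field_offset=*/16, {{16, 'L'}, {24, 'I'}}};
    int field_offset = 8;                   // element-relative, from the address type
    field_offset += vk.first_field_offset;  // rebase into the klass layout
    assert(vk.field_type_at(field_offset) == 'I');
    return 0;
  }
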
*** 2960,13 ***
      assert(offs != Type::OffsetBot ||
             adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
             "offset must be a constant or it is initialization of array");
      return offs;
    }
!   const TypePtr *t_ptr = adr_type->isa_ptr();
-   assert(t_ptr != nullptr, "must be a pointer type");
-   return t_ptr->offset();
  }
  
  Node* ConnectionGraph::get_addp_base(Node *addp) {
    assert(addp->is_AddP(), "must be AddP");
    //
--- 3018,11 ---
      assert(offs != Type::OffsetBot ||
             adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
             "offset must be a constant or it is initialization of array");
      return offs;
    }
!   return adr_type->is_ptr()->flat_offset();
  }
  
  Node* ConnectionGraph::get_addp_base(Node *addp) {
    assert(addp->is_AddP(), "must be AddP");
    //

*** 3116,13 ***
      // compute an appropriate address type (cases #3 and #5).
      assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
      assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
      intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
      assert(offs != Type::OffsetBot, "offset must be a constant");
!     t = base_t->add_offset(offs)->is_oopptr();
    }
!   int inst_id =  base_t->instance_id();
    assert(!t->is_known_instance() || t->instance_id() == inst_id,
                               "old type must be non-instance or match new type");
  
  // The type 't' could be a subclass of 'base_t'.
  // As a result t->offset() could be larger than base_t's size and it will
--- 3172,20 ---
      // compute an appropriate address type (cases #3 and #5).
      assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
      assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
      intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
      assert(offs != Type::OffsetBot, "offset must be a constant");
!     if (base_t->isa_aryptr() != nullptr) {
+       // In the case of a flat inline type array, each field has its
+       // own slice so we need to extract the field being accessed from
+       // the address computation
+       t = base_t->isa_aryptr()->add_field_offset_and_offset(offs)->is_oopptr();
+     } else {
+       t = base_t->add_offset(offs)->is_oopptr();
+     }
    }
!   int inst_id = base_t->instance_id();
    assert(!t->is_known_instance() || t->instance_id() == inst_id,
                               "old type must be non-instance or match new type");
  
  // The type 't' could be a subclass of 'base_t'.
  // As a result t->offset() could be larger than base_t's size and it will

*** 3132,21 ***
  // It could happen on a subclass's branch (from the type profiling
    // inlining) which was not eliminated during parsing since the exactness
    // of the allocation type was not propagated to the subclass type check.
    //
  // Or the type 't' could be unrelated to 'base_t' at all.
!   // It could happened when CHA type is different from MDO type on a dead path
    // (for example, from instanceof check) which is not collapsed during parsing.
    //
    // Do nothing for such AddP node and don't process its users since
    // this code branch will go away.
    //
    if (!t->is_known_instance() &&
        !base_t->maybe_java_subtype_of(t)) {
       return false; // bail out
    }
!   const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
    // Do NOT remove the next line: ensure a new alias index is allocated
    // for the instance type. Note: C++ will not remove it since the call
    // has side effect.
    int alias_idx = _compile->get_alias_index(tinst);
    igvn->set_type(addp, tinst);
--- 3195,32 ---
  // It could happen on a subclass's branch (from the type profiling
    // inlining) which was not eliminated during parsing since the exactness
    // of the allocation type was not propagated to the subclass type check.
    //
  // Or the type 't' could be unrelated to 'base_t' at all.
!   // It could happen when CHA type is different from MDO type on a dead path
    // (for example, from instanceof check) which is not collapsed during parsing.
    //
    // Do nothing for such AddP node and don't process its users since
    // this code branch will go away.
    //
    if (!t->is_known_instance() &&
        !base_t->maybe_java_subtype_of(t)) {
       return false; // bail out
    }
!   const TypePtr* tinst = base_t->add_offset(t->offset());
+   if (tinst->isa_aryptr() && t->isa_aryptr()) {
+     // In the case of a flat inline type array, each field has its
+     // own slice so we need to keep track of the field being accessed.
+     tinst = tinst->is_aryptr()->with_field_offset(t->is_aryptr()->field_offset().get());
+     // Keep array properties (not flat/null-free)
+     tinst = tinst->is_aryptr()->update_properties(t->is_aryptr());
+     if (tinst == nullptr) {
+       return false; // Skip dead path with inconsistent properties
+     }
+   }
+ 
    // Do NOT remove the next line: ensure a new alias index is allocated
    // for the instance type. Note: C++ will not remove it since the call
    // has side effect.
    int alias_idx = _compile->get_alias_index(tinst);
    igvn->set_type(addp, tinst);

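A sketch of the per-field slicing above: a flat array gets one memory slice per element field, so the type used for alias indexing has to keep both the array offset and the element-relative field offset, and two accesses share a slice only when both match (toy alias table, not the real Compile::get_alias_index):

  #include <cassert>
  #include <map>
  #include <utility>

  using ToyAliasKey = std::pair<int /*offset*/, int /*field_offset*/>;

  // Hand out one alias index per distinct (offset, field_offset) pair.
  int alias_index(std::map<ToyAliasKey, int>& table, ToyAliasKey key) {
    auto it = table.find(key);
    if (it != table.end()) return it->second;
    int idx = (int)table.size() + 1;
    table.emplace(key, idx);
    return idx;
  }

  int main() {
    std::map<ToyAliasKey, int> table;
    int a = alias_index(table, {16, 0});  // element field at offset 0
    int b = alias_index(table, {16, 8});  // element field at offset 8
    assert(a != b);                       // separate slices, no false aliasing
    return 0;
  }
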
*** 3842,10 ***
--- 3916,17 ---
            tn_t = tn_type->make_ptr()->isa_oopptr();
          } else {
            tn_t = tn_type->isa_oopptr();
          }
          if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) {
+           if (tn_t->isa_aryptr()) {
+             // Keep array properties (not flat/null-free)
+             tinst = tinst->is_aryptr()->update_properties(tn_t->is_aryptr());
+             if (tinst == nullptr) {
+               continue; // Skip dead path with inconsistent properties
+             }
+           }
            if (tn_type->isa_narrowoop()) {
              tn_type = tinst->make_narrowoop();
            } else {
              tn_type = tinst;
            }

*** 3867,11 ***
        continue;
      }
      // push allocation's users on appropriate worklist
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        Node *use = n->fast_out(i);
!       if(use->is_Mem() && use->in(MemNode::Address) == n) {
          // Load/store to instance's field
          memnode_worklist.append_if_missing(use);
        } else if (use->is_MemBar()) {
          if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
            memnode_worklist.append_if_missing(use);
--- 3948,11 ---
        continue;
      }
      // push allocation's users on appropriate worklist
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        Node *use = n->fast_out(i);
!       if (use->is_Mem() && use->in(MemNode::Address) == n) {
          // Load/store to instance's field
          memnode_worklist.append_if_missing(use);
        } else if (use->is_MemBar()) {
          if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
            memnode_worklist.append_if_missing(use);

*** 3903,10 ***
--- 3984,13 ---
        } else if (use->Opcode() == Op_EncodeISOArray) {
          if (use->in(MemNode::Memory) == n || use->in(3) == n) {
            // EncodeISOArray overwrites destination array
            memnode_worklist.append_if_missing(use);
          }
+       } else if (use->Opcode() == Op_Return) {
+         // Allocation is referenced by field of returned inline type
+         assert(_compile->tf()->returns_inline_type_as_fields(), "EA: unexpected reference by ReturnNode");
        } else {
          uint op = use->Opcode();
          if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
              (use->in(MemNode::Memory) == n)) {
            // They overwrite memory edge corresponding to destination array,

*** 3916,11 ***
                op == Op_FastLock || op == Op_AryEq ||
                op == Op_StrComp || op == Op_CountPositives ||
                op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
                op == Op_StrEquals || op == Op_VectorizedHashCode ||
                op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
!               op == Op_SubTypeCheck ||
                BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
            n->dump();
            use->dump();
            assert(false, "EA: missing allocation reference path");
          }
--- 4000,11 ---
                op == Op_FastLock || op == Op_AryEq ||
                op == Op_StrComp || op == Op_CountPositives ||
                op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
                op == Op_StrEquals || op == Op_VectorizedHashCode ||
                op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
!               op == Op_SubTypeCheck || op == Op_InlineType || op == Op_FlatArrayCheck ||
                BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
            n->dump();
            use->dump();
            assert(false, "EA: missing allocation reference path");
          }

*** 4009,10 ***
--- 4093,13 ---
      } else if (n->Opcode() == Op_StrCompressedCopy ||
                 n->Opcode() == Op_EncodeISOArray) {
        // get the memory projection
        n = n->find_out_with(Op_SCMemProj);
        assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
+     } else if (n->is_CallLeaf() && n->as_CallLeaf()->_name != nullptr &&
+                strcmp(n->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
+       n = n->as_CallLeaf()->proj_out(TypeFunc::Memory);
      } else {
        assert(n->is_Mem(), "memory node required.");
        Node *addr = n->in(MemNode::Address);
        const Type *addr_t = igvn->type(addr);
        if (addr_t == Type::TOP) {

*** 4051,29 ***
        } else if (use->is_MemBar()) {
          if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
            memnode_worklist.append_if_missing(use);
          }
  #ifdef ASSERT
!       } else if(use->is_Mem()) {
          assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
        } else if (use->is_MergeMem()) {
          assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
        } else if (use->Opcode() == Op_EncodeISOArray) {
          if (use->in(MemNode::Memory) == n || use->in(3) == n) {
            // EncodeISOArray overwrites destination array
            memnode_worklist.append_if_missing(use);
          }
        } else {
          uint op = use->Opcode();
          if ((use->in(MemNode::Memory) == n) &&
              (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
            // They overwrite memory edge corresponding to destination array,
            memnode_worklist.append_if_missing(use);
          } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
                op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
                op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
!               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
            n->dump();
            use->dump();
            assert(false, "EA: missing memory path");
          }
  #endif
--- 4138,33 ---
        } else if (use->is_MemBar()) {
          if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
            memnode_worklist.append_if_missing(use);
          }
  #ifdef ASSERT
!       } else if (use->is_Mem()) {
          assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
        } else if (use->is_MergeMem()) {
          assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
        } else if (use->Opcode() == Op_EncodeISOArray) {
          if (use->in(MemNode::Memory) == n || use->in(3) == n) {
            // EncodeISOArray overwrites destination array
            memnode_worklist.append_if_missing(use);
          }
+       } else if (use->is_CallLeaf() && use->as_CallLeaf()->_name != nullptr &&
+                  strcmp(use->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
+         // store_unknown_inline overwrites destination array
+         memnode_worklist.append_if_missing(use);
        } else {
          uint op = use->Opcode();
          if ((use->in(MemNode::Memory) == n) &&
              (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
            // They overwrite memory edge corresponding to destination array,
            memnode_worklist.append_if_missing(use);
          } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
                op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
                op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
!               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar || op == Op_FlatArrayCheck)) {
            n->dump();
            use->dump();
            assert(false, "EA: missing memory path");
          }
  #endif

*** 4156,11 ***
    //  Phase 4:  Update the inputs of non-instance memory Phis and
    //            the Memory input of memnodes
    // First update the inputs of any non-instance Phi's from
    // which we split out an instance Phi.  Note we don't have
    // to recursively process Phi's encountered on the input memory
!   // chains as is done in split_memory_phi() since they  will
    // also be processed here.
    for (int j = 0; j < orig_phis.length(); j++) {
      PhiNode *phi = orig_phis.at(j);
      int alias_idx = _compile->get_alias_index(phi->adr_type());
      igvn->hash_delete(phi);
--- 4247,11 ---
    //  Phase 4:  Update the inputs of non-instance memory Phis and
    //            the Memory input of memnodes
    // First update the inputs of any non-instance Phi's from
    // which we split out an instance Phi.  Note we don't have
    // to recursively process Phi's encountered on the input memory
!   // chains as is done in split_memory_phi() since they will
    // also be processed here.
    for (int j = 0; j < orig_phis.length(); j++) {
      PhiNode *phi = orig_phis.at(j);
      int alias_idx = _compile->get_alias_index(phi->adr_type());
      igvn->hash_delete(phi);