
src/hotspot/share/opto/loopopts.cpp

@@ -31,10 +31,11 @@
  #include "opto/callnode.hpp"
  #include "opto/castnode.hpp"
  #include "opto/connode.hpp"
  #include "opto/castnode.hpp"
  #include "opto/divnode.hpp"
+ #include "opto/inlinetypenode.hpp"
  #include "opto/loopnode.hpp"
  #include "opto/matcher.hpp"
  #include "opto/mulnode.hpp"
  #include "opto/movenode.hpp"
  #include "opto/opaquenode.hpp"

@@ -62,10 +63,16 @@
    if (n->Opcode() == Op_CastII && region->is_CountedLoop() &&
        n->in(1) == region->as_CountedLoop()->phi()) {
      return nullptr;
    }
  
+   // Inline types should not be split through Phis because they cannot be merged
+   // through Phi nodes directly. Instead, each of their value inputs needs to be
+   // merged individually.
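+   // For example, an InlineTypeNode keeps a separate input per field, so merging
+   // it at a region requires a Phi per field value (see
+   // PhiNode::push_inline_types_through) rather than a single Phi for the node.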
+   if (n->is_InlineType()) {
+     return nullptr;
+   }
+ 
    if (cannot_split_division(n, region)) {
      return nullptr;
    }
  
    int wins = 0;

@@ -758,10 +765,14 @@
      }
      // Add in cost any speculative ops
      for (uint j = 1; j < region->req(); j++) {
        Node *proj = region->in(j);
        Node *inp = phi->in(j);
+       if (inp->isa_InlineType()) {
+         // TODO 8302217 This prevents PhiNode::push_inline_types_through
+         return nullptr;
+       }
        if (get_ctrl(inp) == proj) { // Found local op
          cost++;
          // Check for a chain of dependent ops; these will all become
          // speculative in a CMOV.
          for (uint k = 1; k < inp->req(); k++)

@@ -1086,10 +1097,58 @@
        }
      }
    }
  }
  
+ // We can't use immutable memory for the flat array check because we are loading the mark word, which is
+ // mutable. Although the bits we are interested in are immutable (we check for markWord::unlocked_value),
+ // we need to use raw memory to not break anti-dependency analysis. The code below still attempts to move
+ // flat array checks out of loops, mainly to enable loop unswitching.
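+ // For example, given a loop-invariant array 'a' of values that might be flat
+ // (hypothetical Java shape):
+ //   for (int i = 0; i < a.length; i++) { v += a[i].x; }
+ // the flat array check emitted for the element load does not depend on the
+ // iteration variable and can be performed once outside the loop.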
+ void PhaseIdealLoop::move_flat_array_check_out_of_loop(Node* n) {
+   // Skip checks with more than one array input
+   if (n->req() > 3) {
+     return;
+   }
+   Node* mem = n->in(FlatArrayCheckNode::Memory);
+   Node* array = n->in(FlatArrayCheckNode::ArrayOrKlass)->uncast();
+   IdealLoopTree* check_loop = get_loop(get_ctrl(n));
+   IdealLoopTree* ary_loop = get_loop(get_ctrl(array));
+ 
+   // Check if array is loop invariant
+   if (!check_loop->is_member(ary_loop)) {
+     // Walk up memory graph from the check until we leave the loop
+     VectorSet wq;
+     wq.set(mem->_idx);
+     while (check_loop->is_member(get_loop(ctrl_or_self(mem)))) {
+       if (mem->is_Phi()) {
+         mem = mem->in(1);
+       } else if (mem->is_MergeMem()) {
+         mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
+       } else if (mem->is_Proj()) {
+         mem = mem->in(0);
+       } else if (mem->is_MemBar() || mem->is_SafePoint()) {
+         mem = mem->in(TypeFunc::Memory);
+       } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
+         mem = mem->in(MemNode::Memory);
+       } else {
+ #ifdef ASSERT
+         mem->dump();
+ #endif
+         ShouldNotReachHere();
+       }
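+       // Give up if we see the same node twice, i.e. the walk is stuck in a cycle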
+       if (wq.test_set(mem->_idx)) {
+         return;
+       }
+     }
+     // Replace memory input and re-compute ctrl to move the check out of the loop
+     _igvn.replace_input_of(n, FlatArrayCheckNode::Memory, mem);
+     set_ctrl_and_loop(n, get_early_ctrl(n));
+     Node* bol = n->unique_out();
+     set_ctrl_and_loop(bol, get_early_ctrl(bol));
+   }
+ }
+ 
  // Splitting some nodes that take a counted loop phi as input at a counted
  // loop can cause vectorization of some expressions to fail
  bool PhaseIdealLoop::split_thru_phi_could_prevent_vectorization(Node* n, Node* n_blk) {
    if (!n_blk->is_CountedLoop()) {
      return false;

@@ -1117,10 +1176,16 @@
      return n;
    }
    if (n->is_Proj()) {
      return n;
    }
+ 
+   if (n->isa_FlatArrayCheck()) {
+     move_flat_array_check_out_of_loop(n);
+     return n;
+   }
+ 
    // Do not clone-up CmpFXXX variations, as these are always
    // followed by a CmpI
    if (n->is_Cmp()) {
      return n;
    }

@@ -1396,16 +1461,118 @@
    }
  
    return out_le;
  }
  
+ bool PhaseIdealLoop::flat_array_element_type_check(Node* n) {
+   // If the CmpP is a subtype check for a value that has just been loaded from
+   // an array, the subtype check guarantees that the value can't be stored in a
+   // flat array, and the load of the value happens with a flat array check, then
+   // push the type check through the phi of the flat array check. This needs
+   // special logic because the subtype check's input is not a phi but a
+   // LoadKlass that must first be cloned through the phi.
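+   // A typical (hypothetical) Java shape:
+   //   Object o = array[i];                 // load with a flat array check
+   //   if (o instanceof SomeIdentityClass)  // subtype check: CmpP(LoadKlass, con)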
+   if (n->Opcode() != Op_CmpP) {
+     return false;
+   }
+ 
+   Node* klassptr = n->in(1);
+   Node* klasscon = n->in(2);
+ 
+   if (klassptr->is_DecodeNarrowPtr()) {
+     klassptr = klassptr->in(1);
+   }
+ 
+   if (klassptr->Opcode() != Op_LoadKlass && klassptr->Opcode() != Op_LoadNKlass) {
+     return false;
+   }
+ 
+   if (!klasscon->is_Con()) {
+     return false;
+   }
+ 
+   Node* addr = klassptr->in(MemNode::Address);
+ 
+   if (!addr->is_AddP()) {
+     return false;
+   }
+ 
+   intptr_t offset;
+   Node* obj = AddPNode::Ideal_base_and_offset(addr, &_igvn, offset);
+ 
+   if (obj == nullptr) {
+     return false;
+   }
+ 
+   assert(addr->in(AddPNode::Base) == addr->in(AddPNode::Address), "malformed AddP?");
+   if (obj->Opcode() == Op_CastPP) {
+     obj = obj->in(1);
+   }
+ 
+   if (!obj->is_Phi()) {
+     return false;
+   }
+ 
+   Node* region = obj->in(0);
+ 
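+   // For each path into the region, clone the address computation and the klass
+   // load (plus any CastPP/DecodeN) so that the klass is loaded from that path's
+   // input, and merge the clones with a new Phi.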
+   Node* phi = PhiNode::make_blank(region, n->in(1));
+   for (uint i = 1; i < region->req(); i++) {
+     Node* in = obj->in(i);
+     Node* ctrl = region->in(i);
+     if (addr->in(AddPNode::Base) != obj) {
+       Node* cast = addr->in(AddPNode::Base);
+       assert(cast->Opcode() == Op_CastPP && cast->in(0) != nullptr, "inconsistent subgraph");
+       Node* cast_clone = cast->clone();
+       cast_clone->set_req(0, ctrl);
+       cast_clone->set_req(1, in);
+       register_new_node(cast_clone, ctrl);
+       const Type* tcast = cast_clone->Value(&_igvn);
+       _igvn.set_type(cast_clone, tcast);
+       cast_clone->as_Type()->set_type(tcast);
+       in = cast_clone;
+     }
+     Node* addr_clone = addr->clone();
+     addr_clone->set_req(AddPNode::Base, in);
+     addr_clone->set_req(AddPNode::Address, in);
+     register_new_node(addr_clone, ctrl);
+     _igvn.set_type(addr_clone, addr_clone->Value(&_igvn));
+     Node* klassptr_clone = klassptr->clone();
+     klassptr_clone->set_req(MemNode::Address, addr_clone);
+     register_new_node(klassptr_clone, ctrl);
+     _igvn.set_type(klassptr_clone, klassptr_clone->Value(&_igvn));
+     if (klassptr != n->in(1)) {
+       Node* decode = n->in(1);
+       assert(decode->is_DecodeNarrowPtr(), "inconsistent subgraph");
+       Node* decode_clone = decode->clone();
+       decode_clone->set_req(1, klassptr_clone);
+       register_new_node(decode_clone, ctrl);
+       _igvn.set_type(decode_clone, decode_clone->Value(&_igvn));
+       klassptr_clone = decode_clone;
+     }
+     phi->set_req(i, klassptr_clone);
+   }
+   register_new_node(phi, region);
+   Node* orig = n->in(1);
+   _igvn.replace_input_of(n, 1, phi);
+   split_if_with_blocks_post(n);
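+   // If the split did not eliminate the CmpP, undo the input replacement and
+   // remove the now-unused phi.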
+   if (n->outcnt() != 0) {
+     _igvn.replace_input_of(n, 1, orig);
+     _igvn.remove_dead_node(phi);
+   }
+   return true;
+ }
+ 
  //------------------------------split_if_with_blocks_post----------------------
  // Do the real work in a non-recursive function.  CFG hackery wants to be
  // in the post-order, so it can dirty the I-DOM info and not use the dirtied
  // info.
  void PhaseIdealLoop::split_if_with_blocks_post(Node *n) {
  
+   if (flat_array_element_type_check(n)) {
+     return;
+   }
+ 
    // Cloning Cmp through Phi's involves the split-if transform.
    // FastLock is not used by an If
    if (n->is_Cmp() && !n->is_FastLock()) {
      Node *n_ctrl = get_ctrl(n);
      // Determine if the Node has inputs from some local Phi.

@@ -1549,10 +1716,15 @@
    }
  
    try_sink_out_of_loop(n);
  
    try_move_store_after_loop(n);
+ 
+   // Remove redundant allocations of the same inline type
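+   // (a buffer allocation dominated by an allocation of the same value can be
+   // removed; see InlineTypeNode::remove_redundant_allocations)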
+   if (n->is_InlineType()) {
+     n->as_InlineType()->remove_redundant_allocations(this);
+   }
  }
  
  // Transform:
  //
  // if (some_condition) {

@@ -2033,14 +2205,22 @@
      sample_bool = n->in(1);
      assert(sample_bool->is_Bool(), "wrong type");
    } else {
      sample_bool = n;
    }
-   Node *sample_cmp = sample_bool->in(1);
+   Node* sample_cmp = sample_bool->in(1);
+   const Type* t = Type::TOP;
+   const TypePtr* at = nullptr;
+   if (sample_cmp->is_FlatArrayCheck()) {
+     // The left input of a FlatArrayCheckNode is memory; set the type and adr type of the phi accordingly
+     assert(sample_cmp->in(1)->bottom_type() == Type::MEMORY, "unexpected input type");
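+     // A memory phi needs an adr_type; flat array checks use raw memory (see
+     // the comment above move_flat_array_check_out_of_loop)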
+     t = Type::MEMORY;
+     at = TypeRawPtr::BOTTOM;
+   }
  
    // Make Phis to merge the Cmp's inputs.
-   PhiNode *phi1 = new PhiNode(phi->in(0), Type::TOP);
+   PhiNode *phi1 = new PhiNode(phi->in(0), t, at);
    PhiNode *phi2 = new PhiNode(phi->in(0), Type::TOP);
    for (i = 1; i < phi->req(); i++) {
      Node *n1 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1);
      Node *n2 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2);
      phi1->set_req(i, n1);