src/hotspot/share/opto/phaseX.cpp

@@ -421,11 +421,11 @@
  
    // Remove all useless nodes from future worklist
    worklist->remove_useless_nodes(_useful.member_set());
  
    // Disconnect 'useless' nodes that are adjacent to useful nodes
-   C->remove_useless_nodes(_useful);
+   C->disconnect_useless_nodes(_useful, worklist);
  }
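
The call above now also hands the IGVN worklist to the compiler, so nodes that become dead are scrubbed from it before they can be pulled out and re-processed. Below is a minimal, self-contained sketch of that pattern, using plain integers and vectors as stand-ins for C2's Node and Unique_Node_List (generic code, not HotSpot API):

    #include <algorithm>
    #include <unordered_set>
    #include <utility>
    #include <vector>

    using NodeId = int;

    // Keep only useful nodes on the pending worklist, then cut every edge
    // that touches a useless node (mirrors worklist->remove_useless_nodes(...)
    // followed by C->disconnect_useless_nodes(_useful, worklist)).
    void disconnect_useless(const std::unordered_set<NodeId>& useful,
                            std::vector<NodeId>& worklist,
                            std::vector<std::pair<NodeId, NodeId>>& edges) {
      worklist.erase(std::remove_if(worklist.begin(), worklist.end(),
                                    [&](NodeId n) { return useful.count(n) == 0; }),
                     worklist.end());
      edges.erase(std::remove_if(edges.begin(), edges.end(),
                                 [&](const std::pair<NodeId, NodeId>& e) {
                                   return useful.count(e.first) == 0 ||
                                          useful.count(e.second) == 0;
                                 }),
                  edges.end());
    }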
  
  //=============================================================================
  //------------------------------PhaseRenumberLive------------------------------
  // First, remove useless nodes (equivalent to identifying live nodes).

@@ -1225,22 +1225,22 @@
  }
  
  //------------------------------transform--------------------------------------
  // Non-recursive: idealize Node 'n' with respect to its inputs and its value
  Node *PhaseIterGVN::transform( Node *n ) {
-   if (_delay_transform) {
-     // Register the node but don't optimize for now
-     register_new_node_with_optimizer(n);
-     return n;
-   }
- 
    // If brand new node, make space in type array, and give it a type.
    ensure_type_or_null(n);
    if (type_or_null(n) == NULL) {
      set_type_bottom(n);
    }
  
+   if (_delay_transform) {
+     // Add the node to the worklist but don't optimize for now
+     _worklist.push(n);
+     return n;
+   }
+ 
    return transform_old(n);
  }
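
The hunk above moves the _delay_transform early return below the type initialization, so even a node whose optimization is deferred to the worklist already has a type installed. A small self-contained sketch of that ordering, with an int-keyed map standing in for the real type array (nothing here is HotSpot API):

    #include <unordered_map>
    #include <vector>

    struct Type {};
    static Type BOTTOM;   // stand-in for the bottom type

    struct MiniGVN {
      std::unordered_map<int, const Type*> _types;  // stand-in for the type array
      std::vector<int> _worklist;
      bool _delay_transform = false;

      int transform(int n) {
        // Give a brand-new node a type *before* the delayed early return, so
        // a type query can never come back NULL for a node that was handed out.
        if (_types.find(n) == _types.end()) _types[n] = &BOTTOM;
        if (_delay_transform) {
          _worklist.push_back(n);   // optimize later, but already typed
          return n;
        }
        return transform_old(n);
      }
      int transform_old(int n) { return n; }
    };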
  
  Node *PhaseIterGVN::transform_old(Node* n) {
    NOT_PRODUCT(set_transforms());

@@ -1499,10 +1499,23 @@
    }
  #endif
    temp->destruct(this);     // reuse the _idx of this little guy
  }
  
+ void PhaseIterGVN::replace_in_uses(Node* n, Node* m) {
+   assert(n != NULL, "sanity");
+   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+     Node* u = n->fast_out(i);
+     if (u != n) {
+       rehash_node_delayed(u);
+       int nb = u->replace_edge(n, m);
+       --i, imax -= nb;
+     }
+   }
+   assert(n->outcnt() == 0, "all uses must be deleted");
+ }
+ 
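
In replace_in_uses above, replace_edge deletes 'nb' entries from the out-array that the DUIterator_Fast is walking, which is why both the cursor and the cached bound shrink ("--i, imax -= nb", with the loop's i++ cancelling the decrement). A self-contained sketch of the same compensation on a plain vector of users (generic code, not HotSpot's iterators):

    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Move every use of n over to m. Each round erases all occurrences of the
    // user found at the cursor ("nb" edges), so the cached bound shrinks by nb
    // and the cursor stays put -- the slot now holds a different user.
    void replace_in_uses(std::vector<int>& n_outs, std::vector<int>& m_outs) {
      for (std::size_t i = 0, imax = n_outs.size(); i < imax; ) {
        int u = n_outs[i];
        std::size_t before = n_outs.size();
        n_outs.erase(std::remove(n_outs.begin(), n_outs.end(), u), n_outs.end());
        std::size_t nb = before - n_outs.size();   // edges actually replaced
        m_outs.insert(m_outs.end(), nb, u);        // n's uses become m's uses
        imax -= nb;                                // shrink the cached bound
      }
      assert(n_outs.empty() && "all uses must be deleted");
    }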
  //------------------------------add_users_to_worklist--------------------------
  void PhaseIterGVN::add_users_to_worklist0( Node *n ) {
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      _worklist.push(n->fast_out(i));  // Push on worklist
    }

@@ -1599,10 +1612,19 @@
            }
          }
        }
      }
  
+     // Inline type nodes can have other inline types as users. If an input gets
+     // updated, make sure that inline type users get a chance for optimization.
+     if (use->is_InlineTypeBase()) {
+       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
+         Node* u = use->fast_out(i2);
+         if (u->is_InlineTypeBase())
+           _worklist.push(u);
+       }
+     }
      // If changed Cast input, check Phi users for simple cycles
      if (use->is_ConstraintCast()) {
        for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
          Node* u = use->fast_out(i2);
          if (u->is_Phi())

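The inline-type block in the hunk above re-enqueues users of 'use' so their Value()/Ideal() can run again once an input changed. A tiny generic sketch of that "push matching users" step (plain vectors instead of DUIterator_Fast and the IGVN worklist; the filter models u->is_InlineTypeBase()):

    #include <vector>

    // Re-enqueue every user of 'use' that passes a filter, giving it another
    // chance at optimization after one of its inputs was updated.
    void push_matching_users(const std::vector<int>& outs_of_use,
                             const std::vector<bool>& is_inline_type,
                             std::vector<int>& worklist) {
      for (int u : outs_of_use) {
        if (is_inline_type[u]) {
          worklist.push_back(u);
        }
      }
    }
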
@@ -1644,10 +1666,18 @@
      }
      if (use_op == Op_Initialize) {
        Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory);
        if (imem != NULL)  add_users_to_worklist0(imem);
      }
+     if (use_op == Op_CastP2X) {
+       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
+         Node* u = use->fast_out(i2);
+         if (u->Opcode() == Op_AndX) {
+           _worklist.push(u);
+         }
+       }
+     }
      // Loading the java mirror from a Klass requires two loads and the type
      // of the mirror load depends on the type of 'n'. See LoadNode::Value().
      //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      bool has_load_barrier_nodes = bs->has_load_barrier_nodes();

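The Op_CastP2X case in the hunk above pushes AndX masks over a pointer-to-integer cast back onto the worklist; once the pointer's bits are known, such a bit test can fold to a constant. A minimal sketch of why the fold becomes possible (generic C++17 code; the specific bit semantics are not modelled):

    #include <cstdint>
    #include <optional>

    // AndX(CastP2X(p), mask) can only fold once p's bits are known; that is
    // why AndX users are re-enqueued whenever the CastP2X input is revisited.
    std::optional<std::uintptr_t> fold_and_mask(std::optional<std::uintptr_t> p_bits,
                                                std::uintptr_t mask) {
      if (!p_bits.has_value()) {
        return std::nullopt;        // pointer not (yet) a known constant
      }
      return *p_bits & mask;        // the AndX folds to a constant
    }
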
@@ -1668,10 +1698,21 @@
            }
            _worklist.push(u);
          }
        }
      }
+ 
+     // Give CallStaticJavaNode::remove_useless_allocation a chance to run
+     if (use->is_Region()) {
+       Node* c = use;
+       do {
+         c = c->unique_ctrl_out();
+       } while (c != NULL && c->is_Region());
+       if (c != NULL && c->is_CallStaticJava() && c->as_CallStaticJava()->uncommon_trap_request() != 0) {
+         _worklist.push(c);
+       }
+     }
    }
  }
  
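The Region case above chases unique control outputs through nested Regions to find a trailing uncommon-trap call and re-enqueue it. A self-contained sketch of that control-flow walk (CtrlNode is a hypothetical stand-in, not C2's Node):

    #include <cstddef>

    // Follow unique control successors, skipping chained region nodes, until
    // a non-region node (the uncommon-trap call candidate) or NULL is reached.
    struct CtrlNode {
      bool is_region;
      CtrlNode* unique_ctrl_out;   // NULL when there is no single ctrl successor
    };

    CtrlNode* skip_regions(CtrlNode* c) {
      do {
        c = c->unique_ctrl_out;
      } while (c != NULL && c->is_region);
      return c;
    }
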
  /**
   * Remove the speculative part of all types that we know of

@@ -1719,11 +1760,11 @@
  uint PhaseCCP::_total_invokes   = 0;
  uint PhaseCCP::_total_constants = 0;
  #endif
  //------------------------------PhaseCCP---------------------------------------
  // Conditional Constant Propagation, ala Wegman & Zadeck
- PhaseCCP::PhaseCCP( PhaseIterGVN *igvn ) : PhaseIterGVN(igvn) {
+ PhaseCCP::PhaseCCP(PhaseIterGVN* igvn) : PhaseIterGVN(igvn), _trstack(C->live_nodes() >> 1) {
    NOT_PRODUCT( clear_constants(); )
    assert( _worklist.size() == 0, "" );
    // Clear out _nodes from IterGVN.  Must be clear to transform call.
    _nodes.clear();               // Clear out from IterGVN
    analyze();

@@ -1773,10 +1814,15 @@
      if (StressCCP) {
        n = worklist.remove(C->random() % worklist.size());
      } else {
        n = worklist.pop();
      }
+     if (n->is_SafePoint()) {
+       // Make sure safepoints are processed by PhaseCCP::transform even if they are
+       // not reachable from the bottom. Otherwise, infinite loops would be removed.
+       _trstack.push(n);
+     }
      const Type *t = n->Value(this);
      if (t != type(n)) {
        assert(ccp_type_widens(t, type(n)), "ccp type must widen");
  #ifndef PRODUCT
        if( TracePhaseCCP ) {

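The safepoint hunk above seeds _trstack (now a member, preallocated to half the live node count in the constructor, replacing the old function-local stack) so that PhaseCCP::transform also visits nodes with no path to the bottom, such as the body of an infinite loop. A generic sketch of why seeding extra roots keeps them alive (plain vectors, not HotSpot types):

    #include <vector>

    // A walk from the bottom only reaches nodes with a path to it; infinite
    // loops have none and would be missed. Seeding their safepoints onto the
    // same traversal stack keeps the loop bodies visited (and thus alive).
    void visit_from_roots(std::vector<int> stack,                 // seeded roots
                          const std::vector<std::vector<int>>& inputs,
                          std::vector<bool>& visited) {
      while (!stack.empty()) {
        int n = stack.back();
        stack.pop_back();
        if (visited[n]) continue;
        visited[n] = true;
        for (int in : inputs[n]) {
          stack.push_back(in);   // walk toward inputs, like the CCP transform
        }
      }
    }
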
@@ -1838,10 +1884,18 @@
            PhiNode* phi = countedloop_phi_from_cmp((CmpINode*)m, n);
            if (phi != NULL) {
              worklist.push(phi);
            }
          }
+         if (m_op == Op_CastP2X) {
+           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
+             Node* u = m->fast_out(i2);
+             if (u->Opcode() == Op_AndX) {
+               worklist.push(u);
+             }
+           }
+         }
          // Loading the java mirror from a Klass requires two loads and the type
          // of the mirror load depends on the type of 'n'. See LoadNode::Value().
          BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
          bool has_load_barrier_nodes = bs->has_load_barrier_nodes();
  

@@ -1884,31 +1938,47 @@
    Node *new_node = _nodes[n->_idx]; // Check for transformed node
    if( new_node != NULL )
      return new_node;                // Been there, done that, return old answer
    new_node = transform_once(n);     // Check for constant
    _nodes.map( n->_idx, new_node );  // Flag as having been cloned
+   _useful.push(new_node); // Keep track of nodes that are reachable from the bottom
  
-   // Allocate stack of size _nodes.Size()/2 to avoid frequent realloc
-   GrowableArray <Node *> trstack(C->live_nodes() >> 1);
- 
-   trstack.push(new_node);           // Process children of cloned node
-   while ( trstack.is_nonempty() ) {
-     Node *clone = trstack.pop();
+   _trstack.push(new_node);           // Process children of cloned node
+   while (_trstack.is_nonempty()) {
+     Node* clone = _trstack.pop();
      uint cnt = clone->req();
      for( uint i = 0; i < cnt; i++ ) {          // For all inputs do
        Node *input = clone->in(i);
        if( input != NULL ) {                    // Ignore NULLs
          Node *new_input = _nodes[input->_idx]; // Check for cloned input node
          if( new_input == NULL ) {
            new_input = transform_once(input);   // Check for constant
            _nodes.map( input->_idx, new_input );// Flag as having been cloned
-           trstack.push(new_input);
+           _useful.push(new_input);
+           _trstack.push(new_input);
          }
          assert( new_input == clone->in(i), "insanity check");
        }
      }
    }
+ 
+   // The above transformation might lead to subgraphs becoming unreachable from the
+   // bottom while still being reachable from the top. As a result, nodes in that
+   // subgraph are not transformed and their bottom types are not updated, leading to
+   // an inconsistency between bottom_type() and type(). In rare cases, LoadNodes in
+   // such a subgraph, kept alive by InlineTypePtrNodes, might be re-enqueued for IGVN
+   // indefinitely by MemNode::Ideal_common because their address type is inconsistent.
+   // Therefore, we aggressively remove all useless nodes here rather than
+   // waiting for PhaseIdealLoop::build_loop_late to remove them later.
+   if (C->cached_top_node()) {
+     _useful.push(C->cached_top_node());
+   }
+   C->update_dead_node_list(_useful);
+   remove_useless_nodes(_useful.member_set());
+   _worklist.remove_useless_nodes(_useful.member_set());
+   C->disconnect_useless_nodes(_useful, &_worklist);
+ 
    return new_node;
  }
  
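PhaseCCP::transform above now records every node it reaches in _useful and immediately sweeps the rest, as explained in the new comment. A self-contained sketch of the memoized, stack-based walk itself (ints and vectors stand in for Node, _nodes and _trstack):

    #include <vector>

    // Transform each node exactly once, remember it as reachable-from-bottom,
    // and push it so its inputs are processed iteratively (no recursion).
    void transform_all(int root,
                       const std::vector<std::vector<int>>& inputs,
                       std::vector<bool>& done,      // memo table, like _nodes
                       std::vector<int>& useful,     // like _useful
                       std::vector<int>& stack) {    // like _trstack
      auto once = [&](int n) {
        done[n] = true;            // flag as having been processed
        useful.push_back(n);       // reachable from the bottom
        stack.push_back(n);        // process its inputs later
      };
      once(root);
      while (!stack.empty()) {
        int clone = stack.back();
        stack.pop_back();
        for (int in : inputs[clone]) {
          if (!done[in]) {
            once(in);              // every input is transformed exactly once
          }
        }
      }
      // Nodes never reached here form the useless set that is swept afterwards.
    }
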
  
  //------------------------------transform_once---------------------------------