src/hotspot/share/opto/phaseX.cpp

@@ -1164,22 +1164,24 @@
  }
  
  //------------------------------transform--------------------------------------
  // Non-recursive: idealize Node 'n' with respect to its inputs and its value
  Node *PhaseIterGVN::transform( Node *n ) {
-   if (_delay_transform) {
-     // Register the node but don't optimize for now
-     register_new_node_with_optimizer(n);
-     return n;
-   }
- 
    // If brand new node, make space in type array, and give it a type.
    ensure_type_or_null(n);
    if (type_or_null(n) == nullptr) {
      set_type_bottom(n);
    }
  
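+   // When transformation is delayed, the node still gets a type (above) but is
+   // only pushed on the worklist, to be optimized in a later IGVN pass.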
+   if (_delay_transform) {
+     // Add the node to the worklist but don't optimize for now
+     _worklist.push(n);
+     return n;
+   }
+ 
    return transform_old(n);
  }
  
  Node *PhaseIterGVN::transform_old(Node* n) {
    NOT_PRODUCT(set_transforms());

@@ -1446,10 +1448,25 @@
    }
  #endif
    temp->destruct(this);     // reuse the _idx of this little guy
  }
  
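+ //------------------------------replace_in_uses--------------------------------
+ // Replace all uses of 'n' with 'm'; changed users are unhashed and re-enqueued.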
+ void PhaseIterGVN::replace_in_uses(Node* n, Node* m) {
+   assert(n != nullptr, "sanity");
+   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+     Node* u = n->fast_out(i);
+     if (u != n) {
+       rehash_node_delayed(u);  // 'u' is about to change: unhash it now, rehash and re-optimize it later
+       int nb = u->replace_edge(n, m);
+       --i, imax -= nb;         // 'n' just lost 'nb' DU edges; adjust the fast iterator
+     }
+   }
+   assert(n->outcnt() == 0, "all uses must be deleted");
+ }
+ 
  //------------------------------add_users_to_worklist--------------------------
  void PhaseIterGVN::add_users_to_worklist0(Node* n, Unique_Node_List& worklist) {
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      worklist.push(n->fast_out(i));  // Push on worklist
    }

@@ -1598,10 +1615,20 @@
          }
        }
      }
    }
  
+   // Inline type nodes can have other inline types as users. If an input gets
+   // updated, make sure that inline type users get a chance for optimization.
+   if (use->is_InlineType()) {
+     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
+       Node* u = use->fast_out(i2);
+       if (u->is_InlineType()) {
+         worklist.push(u);
+       }
+     }
+   }
    // If changed Cast input, notify down for Phi, Sub, and Xor - all do "uncast"
    // Patterns:
    // ConstraintCast+ -> Sub
    // ConstraintCast+ -> Phi
    // ConstraintCast+ -> Xor

@@ -1674,10 +1701,20 @@
    // of the mirror load depends on the type of 'n'. See LoadNode::Value().
    //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    bool has_load_barrier_nodes = bs->has_load_barrier_nodes();
  
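+   // The value of an AndX node masking the result of a CastP2X depends on the
+   // type of the cast's input, so re-process such uses when that input changes.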
+   if (use_op == Op_CastP2X) {
+     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
+       Node* u = use->fast_out(i2);
+       if (u->Opcode() == Op_AndX) {
+         worklist.push(u);
+       }
+     }
+   }
    if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
      for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
        Node* u = use->fast_out(i2);
        const Type* ut = u->bottom_type();
        if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {

@@ -1692,10 +1729,21 @@
          }
          worklist.push(u);
        }
      }
    }
+   // Give CallStaticJavaNode::remove_useless_allocation a chance to run
+   if (use->is_Region()) {
+     Node* c = use;
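+     // Follow the unique control output chain through any Regions below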
+     do {
+       c = c->unique_ctrl_out_or_null();
+     } while (c != nullptr && c->is_Region());
+     if (c != nullptr && c->is_CallStaticJava() && c->as_CallStaticJava()->uncommon_trap_request() != 0) {
+       worklist.push(c);
+     }
+   }
    if (use->Opcode() == Op_OpaqueZeroTripGuard) {
      assert(use->outcnt() <= 1, "OpaqueZeroTripGuard can't be shared");
      if (use->outcnt() == 1) {
        Node* cmp = use->unique_out();
        worklist.push(cmp);

@@ -1884,10 +1932,11 @@
  void PhaseCCP::push_more_uses(Unique_Node_List& worklist, Node* parent, const Node* use) const {
    push_phis(worklist, use);
    push_catch(worklist, use);
    push_cmpu(worklist, use);
    push_counted_loop_phi(worklist, parent, use);
+   push_cast(worklist, use);
    push_loadp(worklist, use);
    push_and(worklist, parent, use);
    push_cast_ii(worklist, parent, use);
    push_opaque_zero_trip_guard(worklist, use);
  }

@@ -1945,10 +1994,24 @@
        worklist.push(phi);
      }
    }
  }
  
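+ // The value of an AndX node masking the result of a CastP2X depends on the type
+ // of the cast's input. Push such nodes to give them a chance to fold.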
+ void PhaseCCP::push_cast(Unique_Node_List& worklist, const Node* use) {
+   uint use_op = use->Opcode();
+   if (use_op == Op_CastP2X) {
+     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
+       Node* u = use->fast_out(i2);
+       if (u->Opcode() == Op_AndX) {
+         worklist.push(u);
+       }
+     }
+   }
+ }
+ 
  // Loading the java mirror from a Klass requires two loads and the type of the mirror load depends on the type of 'n'.
  // See LoadNode::Value().
  void PhaseCCP::push_loadp(Unique_Node_List& worklist, const Node* use) const {
    BarrierSetC2* barrier_set = BarrierSet::barrier_set()->barrier_set_c2();
    bool has_load_barrier_nodes = barrier_set->has_load_barrier_nodes();