src/hotspot/share/opto/phaseX.cpp

*** 1133,11 ***
      // after loop-opts, so that should take care of many of these cases.
      return false;
    }
    tty->cr();
    tty->print_cr("Missed Value optimization:");
!   n->dump_bfs(1, 0, "");
    tty->print_cr("Current type:");
    told->dump_on(tty);
    tty->cr();
    tty->print_cr("Optimized type:");
    tnew->dump_on(tty);
--- 1133,11 ---
      // after loop-opts, so that should take care of many of these cases.
      return false;
    }
    tty->cr();
    tty->print_cr("Missed Value optimization:");
!   n->dump_bfs(3, 0, "");
    tty->print_cr("Current type:");
    told->dump_on(tty);
    tty->cr();
    tty->print_cr("Optimized type:");
    tnew->dump_on(tty);

*** 1158,22 ***
  }
  
  //------------------------------transform--------------------------------------
  // Non-recursive: idealize Node 'n' with respect to its inputs and its value
  Node *PhaseIterGVN::transform( Node *n ) {
-   if (_delay_transform) {
-     // Register the node but don't optimize for now
-     register_new_node_with_optimizer(n);
-     return n;
-   }
- 
    // If brand new node, make space in type array, and give it a type.
    ensure_type_or_null(n);
    if (type_or_null(n) == nullptr) {
      set_type_bottom(n);
    }
  
    return transform_old(n);
  }
  
  Node *PhaseIterGVN::transform_old(Node* n) {
    NOT_PRODUCT(set_transforms());
--- 1158,22 ---
  }
  
  //------------------------------transform--------------------------------------
  // Non-recursive: idealize Node 'n' with respect to its inputs and its value
  Node *PhaseIterGVN::transform( Node *n ) {
    // If brand new node, make space in type array, and give it a type.
    ensure_type_or_null(n);
    if (type_or_null(n) == nullptr) {
      set_type_bottom(n);
    }
  
+   if (_delay_transform) {
+     // Add the node to the worklist but don't optimize for now
+     _worklist.push(n);
+     return n;
+   }
+ 
    return transform_old(n);
  }
  
  Node *PhaseIterGVN::transform_old(Node* n) {
    NOT_PRODUCT(set_transforms());

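Note on the hunk above: the delayed-transform path was moved below the type
bookkeeping so that even a deferred node gets an entry in the type array
before anything queries it, and the node is now parked on the IGVN worklist
rather than merely registered. A minimal usage sketch, assuming the usual
set_delay_transform accessor; node names are illustrative:

  // Sketch: with delayed transform, the node is typed and enqueued
  // instead of being idealized on the spot.
  igvn.set_delay_transform(true);
  Node* n = igvn.transform(raw_node);   // typed, pushed on _worklist
  assert(igvn.type_or_null(n) != nullptr, "type assigned even when delayed");
  igvn.set_delay_transform(false);
  igvn.optimize();                      // drains the worklist, idealizes n
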
*** 1440,10 ***
--- 1440,23 ---
    }
  #endif
    temp->destruct(this);     // reuse the _idx of this little guy
  }
  
+ void PhaseIterGVN::replace_in_uses(Node* n, Node* m) {
+   assert(n != nullptr, "sanity");
+   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+     Node* u = n->fast_out(i);
+     if (u != n) {
+       rehash_node_delayed(u);
+       int nb = u->replace_edge(n, m);
+       --i, imax -= nb;
+     }
+   }
+   assert(n->outcnt() == 0, "all uses must be deleted");
+ }
+ 
  //------------------------------add_users_to_worklist--------------------------
  void PhaseIterGVN::add_users_to_worklist0(Node* n, Unique_Node_List& worklist) {
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      worklist.push(n->fast_out(i));  // Push on worklist
    }

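The new replace_in_uses helper is the standard DU-iterator deletion idiom:
replace_edge() removes out-edges of n, so the iterator bounds are stepped
back by the number of edges replaced (--i, imax -= nb), and each rewired
user is rehashed and re-enqueued via rehash_node_delayed first. A
hypothetical caller, with illustrative node names:

  // Sketch: rewire every user of 'oldn' to 'newn', then reclaim 'oldn'.
  igvn.replace_in_uses(oldn, newn);
  assert(oldn->outcnt() == 0, "no users left");
  igvn.remove_dead_node(oldn);   // safe once nothing points at 'oldn'
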
*** 1495,10 ***
--- 1508,20 ---
      if (p != nullptr) {
        add_users_to_worklist0(p, worklist);
      }
    }
  
+   // AndLNode::Ideal folds GraphKit::mark_word_test patterns. Give it a chance to run.
+   if (n->is_Load() && use->is_Phi()) {
+     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
+       Node* u = use->fast_out(i);
+       if (u->Opcode() == Op_AndL) {
+         worklist.push(u);
+       }
+     }
+   }
+ 
    uint use_op = use->Opcode();
    if(use->is_Cmp()) {       // Enable CMP/BOOL optimization
      add_users_to_worklist0(use, worklist); // Put Bool on worklist
      if (use->outcnt() > 0) {
        Node* bol = use->raw_out(0);

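The mark-word test that this notification targets only becomes foldable once
the Load's type improves, so the AndL behind the Phi must be revisited. An
illustrative IR shape, in comment form (names are schematic):

  //   LoadL(mark word)   other value
  //            \            /
  //            Phi (region)
  //                 |
  //               AndL  #mask   <- pushed so AndLNode::Ideal can fold the
  //                                mark-word bit test
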
*** 1592,10 ***
--- 1615,19 ---
          }
        }
      }
    }
  
+   // Inline type nodes can have other inline types as users. If an input gets
+   // updated, make sure that inline type users get a chance for optimization.
+   if (use->is_InlineType()) {
+     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
+       Node* u = use->fast_out(i2);
+       if (u->is_InlineType()) {
+         worklist.push(u);
+       }
+     }
+   }
    // If changed Cast input, notify down for Phi, Sub, and Xor - all do "uncast"
    // Patterns:
    // ConstraintCast+ -> Sub
    // ConstraintCast+ -> Phi
    // ConstraintCast+ -> Xor

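Inline type nodes form trees: a field of one InlineType node can itself be
an InlineType node, so a change must ripple upward. An illustrative nesting,
in comment form (schematic):

  //   InlineType inner   (some input of 'inner' just improved)
  //        |
  //   InlineType outer   (uses 'inner' as a field value)
  //
  // 'outer' is pushed so its Ideal/Value can observe the improved 'inner'.
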
*** 1668,10 ***
--- 1700,18 ---
    // of the mirror load depends on the type of 'n'. See LoadNode::Value().
    //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    bool has_load_barrier_nodes = bs->has_load_barrier_nodes();
  
+   if (use_op == Op_CastP2X) {
+     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
+       Node* u = use->fast_out(i2);
+       if (u->Opcode() == Op_AndX) {
+         worklist.push(u);
+       }
+     }
+   }
    if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
      for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
        Node* u = use->fast_out(i2);
        const Type* ut = u->bottom_type();
        if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {

*** 1686,10 ***
--- 1726,20 ---
          }
          worklist.push(u);
        }
      }
    }
+   // Give CallStaticJavaNode::remove_useless_allocation a chance to run
+   if (use->is_Region()) {
+     Node* c = use;
+     do {
+       c = c->unique_ctrl_out_or_null();
+     } while (c != nullptr && c->is_Region());
+     if (c != nullptr && c->is_CallStaticJava() && c->as_CallStaticJava()->uncommon_trap_request() != 0) {
+       worklist.push(c);
+     }
+   }
    if (use->Opcode() == Op_OpaqueZeroTripGuard) {
      assert(use->outcnt() <= 1, "OpaqueZeroTripGuard can't be shared");
      if (use->outcnt() == 1) {
        Node* cmp = use->unique_out();
        worklist.push(cmp);

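Both additions in this hunk follow the same notification pattern. For the
CastP2X case, a pointer reinterpreted as an integral word has one of its
bits tested; an illustrative shape, in comment form (schematic):

  //   CastP2X(ptr)
  //        |
  //      AndX  #bit_mask   <- pushed so the word-sized And can fold the
  //                           bit test once the pointer's type improves

For the Region case, the do/while walk skips over chained Regions to find a
uniquely dominated uncommon-trap call, so that
CallStaticJavaNode::remove_useless_allocation gets a chance to run.
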
*** 1761,11 ***
  
  
  #ifdef ASSERT
  void PhaseCCP::verify_type(Node* n, const Type* tnew, const Type* told) {
    if (tnew->meet(told) != tnew->remove_speculative()) {
!     n->dump(1);
      tty->print("told = "); told->dump(); tty->cr();
      tty->print("tnew = "); tnew->dump(); tty->cr();
      fatal("Not monotonic");
    }
    assert(!told->isa_int() || !tnew->isa_int() || told->is_int()->_widen <= tnew->is_int()->_widen, "widen increases");
--- 1811,11 ---
  
  
  #ifdef ASSERT
  void PhaseCCP::verify_type(Node* n, const Type* tnew, const Type* told) {
    if (tnew->meet(told) != tnew->remove_speculative()) {
!     n->dump(3);
      tty->print("told = "); told->dump(); tty->cr();
      tty->print("tnew = "); tnew->dump(); tty->cr();
      fatal("Not monotonic");
    }
    assert(!told->isa_int() || !tnew->isa_int() || told->is_int()->_widen <= tnew->is_int()->_widen, "widen increases");

*** 1878,10 ***
--- 1928,11 ---
  void PhaseCCP::push_more_uses(Unique_Node_List& worklist, Node* parent, const Node* use) const {
    push_phis(worklist, use);
    push_catch(worklist, use);
    push_cmpu(worklist, use);
    push_counted_loop_phi(worklist, parent, use);
+   push_cast(worklist, use);
    push_loadp(worklist, use);
    push_and(worklist, parent, use);
    push_cast_ii(worklist, parent, use);
    push_opaque_zero_trip_guard(worklist, use);
  }

*** 1940,10 ***
--- 1991,22 ---
        worklist.push(phi);
      }
    }
  }
  
+ void PhaseCCP::push_cast(Unique_Node_List& worklist, const Node* use) {
+   uint use_op = use->Opcode();
+   if (use_op == Op_CastP2X) {
+     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
+       Node* u = use->fast_out(i2);
+       if (u->Opcode() == Op_AndX) {
+         worklist.push(u);
+       }
+     }
+   }
+ }
+ 
  // Loading the java mirror from a Klass requires two loads and the type of the mirror load depends on the type of 'n'.
  // See LoadNode::Value().
  void PhaseCCP::push_loadp(Unique_Node_List& worklist, const Node* use) const {
    BarrierSetC2* barrier_set = BarrierSet::barrier_set()->barrier_set_c2();
    bool has_load_barrier_nodes = barrier_set->has_load_barrier_nodes();
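PhaseCCP::push_cast mirrors the IGVN-side notification added earlier: during
CCP the type of a CastP2X narrows together with its pointer input, and the
dependent AndX must be re-queued for its Value to use the new type. A
value-level view of what this lets CCP fold (illustrative):

  //   x = (intptr_t) p;   // CastP2X
  //   y = x & mask;       // AndX
  //
  // If CCP proves bits of 'x' (say, the low alignment bits of 'p' are
  // zero), 'y' can become a constant, but only if 'y' is revisited when
  // the type of 'x' changes.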