1 /*
   2  * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "libadt/vectset.hpp"
  27 #include "memory/allocation.inline.hpp"
  28 #include "memory/resourceArea.hpp"
  29 #include "opto/block.hpp"
  30 #include "opto/c2compiler.hpp"
  31 #include "opto/callnode.hpp"
  32 #include "opto/cfgnode.hpp"
  33 #include "opto/machnode.hpp"
  34 #include "opto/opcodes.hpp"
  35 #include "opto/phaseX.hpp"
  36 #include "opto/rootnode.hpp"
  37 #include "opto/runtime.hpp"
  38 #include "opto/chaitin.hpp"
  39 #include "runtime/deoptimization.hpp"
  40 
  41 // Portions of code courtesy of Clifford Click
  42 
  43 // Optimization - Graph Style
  44 
  45 // To avoid float value underflow
  46 #define MIN_BLOCK_FREQUENCY 1.e-35f
  47 
  48 //----------------------------schedule_node_into_block-------------------------
  49 // Insert node n into block b. Look for projections of n and make sure they
  50 // are in b also.
  51 void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
  52   // Set the basic block of n and add n to b.
  53   map_node_to_block(n, b);
  54   b->add_inst(n);
  55 
  56   // After Matching, nearly any old Node may have projections trailing it.
  57   // These are usually machine-dependent flags.  In any case, they might
  58   // float to another block below this one.  Move them up.
  59   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  60     Node*  use  = n->fast_out(i);
  61     if (use->is_Proj()) {
  62       Block* buse = get_block_for_node(use);
  63       if (buse != b) {              // In wrong block?
  64         if (buse != nullptr) {
  65           buse->find_remove(use);   // Remove from wrong block
  66         }
  67         map_node_to_block(use, b);
  68         b->add_inst(use);
  69       }
  70     }
  71   }
  72 }
  73 
  74 //----------------------------replace_block_proj_ctrl-------------------------
  75 // Nodes that have is_block_proj() nodes as their control need to use
  76 // the appropriate Region for their actual block as their control since
  77 // the projection will be in a predecessor block.
  78 void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
  79   const Node *in0 = n->in(0);
  80   assert(in0 != nullptr, "Only control-dependent");
  81   const Node *p = in0->is_block_proj();
  82   if (p != nullptr && p != n) {    // Control from a block projection?
  83     assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
  84     // Find trailing Region
  85     Block *pb = get_block_for_node(in0); // Block-projection already has basic block
  86     uint j = 0;
  87     if (pb->_num_succs != 1) {  // More than 1 successor?
  88       // Search for successor
  89       uint max = pb->number_of_nodes();
  90       assert( max > 1, "" );
  91       uint start = max - pb->_num_succs;
  92       // Find which output path belongs to projection
  93       for (j = start; j < max; j++) {
  94         if( pb->get_node(j) == in0 )
  95           break;
  96       }
  97       assert( j < max, "must find" );
  98       // Change control to match head of successor basic block
  99       j -= start;
 100     }
 101     n->set_req(0, pb->_succs[j]->head());
 102   }
 103 }
 104 
 105 bool PhaseCFG::is_dominator(Node* dom_node, Node* node) {
 106   assert(is_CFG(node) && is_CFG(dom_node), "node and dom_node must be CFG nodes");
 107   if (dom_node == node) {
 108     return true;
 109   }
 110   Block* d = find_block_for_node(dom_node);
 111   Block* n = find_block_for_node(node);
 112   assert(n != nullptr && d != nullptr, "blocks must exist");
 113 
 114   if (d == n) {
 115     if (dom_node->is_block_start()) {
 116       return true;
 117     }
 118     if (node->is_block_start()) {
 119       return false;
 120     }
 121     if (dom_node->is_block_proj()) {
 122       return false;
 123     }
 124     if (node->is_block_proj()) {
 125       return true;
 126     }
 127 
 128     assert(is_control_proj_or_safepoint(node), "node must be control projection or safepoint");
 129     assert(is_control_proj_or_safepoint(dom_node), "dom_node must be control projection or safepoint");
 130 
 131     // Neither 'node' nor 'dom_node' is a block start or block projection.
 132     // Check if 'dom_node' is above 'node' in the control graph.
 133     if (is_dominating_control(dom_node, node)) {
 134       return true;
 135     }
 136 
 137 #ifdef ASSERT
 138     // If 'dom_node' does not dominate 'node' then 'node' has to dominate 'dom_node'
 139     if (!is_dominating_control(node, dom_node)) {
 140       node->dump();
 141       dom_node->dump();
 142       assert(false, "neither dom_node nor node dominates the other");
 143     }
 144 #endif
 145 
 146     return false;
 147   }
 148   return d->dom_lca(n) == d;
 149 }
 150 
 151 bool PhaseCFG::is_CFG(Node* n) {
 152   return n->is_block_proj() || n->is_block_start() || is_control_proj_or_safepoint(n);
 153 }
 154 
 155 bool PhaseCFG::is_control_proj_or_safepoint(Node* n) const {
 156   bool result = (n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_SafePoint) || (n->is_Proj() && n->as_Proj()->bottom_type() == Type::CONTROL);
 157   assert(!result || (n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_SafePoint)
 158           || (n->is_Proj() && n->as_Proj()->_con == 0), "If control projection, it must be projection 0");
 159   return result;
 160 }
 161 
 162 Block* PhaseCFG::find_block_for_node(Node* n) const {
 163   if (n->is_block_start() || n->is_block_proj()) {
 164     return get_block_for_node(n);
 165   } else {
 166     // Walk up the control graph if 'n' is neither a block start nor a block projection. In this case 'n' must be
 167     // an unmatched control projection or a not-yet-matched safepoint precedence edge in the middle of a block.
 168     assert(is_control_proj_or_safepoint(n), "must be control projection or safepoint");
 169     Node* ctrl = n->in(0);
 170     while (!ctrl->is_block_start()) {
 171       ctrl = ctrl->in(0);
 172     }
 173     return get_block_for_node(ctrl);
 174   }
 175 }
 176 
 177 // Walk up the control graph from 'n' and check if 'dom_ctrl' is found.
 178 bool PhaseCFG::is_dominating_control(Node* dom_ctrl, Node* n) {
 179   Node* ctrl = n->in(0);
 180   while (!ctrl->is_block_start()) {
 181     if (ctrl == dom_ctrl) {
 182       return true;
 183     }
 184     ctrl = ctrl->in(0);
 185   }
 186   return false;
 187 }
 188 
 189 
 190 //------------------------------schedule_pinned_nodes--------------------------
 191 // Set the basic block for Nodes pinned into blocks
 192 void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
 193   // Allocate node stack of size C->live_nodes()+8 to avoid frequent realloc
 194   GrowableArray <Node*> spstack(C->live_nodes() + 8);
 195   spstack.push(_root);
 196   while (spstack.is_nonempty()) {
 197     Node* node = spstack.pop();
 198     if (!visited.test_set(node->_idx)) { // Test node and flag it as visited
 199       if (node->pinned() && !has_block(node)) {  // Pinned?  Nail it down!
 200         assert(node->in(0), "pinned Node must have Control");
 201         // Before setting block replace block_proj control edge
 202         replace_block_proj_ctrl(node);
 203         Node* input = node->in(0);
 204         while (!input->is_block_start()) {
 205           input = input->in(0);
 206         }
 207         Block* block = get_block_for_node(input); // Basic block of controlling input
 208         schedule_node_into_block(node, block);
 209       }
 210 
 211       // If the node has precedence edges (added when CastPP nodes are
 212       // removed in final_graph_reshaping), fix the control of the
 213       // node to cover the precedence edges and remove the
 214       // dependencies.
 215       Node* n = nullptr;
 216       for (uint i = node->len()-1; i >= node->req(); i--) {
 217         Node* m = node->in(i);
 218         if (m == nullptr) continue;
 219         assert(is_CFG(m), "must be a CFG node");
 220         node->rm_prec(i);
 221         if (n == nullptr) {
 222           n = m;
 223         } else {
 224           assert(is_dominator(n, m) || is_dominator(m, n), "one must dominate the other");
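               // Keep the deeper of the two, i.e. the one dominated by the other,
               // so 'n' ends up as the deepest (latest) of the precedence inputs.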
 225           n = is_dominator(n, m) ? m : n;
 226         }
 227       }
 228       if (n != nullptr) {
 229         assert(node->in(0), "control should have been set");
 230         assert(is_dominator(n, node->in(0)) || is_dominator(node->in(0), n), "one must dominate the other");
 231         if (!is_dominator(n, node->in(0))) {
 232           node->set_req(0, n);
 233         }
 234       }
 235 
 236       // Process all inputs that are non-null
 237       for (int i = node->len()-1; i >= 0; --i) {
 238         if (node->in(i) != nullptr) {
 239           spstack.push(node->in(i));
 240         }
 241       }
 242     }
 243   }
 244 }
 245 
 246 #ifdef ASSERT
 247 // Assert that new input b2 is dominated by all previous inputs.
 248 // Check this by seeing that it is dominated by b1, the deepest
 249 // input observed before b2.
 250 static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
 251   if (b1 == nullptr)  return;
 252   assert(b1->_dom_depth < b2->_dom_depth, "sanity");
 253   Block* tmp = b2;
 254   while (tmp != b1 && tmp != nullptr) {
 255     tmp = tmp->_idom;
 256   }
 257   if (tmp != b1) {
 258     // Detected an unschedulable graph.  Print some nice stuff and die.
 259     tty->print_cr("!!! Unschedulable graph !!!");
 260     for (uint j=0; j<n->len(); j++) { // For all inputs
 261       Node* inn = n->in(j); // Get input
 262       if (inn == nullptr)  continue;  // Ignore null, missing inputs
 263       Block* inb = cfg->get_block_for_node(inn);
 264       tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
 265                  inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
 266       inn->dump();
 267     }
 268     tty->print("Failing node: ");
 269     n->dump();
 270     assert(false, "unscheduable graph");
 271   }
 272 }
 273 #endif
 274 
 275 static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
 276   // Find the last input dominated by all other inputs.
 277   Block* deepb           = nullptr;     // Deepest block so far
 278   int    deepb_dom_depth = 0;
 279   for (uint k = 0; k < n->len(); k++) { // For all inputs
 280     Node* inn = n->in(k);               // Get input
 281     if (inn == nullptr)  continue;      // Ignore null, missing inputs
 282     Block* inb = cfg->get_block_for_node(inn);
 283     assert(inb != nullptr, "must already have scheduled this input");
 284     if (deepb_dom_depth < (int) inb->_dom_depth) {
 285       // The new inb must be dominated by the previous deepb.
 286       // The various inputs must be linearly ordered in the dom
 287       // tree, or else there will not be a unique deepest block.
 288       DEBUG_ONLY(assert_dom(deepb, inb, n, cfg));
 289       deepb = inb;                      // Save deepest block
 290       deepb_dom_depth = deepb->_dom_depth;
 291     }
 292   }
 293   assert(deepb != nullptr, "must be at least one input to n");
 294   return deepb;
 295 }
 296 
 297 
 298 //------------------------------schedule_early---------------------------------
 299 // Find the earliest Block any instruction can be placed in.  Some instructions
 300 // are pinned into Blocks.  Unpinned instructions can appear in the last block
 301 // in which all their inputs occur.
 302 bool PhaseCFG::schedule_early(VectorSet &visited, Node_Stack &roots) {
 303   // Allocate stack with enough space to avoid frequent realloc
 304   Node_Stack nstack(roots.size() + 8);
 305   // _root will be processed among C->top() inputs
 306   roots.push(C->top(), 0);
 307   visited.set(C->top()->_idx);
 308 
 309   while (roots.size() != 0) {
 310     // Use local variables parent_node & input_index to cache values
 311     // at the top of the stack.
 312     Node* parent_node = roots.node();
 313     uint  input_index = 0;
 314     roots.pop();
 315 
 316     while (true) {
 317       if (input_index == 0) {
 318         // Fixup some control.  Constants without control get attached
 319         // to root and nodes that use is_block_proj() nodes should be attached
 320         // to the region that starts their block.
 321         const Node* control_input = parent_node->in(0);
 322         if (control_input != nullptr) {
 323           replace_block_proj_ctrl(parent_node);
 324         } else {
 325           // Is a constant with NO inputs?
 326           if (parent_node->req() == 1) {
 327             parent_node->set_req(0, _root);
 328           }
 329         }
 330       }
 331 
 332       // First, visit all inputs and force them to get a block.  If an
 333       // input is already in a block we quit following inputs (to avoid
 334       // cycles). Instead we put that Node on a worklist to be handled
 335     // later (since ITS inputs may not have a block yet).
 336 
 337       // Assume all n's inputs will be processed
 338       bool done = true;
 339 
 340       while (input_index < parent_node->len()) {
 341         Node* in = parent_node->in(input_index++);
 342         if (in == nullptr) {
 343           continue;
 344         }
 345 
 346         int is_visited = visited.test_set(in->_idx);
 347         if (!has_block(in)) {
 348           if (is_visited) {
 349             assert(false, "graph should be schedulable");
 350             return false;
 351           }
 352           // Save parent node and next input's index.
 353           nstack.push(parent_node, input_index);
 354           // Process current input now.
 355           parent_node = in;
 356           input_index = 0;
 357           // Not all n's inputs processed.
 358           done = false;
 359           break;
 360         } else if (!is_visited) {
 361           // Visit this guy later, using worklist
 362           roots.push(in, 0);
 363         }
 364       }
 365 
 366       if (done) {
 367         // All of n's inputs have been processed, complete post-processing.
 368 
 369         // Some instructions are pinned into a block.  These include Region,
 370         // Phi, Start, Return, and other control-dependent instructions and
 371         // any projections which depend on them.
 372         if (!parent_node->pinned()) {
 373           // Set earliest legal block.
 374           Block* earliest_block = find_deepest_input(parent_node, this);
 375           map_node_to_block(parent_node, earliest_block);
 376         } else {
 377           assert(get_block_for_node(parent_node) == get_block_for_node(parent_node->in(0)), "Pinned Node should be at the same block as its control edge");
 378         }
 379 
 380         if (nstack.is_empty()) {
 381           // Finished all nodes on stack.
 382           // Process next node on the worklist 'roots'.
 383           break;
 384         }
 385         // Get saved parent node and next input's index.
 386         parent_node = nstack.node();
 387         input_index = nstack.index();
 388         nstack.pop();
 389       }
 390     }
 391   }
 392   return true;
 393 }
 394 
 395 //------------------------------dom_lca----------------------------------------
 396 // Find least common ancestor in dominator tree
 397 // LCA is a current notion of LCA, to be raised above 'this'.
 398 // As a convenient boundary condition, return 'this' if LCA is null.
 399 // Find the LCA of those two nodes.
 400 Block* Block::dom_lca(Block* LCA) {
 401   if (LCA == nullptr || LCA == this)  return this;
 402 
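       // First walk the deeper block up until both blocks are at the same
       // dominator-tree depth, then walk both up in lockstep until they meet.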
 403   Block* anc = this;
 404   while (anc->_dom_depth > LCA->_dom_depth)
 405     anc = anc->_idom;           // Walk up till anc is as high as LCA
 406 
 407   while (LCA->_dom_depth > anc->_dom_depth)
 408     LCA = LCA->_idom;           // Walk up till LCA is as high as anc
 409 
 410   while (LCA != anc) {          // Walk both up till they are the same
 411     LCA = LCA->_idom;
 412     anc = anc->_idom;
 413   }
 414 
 415   return LCA;
 416 }
 417 
 418 //--------------------------raise_LCA_above_use--------------------------------
 419 // We are placing a definition, and have been given a def->use edge.
 420 // The definition must dominate the use, so move the LCA upward in the
 421 // dominator tree to dominate the use.  If the use is a phi, adjust
 422 // the LCA only with the phi input paths which actually use this def.
 423 static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) {
 424   Block* buse = cfg->get_block_for_node(use);
 425   if (buse == nullptr) return LCA;   // Unused killing Projs have no use block
 426   if (!use->is_Phi())  return buse->dom_lca(LCA);
 427   uint pmax = use->req();       // Number of Phi inputs
 428   // Why doesn't this loop just break after finding the matching input to
 429   // the Phi?  Well...it's like this.  I do not have true def-use/use-def
 430   // chains.  Means I cannot distinguish, from the def-use direction, which
 431   // of many use-defs lead from the same use to the same def.  That is, this
 432   // Phi might have several uses of the same def.  Each use appears in a
 433   // different predecessor block.  But when I enter here, I cannot distinguish
 434   // which use-def edge I should find the predecessor block for.  So I find
 435   // them all.  Means I do a little extra work if a Phi uses the same value
 436   // more than once.
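       // For example (a hypothetical shape): a Phi merging the same def along two
       // paths, Phi(def, def, other), contributes both matching predecessor blocks
       // to the LCA computation below.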
 437   for (uint j=1; j<pmax; j++) { // For all inputs
 438     if (use->in(j) == def) {    // Found matching input?
 439       Block* pred = cfg->get_block_for_node(buse->pred(j));
 440       LCA = pred->dom_lca(LCA);
 441     }
 442   }
 443   return LCA;
 444 }
 445 
 446 //----------------------------raise_LCA_above_marks----------------------------
 447 // Return a new LCA that dominates LCA and any of its marked predecessors.
 448 // Search all my parents up to 'early' (exclusive), looking for predecessors
 449 // which are marked with the given index.  Return the LCA (in the dom tree)
 450 // of all marked blocks.  If there are none marked, return the original
 451 // LCA.
 452 static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) {
 453   Block_List worklist;
 454   worklist.push(LCA);
 455   while (worklist.size() > 0) {
 456     Block* mid = worklist.pop();
 457     if (mid == early)  continue;  // stop searching here
 458 
 459     // Test and set the visited bit.
 460     if (mid->raise_LCA_visited() == mark)  continue;  // already visited
 461 
 462     // Don't process the current LCA, otherwise the search may terminate early
 463     if (mid != LCA && mid->raise_LCA_mark() == mark) {
 464       // Raise the LCA.
 465       LCA = mid->dom_lca(LCA);
 466       if (LCA == early)  break;   // stop searching everywhere
 467       assert(early->dominates(LCA), "early is high enough");
 468       // Resume searching at that point, skipping intermediate levels.
 469       worklist.push(LCA);
 470       if (LCA == mid)
 471         continue; // Don't mark as visited to avoid early termination.
 472     } else {
 473       // Keep searching through this block's predecessors.
 474       for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
 475         Block* mid_parent = cfg->get_block_for_node(mid->pred(j));
 476         worklist.push(mid_parent);
 477       }
 478     }
 479     mid->set_raise_LCA_visited(mark);
 480   }
 481   return LCA;
 482 }
 483 
 484 //--------------------------memory_early_block--------------------------------
 485 // This is a variation of find_deepest_input, the heart of schedule_early.
 486 // Find the "early" block for a load, if we considered only memory and
 487 // address inputs, that is, if other data inputs were ignored.
 488 //
 489 // Because a subset of edges are considered, the resulting block will
 490 // be earlier (at a shallower dom_depth) than the true schedule_early
 491 // point of the node. We compute this earlier block as a more permissive
 492 // site for anti-dependency insertion, but only if subsume_loads is enabled.
 493 static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) {
 494   Node* base;
 495   Node* index;
 496   Node* store = load->in(MemNode::Memory);
 497   load->as_Mach()->memory_inputs(base, index);
 498 
 499   assert(base != NodeSentinel && index != NodeSentinel,
 500          "unexpected base/index inputs");
 501 
 502   Node* mem_inputs[4];
 503   int mem_inputs_length = 0;
 504   if (base != nullptr)  mem_inputs[mem_inputs_length++] = base;
 505   if (index != nullptr) mem_inputs[mem_inputs_length++] = index;
 506   if (store != nullptr) mem_inputs[mem_inputs_length++] = store;
 507 
 508   // In the comparison below, add one to account for the control input,
 509   // which may be null, but always takes up a spot in the in array.
 510   if (mem_inputs_length + 1 < (int) load->req()) {
 511     // This "load" has more inputs than just the memory, base and index inputs.
 512     // For purposes of checking anti-dependences, we need to start
 513     // from the early block of only the address portion of the instruction,
 514     // and ignore other blocks that may have factored into the wider
 515     // schedule_early calculation.
 516     if (load->in(0) != nullptr) mem_inputs[mem_inputs_length++] = load->in(0);
 517 
 518     Block* deepb           = nullptr;        // Deepest block so far
 519     int    deepb_dom_depth = 0;
 520     for (int i = 0; i < mem_inputs_length; i++) {
 521       Block* inb = cfg->get_block_for_node(mem_inputs[i]);
 522       if (deepb_dom_depth < (int) inb->_dom_depth) {
 523         // The new inb must be dominated by the previous deepb.
 524         // The various inputs must be linearly ordered in the dom
 525         // tree, or else there will not be a unique deepest block.
 526         DEBUG_ONLY(assert_dom(deepb, inb, load, cfg));
 527         deepb = inb;                      // Save deepest block
 528         deepb_dom_depth = deepb->_dom_depth;
 529       }
 530     }
 531     early = deepb;
 532   }
 533 
 534   return early;
 535 }
 536 
 537 // This function is used by insert_anti_dependences to find unrelated loads for stores in implicit null checks.
 538 bool PhaseCFG::unrelated_load_in_store_null_block(Node* store, Node* load) {
 539   // We expect an anti-dependence edge from 'load' to 'store', except when
 540   // implicit_null_check() has hoisted 'store' above its early block to
 541   // perform an implicit null check, and 'load' is placed in the null
 542   // block. In this case it is safe to ignore the anti-dependence, as the
 543   // null block is only reached if 'store' tries to write to a null object and
 544   // 'load' reads from a non-null object (there is a preceding check for that),
 545   // so these objects can't be the same.
 546   Block* store_block = get_block_for_node(store);
 547   Block* load_block = get_block_for_node(load);
 548   Node* end = store_block->end();
 549   if (end->is_MachNullCheck() && (end->in(1) == store) && store_block->dominates(load_block)) {
 550     Node* if_true = end->find_out_with(Op_IfTrue);
 551     assert(if_true != nullptr, "null check without null projection");
 552     Node* null_block_region = if_true->find_out_with(Op_Region);
 553     assert(null_block_region != nullptr, "null check without null region");
 554     return get_block_for_node(null_block_region) == load_block;
 555   }
 556   return false;
 557 }
 558 
 559 class DefUseMemStatesQueue : public StackObj {
 560 private:
 561   class DefUsePair : public StackObj {
 562   private:
 563     Node* _def; // memory state
 564     Node* _use; // use of the memory state that also modifies the memory state
 565 
 566   public:
 567     DefUsePair(Node* def, Node* use) :
 568       _def(def), _use(use) {
 569     }
 570 
 571     DefUsePair() :
 572       _def(nullptr), _use(nullptr) {
 573     }
 574 
 575     Node* def() const {
 576       return _def;
 577     }
 578 
 579     Node* use() const {
 580       return _use;
 581     }
 582   };
 583 
 584   GrowableArray<DefUsePair> _queue;
 585   GrowableArray<MergeMemNode*> _worklist_visited; // visited mergemem nodes
 586 
 587   bool already_enqueued(Node* def_mem, PhiNode* use_phi) const {
 588     // def_mem is one of the inputs of use_phi and at least one input of use_phi is
 589     // not def_mem. It's however possible that use_phi has def_mem as input multiple
 590     // times. If that happens, use_phi is recorded as a use of def_mem multiple
 591     // times as well. When PhaseCFG::insert_anti_dependences() goes over
 592     // uses of def_mem and enqueues them for processing, use_phi would then be
 593     // enqueued for processing multiple times when it only needs to be
 594     // processed once. The code below checks if use_phi as a use of def_mem was
 595     // already enqueued to avoid redundant processing of use_phi.
 596     int j = _queue.length()-1;
 597     // If there are any uses of def_mem already enqueued, they were enqueued
 598     // last (all uses of def_mem are processed in one go).
 599     for (; j >= 0; j--) {
 600       const DefUsePair& def_use_pair = _queue.at(j);
 601       if (def_use_pair.def() != def_mem) {
 602         // We're done with the uses of def_mem
 603         break;
 604       }
 605       if (def_use_pair.use() == use_phi) {
 606         return true;
 607       }
 608     }
 609 #ifdef ASSERT
 610     for (; j >= 0; j--) {
 611       const DefUsePair& def_use_pair = _queue.at(j);
 612       assert(def_use_pair.def() != def_mem, "Should be done with the uses of def_mem");
 613     }
 614 #endif
 615     return false;
 616   }
 617 
 618 public:
 619   DefUseMemStatesQueue(ResourceArea* area) {
 620   }
 621 
 622   void push(Node* def_mem_state, Node* use_mem_state) {
 623     if (use_mem_state->is_MergeMem()) {
 624       // Be sure we don't get into combinatorial problems.
 625       if (!_worklist_visited.append_if_missing(use_mem_state->as_MergeMem())) {
 626         return; // already on work list; do not repeat
 627       }
 628     } else if (use_mem_state->is_Phi()) {
 629       // A Phi could have the same mem as input multiple times. If that's the case, we don't need to enqueue it
 630       // more than once. We otherwise allow phis to be repeated; they can merge two relevant states.
 631       if (already_enqueued(def_mem_state, use_mem_state->as_Phi())) {
 632         return;
 633       }
 634     }
 635 
 636     _queue.push(DefUsePair(def_mem_state, use_mem_state));
 637   }
 638 
 639   bool is_nonempty() const {
 640     return _queue.is_nonempty();
 641   }
 642 
 643   Node* top_def() const {
 644     return _queue.top().def();
 645   }
 646 
 647   Node* top_use() const {
 648     return _queue.top().use();
 649   }
 650 
 651   void pop() {
 652     _queue.pop();
 653   }
 654 };
 655 
 656 //--------------------------insert_anti_dependences---------------------------
 657 // A load may need to witness memory that nearby stores can overwrite.
 658 // For each nearby store, either insert an "anti-dependence" edge
 659 // from the load to the store, or else move LCA upward to force the
 660 // load to (eventually) be scheduled in a block above the store.
 661 //
 662 // Do not add edges to stores on distinct control-flow paths;
 663 // only add edges to stores which might interfere.
 664 //
 665 // Return the (updated) LCA.  There will not be any possibly interfering
 666 // store between the load's "early block" and the updated LCA.
 667 // Any stores in the updated LCA will have new precedence edges
 668 // back to the load.  The caller is expected to schedule the load
 669 // in the LCA, in which case the precedence edges will make LCM
 670 // preserve anti-dependences.  The caller may also hoist the load
 671 // above the LCA, if it is not the early block.
 672 Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
 673   ResourceMark rm;
 674   assert(load->needs_anti_dependence_check(), "must be a load of some sort");
 675   assert(LCA != nullptr, "");
 676   DEBUG_ONLY(Block* LCA_orig = LCA);
 677 
 678   // Compute the alias index.  Loads and stores with different alias indices
 679   // do not need anti-dependence edges.
 680   int load_alias_idx = C->get_alias_index(load->adr_type());
 681 #ifdef ASSERT
 682   assert(Compile::AliasIdxTop <= load_alias_idx && load_alias_idx < C->num_alias_types(), "Invalid alias index");
 683   if (load_alias_idx == Compile::AliasIdxBot && C->do_aliasing() &&
 684       (PrintOpto || VerifyAliases ||
 685        (PrintMiscellaneous && (WizardMode || Verbose)))) {
 686     // Load nodes should not consume all of memory.
 687     // Reporting a bottom type indicates a bug in adlc.
 688     // If some particular type of node validly consumes all of memory,
 689     // sharpen the preceding "if" to exclude it, so we can catch bugs here.
 690     tty->print_cr("*** Possible Anti-Dependence Bug:  Load consumes all of memory.");
 691     load->dump(2);
 692     if (VerifyAliases)  assert(load_alias_idx != Compile::AliasIdxBot, "");
 693   }
 694 #endif
 695 
 696   if (!C->alias_type(load_alias_idx)->is_rewritable()) {
 697     // It is impossible to spoil this load by putting stores before it,
 698     // because we know that the stores will never update the value
 699     // which 'load' must witness.
 700     return LCA;
 701   }
 702 
 703   node_idx_t load_index = load->_idx;
 704 
 705   // Note the earliest legal placement of 'load', as determined
 706   // by the unique point in the dom tree where all memory effects
 707   // and other inputs are first available.  (Computed by schedule_early.)
 708   // For normal loads, 'early' is the shallowest place (dom graph wise)
 709   // to look for anti-deps between this load and any store.
 710   Block* early = get_block_for_node(load);
 711 
 712   // If we are subsuming loads, compute an "early" block that only considers
 713   // memory or address inputs. This block may be different than the
 714   // schedule_early block in that it could be at an even shallower depth in the
 715   // dominator tree, and allow for a broader discovery of anti-dependences.
 716   if (C->subsume_loads()) {
 717     early = memory_early_block(load, early, this);
 718   }
 719 
 720   ResourceArea* area = Thread::current()->resource_area();
 721   DefUseMemStatesQueue worklist_def_use_mem_states(area); // prior memory state to store and possible-def to explore
 722   Node_List non_early_stores(area); // all relevant stores outside of early
 723   bool must_raise_LCA = false;
 724 
 725   // 'load' uses some memory state; look for users of the same state.
 726   // Recurse through MergeMem nodes to the stores that use them.
 727 
 728   // Each of these stores is a possible definition of memory
 729   // that 'load' needs to use.  We need to force 'load'
 730   // to occur before each such store.  When the store is in
 731   // the same block as 'load', we insert an anti-dependence
 732   // edge load->store.
 733 
 734   // The relevant stores "nearby" the load consist of a tree rooted
 735   // at initial_mem, with internal nodes of type MergeMem.
 736   // Therefore, the branches visited by the worklist are of this form:
 737   //    initial_mem -> (MergeMem ->)* Memory state modifying node
 738   // Memory state modifying nodes include Store and Phi nodes and any node for which needs_anti_dependence_check()
 739   // returns false.
 740   // The anti-dependence constraints apply only to the fringe of this tree.
 741 
 742   Node* initial_mem = load->in(MemNode::Memory);
 743 
 744   // We don't optimize the memory graph for pinned loads, so we may need to raise the
 745   // root of our search tree through the corresponding slices of MergeMem nodes to
 746   // get to the node that really creates the memory state for this slice.
 747   if (load_alias_idx >= Compile::AliasIdxRaw) {
 748     while (initial_mem->is_MergeMem()) {
 749       MergeMemNode* mm = initial_mem->as_MergeMem();
 750       Node* p = mm->memory_at(load_alias_idx);
 751       if (p != mm->base_memory()) {
 752         initial_mem = p;
 753       } else {
 754         break;
 755       }
 756     }
 757   }
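       // Seed the worklist with the root of the search tree. The def slot is null
       // because initial_mem itself is never treated as a possibly interfering store.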
 758   worklist_def_use_mem_states.push(nullptr, initial_mem);
 759   while (worklist_def_use_mem_states.is_nonempty()) {
 760     // Examine a nearby store to see if it might interfere with our load.
 761     Node* def_mem_state = worklist_def_use_mem_states.top_def();
 762     Node* use_mem_state = worklist_def_use_mem_states.top_use();
 763     worklist_def_use_mem_states.pop();
 764 
 765     uint op = use_mem_state->Opcode();
 766 
 767 #ifdef ASSERT
 768     // CacheWB nodes are peculiar in the sense that they are both anti-dependent and produce memory.
 769     // Allow them to be treated as a store.
 770     bool is_cache_wb = false;
 771     if (use_mem_state->is_Mach()) {
 772       int ideal_op = use_mem_state->as_Mach()->ideal_Opcode();
 773       is_cache_wb = (ideal_op == Op_CacheWB);
 774     }
 775     assert(!use_mem_state->needs_anti_dependence_check() || is_cache_wb, "no loads");
 776 #endif
 777 
 778     // MergeMems do not directly have anti-deps.
 779     // Treat them as internal nodes in a forward tree of memory states,
 780     // the leaves of which are each a 'possible-def'.
 781     if (use_mem_state == initial_mem    // root (exclusive) of tree we are searching
 782         || op == Op_MergeMem    // internal node of tree we are searching
 783         ) {
 784       def_mem_state = use_mem_state;   // It's not a possibly interfering store.
 785       if (use_mem_state == initial_mem)
 786         initial_mem = nullptr;  // only process initial memory once
 787 
 788       for (DUIterator_Fast imax, i = def_mem_state->fast_outs(imax); i < imax; i++) {
 789         use_mem_state = def_mem_state->fast_out(i);
 790         if (use_mem_state->needs_anti_dependence_check()) {
 791           // use_mem_state is also a kind of load (i.e. needs_anti_dependence_check), and it is not a memory state
 792           // modifying node (store, Phi or MergeMem). Hence, load can't be anti-dependent on this node.
 793           continue;
 794         }
 795         worklist_def_use_mem_states.push(def_mem_state, use_mem_state);
 796       }
 797       continue;
 798     }
 799 
 800     if (op == Op_MachProj || op == Op_Catch)   continue;
 801 
 802     // Compute the alias index.  Loads and stores with different alias
 803     // indices do not need anti-dependence edges.  Wide MemBar's are
 804     // anti-dependent on everything (except immutable memories).
 805     const TypePtr* adr_type = use_mem_state->adr_type();
 806     if (!C->can_alias(adr_type, load_alias_idx))  continue;
 807 
 808     // Most slow-path runtime calls do NOT modify Java memory, but
 809     // they can block and so write Raw memory.
 810     if (use_mem_state->is_Mach()) {
 811       MachNode* mstore = use_mem_state->as_Mach();
 812       if (load_alias_idx != Compile::AliasIdxRaw) {
 813         // Check for call into the runtime using the Java calling
 814         // convention (and from there into a wrapper); it has no
 815         // _method.  Can't do this optimization for Native calls because
 816         // they CAN write to Java memory.
 817         if (mstore->ideal_Opcode() == Op_CallStaticJava) {
 818           assert(mstore->is_MachSafePoint(), "");
 819           MachSafePointNode* ms = (MachSafePointNode*) mstore;
 820           assert(ms->is_MachCallJava(), "");
 821           MachCallJavaNode* mcj = (MachCallJavaNode*) ms;
 822           if (mcj->_method == nullptr) {
 823             // These runtime calls do not write to Java visible memory
 824             // (other than Raw) and so do not require anti-dependence edges.
 825             continue;
 826           }
 827         }
 828         // Same for SafePoints: they read/write Raw but only read otherwise.
 829         // This is basically a workaround for SafePoints only defining control
 830         // instead of control + memory.
 831         if (mstore->ideal_Opcode() == Op_SafePoint)
 832           continue;
 833       } else {
 834         // Some raw memory, such as the load of "top" at an allocation,
 835         // can be control dependent on the previous safepoint. See
 836         // comments in GraphKit::allocate_heap() about control input.
 837         // Inserting an anti-dep between such a safepoint and a use
 838         // creates a cycle, and will cause a subsequent failure in
 839         // local scheduling.  (BugId 4919904)
 840         // (%%% How can a control input be a safepoint and not a projection??)
 841         if (mstore->ideal_Opcode() == Op_SafePoint && load->in(0) == mstore)
 842           continue;
 843       }
 844     }
 845 
 846     // Identify a block that the current load must be above,
 847     // or else observe that 'store' is all the way up in the
 848     // earliest legal block for 'load'.  In the latter case,
 849     // immediately insert an anti-dependence edge.
 850     Block* store_block = get_block_for_node(use_mem_state);
 851     assert(store_block != nullptr, "unused killing projections skipped above");
 852 
 853     if (use_mem_state->is_Phi()) {
 854       // Loop-phis need to raise load before input. (Other phis are treated
 855       // as store below.)
 856       //
 857       // 'load' uses memory which is one (or more) of the Phi's inputs.
 858       // It must be scheduled not before the Phi, but rather before
 859       // each of the relevant Phi inputs.
 860       //
 861       // Instead of finding the LCA of all inputs to a Phi that match 'mem',
 862       // we mark each corresponding predecessor block and do a combined
 863       // hoisting operation later (raise_LCA_above_marks).
 864       //
 865       // Do not assert(store_block != early, "Phi merging memory after access")
 866       // PhiNode may be at start of block 'early' with backedge to 'early'
 867       DEBUG_ONLY(bool found_match = false);
 868       for (uint j = PhiNode::Input, jmax = use_mem_state->req(); j < jmax; j++) {
 869         if (use_mem_state->in(j) == def_mem_state) {   // Found matching input?
 870           DEBUG_ONLY(found_match = true);
 871           Block* pred_block = get_block_for_node(store_block->pred(j));
 872           if (pred_block != early) {
 873             // If any predecessor of the Phi matches the load's "early block",
 874             // we do not need a precedence edge between the Phi and 'load'
 875             // since the load will be forced into a block preceding the Phi.
 876             pred_block->set_raise_LCA_mark(load_index);
 877             assert(!LCA_orig->dominates(pred_block) ||
 878                    early->dominates(pred_block), "early is high enough");
 879             must_raise_LCA = true;
 880           } else {
 881             // anti-dependent upon PHI pinned below 'early', no edge needed
 882             LCA = early;             // but cannot schedule below 'early'
 883           }
 884         }
 885       }
 886       assert(found_match, "no worklist bug");
 887     } else if (store_block != early) {
 888       // 'store' is between the current LCA and earliest possible block.
 889       // Label its block, and decide later on how to raise the LCA
 890       // to include the effect on LCA of this store.
 891       // If this store's block gets chosen as the raised LCA, we
 892       // will find him on the non_early_stores list and stick him
 893       // with a precedence edge.
 894       // (But, don't bother if LCA is already raised all the way.)
 895       if (LCA != early && !unrelated_load_in_store_null_block(use_mem_state, load)) {
 896         store_block->set_raise_LCA_mark(load_index);
 897         must_raise_LCA = true;
 898         non_early_stores.push(use_mem_state);
 899       }
 900     } else {
 901       // Found a possibly-interfering store in the load's 'early' block.
 902       // This means 'load' cannot sink at all in the dominator tree.
 903       // Add an anti-dep edge, and squeeze 'load' into the highest block.
 904       assert(use_mem_state != load->find_exact_control(load->in(0)), "dependence cycle found");
 905       if (verify) {
 906         assert(use_mem_state->find_edge(load) != -1 || unrelated_load_in_store_null_block(use_mem_state, load),
 907                "missing precedence edge");
 908       } else {
 909         use_mem_state->add_prec(load);
 910       }
 911       LCA = early;
 912       // This turns off the process of gathering non_early_stores.
 913     }
 914   }
 915   // (Worklist is now empty; all nearby stores have been visited.)
 916 
 917   // Finished if 'load' must be scheduled in its 'early' block.
 918   // If we found any stores there, they have already been given
 919   // precedence edges.
 920   if (LCA == early)  return LCA;
 921 
 922   // We get here only if there are no possibly-interfering stores
 923   // in the load's 'early' block.  Move LCA up above all predecessors
 924   // which contain stores we have noted.
 925   //
 926   // The raised LCA block can be a home to such interfering stores,
 927   // but its predecessors must not contain any such stores.
 928   //
 929   // The raised LCA will be a lower bound for placing the load,
 930   // preventing the load from sinking past any block containing
 931   // a store that may invalidate the memory state required by 'load'.
 932   if (must_raise_LCA)
 933     LCA = raise_LCA_above_marks(LCA, load->_idx, early, this);
 934   if (LCA == early)  return LCA;
 935 
 936   // Insert anti-dependence edges from 'load' to each store
 937   // in the non-early LCA block.
 938   // Mine the non_early_stores list for such stores.
 939   if (LCA->raise_LCA_mark() == load_index) {
 940     while (non_early_stores.size() > 0) {
 941       Node* store = non_early_stores.pop();
 942       Block* store_block = get_block_for_node(store);
 943       if (store_block == LCA) {
 944         // add anti_dependence from store to load in its own block
 945         assert(store != load->find_exact_control(load->in(0)), "dependence cycle found");
 946         if (verify) {
 947           assert(store->find_edge(load) != -1, "missing precedence edge");
 948         } else {
 949           store->add_prec(load);
 950         }
 951       } else {
 952         assert(store_block->raise_LCA_mark() == load_index, "block was marked");
 953         // Any other stores we found must be either inside the new LCA
 954         // or else outside the original LCA.  In the latter case, they
 955         // did not interfere with any use of 'load'.
 956         assert(LCA->dominates(store_block)
 957                || !LCA_orig->dominates(store_block), "no stray stores");
 958       }
 959     }
 960   }
 961 
 962   // Return the highest block containing stores; any stores
 963   // within that block have been given anti-dependence edges.
 964   return LCA;
 965 }
 966 
 967 // This class is used to iterate backwards over the nodes in the graph.
 968 
 969 class Node_Backward_Iterator {
 970 
 971 private:
 972   Node_Backward_Iterator();
 973 
 974 public:
 975   // Constructor for the iterator
 976   Node_Backward_Iterator(Node *root, VectorSet &visited, Node_Stack &stack, PhaseCFG &cfg);
 977 
 978   // Return the next node in the backward traversal, or null when finished
 979   Node *next();
 980 
 981 private:
 982   VectorSet   &_visited;
 983   Node_Stack  &_stack;
 984   PhaseCFG &_cfg;
 985 };
 986 
 987 // Constructor for the Node_Backward_Iterator
 988 Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_Stack &stack, PhaseCFG &cfg)
 989   : _visited(visited), _stack(stack), _cfg(cfg) {
 990   // The stack should contain exactly the root
 991   stack.clear();
 992   stack.push(root, root->outcnt());
 993 
 994   // Clear the visited bits
 995   visited.clear();
 996 }
 997 
 998 // Iteration step of the Node_Backward_Iterator
 999 Node *Node_Backward_Iterator::next() {
1000 
1001   // If the _stack is empty, then just return null: finished.
1002   if ( !_stack.size() )
1003     return nullptr;
1004 
1005   // I visit unvisited not-anti-dependence users first, then anti-dependent
1006   // children next. I iterate backwards to support removal of nodes.
1007   // The stack holds states consisting of 3 values:
1008   // current Def node, flag which indicates 1st/2nd pass, index of current out edge
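       // The 1st/2nd-pass flag is packed into the low bit of the stacked Node
       // pointer (Node pointers are at least 2-byte aligned), so each state fits
       // in the two slots of a Node_Stack entry.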
1009   Node *self = (Node*)(((uintptr_t)_stack.node()) & ~1);
1010   bool iterate_anti_dep = (((uintptr_t)_stack.node()) & 1);
1011   uint idx = MIN2(_stack.index(), self->outcnt()); // Support removal of nodes.
1012   _stack.pop();
1013 
1014   // I cycle here when I am entering a deeper level of recursion.
1015   // The key variable 'self' was set prior to jumping here.
1016   while( 1 ) {
1017 
1018     _visited.set(self->_idx);
1019 
1020     // Now schedule all uses as late as possible.
1021     const Node* src = self->is_Proj() ? self->in(0) : self;
1022     uint src_rpo = _cfg.get_block_for_node(src)->_rpo;
1023 
1024     // Schedule all nodes in a post-order visit
1025     Node *unvisited = nullptr;  // Unvisited anti-dependent Node, if any
1026 
1027     // Scan for unvisited nodes
1028     while (idx > 0) {
1029       // For all uses, schedule late
1030       Node* n = self->raw_out(--idx); // Use
1031 
1032       // Skip already visited children
1033       if ( _visited.test(n->_idx) )
1034         continue;
1035 
1036       // do not traverse backward control edges
1037       Node *use = n->is_Proj() ? n->in(0) : n;
1038       uint use_rpo = _cfg.get_block_for_node(use)->_rpo;
1039 
1040       if ( use_rpo < src_rpo )
1041         continue;
1042 
1043       // Phi nodes always precede uses in a basic block
1044       if ( use_rpo == src_rpo && use->is_Phi() )
1045         continue;
1046 
1047       unvisited = n;      // Found unvisited
1048 
1049       // Check for possible-anti-dependent
1050       // 1st pass: No such nodes, 2nd pass: Only such nodes.
1051       if (n->needs_anti_dependence_check() == iterate_anti_dep) {
1052         unvisited = n;      // Found unvisited
1053         break;
1054       }
1055     }
1056 
1057     // Did I find an unvisited not-anti-dependent Node?
1058     if (!unvisited) {
1059       if (!iterate_anti_dep) {
1060         // 2nd pass: Iterate over nodes which needs_anti_dependence_check.
1061         iterate_anti_dep = true;
1062         idx = self->outcnt();
1063         continue;
1064       }
1065       break;                  // All done with children; post-visit 'self'
1066     }
1067 
1068     // Visit the unvisited Node.  Contains the obvious push to
1069     // indicate I'm entering a deeper level of recursion.  I push the
1070     // old state onto the _stack and set a new state and loop (recurse).
1071     _stack.push((Node*)((uintptr_t)self | (uintptr_t)iterate_anti_dep), idx);
1072     self = unvisited;
1073     iterate_anti_dep = false;
1074     idx = self->outcnt();
1075   } // End recursion loop
1076 
1077   return self;
1078 }
1079 
1080 //------------------------------ComputeLatenciesBackwards----------------------
1081 // Compute the latency of all the instructions.
1082 void PhaseCFG::compute_latencies_backwards(VectorSet &visited, Node_Stack &stack) {
1083 #ifndef PRODUCT
1084   if (trace_opto_pipelining())
1085     tty->print("\n#---- ComputeLatenciesBackwards ----\n");
1086 #endif
1087 
1088   Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
1089   Node *n;
1090 
1091   // Walk over all the nodes from last to first
1092   while ((n = iter.next())) {
1093     // Set the latency for the definitions of this instruction
1094     partial_latency_of_defs(n);
1095   }
1096 } // end ComputeLatenciesBackwards
1097 
1098 //------------------------------partial_latency_of_defs------------------------
1099 // Compute the latency impact of this node on all defs.  This computes
1100 // a number that increases as we approach the beginning of the routine.
1101 void PhaseCFG::partial_latency_of_defs(Node *n) {
1102   // Set the latency for this instruction
1103 #ifndef PRODUCT
1104   if (trace_opto_pipelining()) {
1105     tty->print("# latency_to_inputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
1106     dump();
1107   }
1108 #endif
1109 
1110   if (n->is_Proj()) {
1111     n = n->in(0);
1112   }
1113 
1114   if (n->is_Root()) {
1115     return;
1116   }
1117 
1118   uint nlen = n->len();
1119   uint use_latency = get_latency_for_node(n);
1120   uint use_pre_order = get_block_for_node(n)->_pre_order;
1121 
1122   for (uint j = 0; j < nlen; j++) {
1123     Node *def = n->in(j);
1124 
1125     if (!def || def == n) {
1126       continue;
1127     }
1128 
1129     // Walk backwards thru projections
1130     if (def->is_Proj()) {
1131       def = def->in(0);
1132     }
1133 
1134 #ifndef PRODUCT
1135     if (trace_opto_pipelining()) {
1136       tty->print("#    in(%2d): ", j);
1137       def->dump();
1138     }
1139 #endif
1140 
1141     // If the defining block is not known, assume it is ok
1142     Block *def_block = get_block_for_node(def);
1143     uint def_pre_order = def_block ? def_block->_pre_order : 0;
1144 
1145     if ((use_pre_order <  def_pre_order) || (use_pre_order == def_pre_order && n->is_Phi())) {
1146       continue;
1147     }
1148 
1149     uint delta_latency = n->latency(j);
1150     uint current_latency = delta_latency + use_latency;
1151 
1152     if (get_latency_for_node(def) < current_latency) {
1153       set_latency_for_node(def, current_latency);
1154     }
1155 
1156 #ifndef PRODUCT
1157     if (trace_opto_pipelining()) {
1158       tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d", use_latency, j, delta_latency, current_latency, def->_idx, get_latency_for_node(def));
1159     }
1160 #endif
1161   }
1162 }
1163 
1164 //------------------------------latency_from_use-------------------------------
1165 // Compute the latency of a specific use
1166 int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
1167   // If self-reference, return no latency
1168   if (use == n || use->is_Root()) {
1169     return 0;
1170   }
1171 
1172   uint def_pre_order = get_block_for_node(def)->_pre_order;
1173   uint latency = 0;
1174 
1175   // If the use is not a projection, then it is simple...
1176   if (!use->is_Proj()) {
1177 #ifndef PRODUCT
1178     if (trace_opto_pipelining()) {
1179       tty->print("#    out(): ");
1180       use->dump();
1181     }
1182 #endif
1183 
1184     uint use_pre_order = get_block_for_node(use)->_pre_order;
1185 
1186     if (use_pre_order < def_pre_order)
1187       return 0;
1188 
1189     if (use_pre_order == def_pre_order && use->is_Phi())
1190       return 0;
1191 
1192     uint nlen = use->len();
1193     uint nl = get_latency_for_node(use);
1194 
1195     for ( uint j=0; j<nlen; j++ ) {
1196       if (use->in(j) == n) {
1197         // Change this if we want local latencies
1198         uint ul = use->latency(j);
1199         uint  l = ul + nl;
1200         if (latency < l) latency = l;
1201 #ifndef PRODUCT
1202         if (trace_opto_pipelining()) {
1203           tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, latency = %d",
1204                         nl, j, ul, l, latency);
1205         }
1206 #endif
1207       }
1208     }
1209   } else {
1210     // This is a projection, just grab the latency of the use(s)
1211     for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
1212       uint l = latency_from_use(use, def, use->fast_out(j));
1213       if (latency < l) latency = l;
1214     }
1215   }
1216 
1217   return latency;
1218 }
1219 
1220 //------------------------------latency_from_uses------------------------------
1221 // Compute the latency of this instruction relative to all of its uses.
1222 // This computes a number that increases as we approach the beginning of the
1223 // routine.
1224 void PhaseCFG::latency_from_uses(Node *n) {
1225   // Set the latency for this instruction
1226 #ifndef PRODUCT
1227   if (trace_opto_pipelining()) {
1228     tty->print("# latency_from_outputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
1229     dump();
1230   }
1231 #endif
1232   uint latency=0;
1233   const Node *def = n->is_Proj() ? n->in(0): n;
1234 
1235   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1236     uint l = latency_from_use(n, def, n->fast_out(i));
1237 
1238     if (latency < l) latency = l;
1239   }
1240 
1241   set_latency_for_node(n, latency);
1242 }
1243 
1244 //------------------------------is_cheaper_block-------------------------
1245 // Check if a block between the early and LCA blocks of uses is cheaper by the
1246 // frequency-based, latency-based, or random (StressGCM) policy
1247 bool PhaseCFG::is_cheaper_block(Block* LCA, Node* self, uint target_latency,
1248                                 uint end_latency, double least_freq,
1249                                 int cand_cnt, bool in_latency) {
1250   if (StressGCM) {
1251     // Should be randomly accepted in stress mode
1252     return C->randomized_select(cand_cnt);
1253   }
1254 
1255   // Better Frequency
1256   if (LCA->_freq < least_freq) {
1257     return true;
1258   }
1259 
1260   // Otherwise, choose with latency
1261   const double delta = 1 + PROB_UNLIKELY_MAG(4);
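       // The small delta tolerance keeps a marginally higher frequency from
       // blocking latency-driven hoisting.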
1262   if (!in_latency                     &&  // No block containing latency
1263       LCA->_freq < least_freq * delta &&  // No worse frequency
1264       target_latency >= end_latency   &&  // within latency range
1265       !self->is_iteratively_computed()    // But don't hoist IV increments
1266             // because they may end up above other uses of their phi forcing
1267             // their result register to be different from their input.
1268   ) {
1269     return true;
1270   }
1271 
1272   return false;
1273 }
1274 
1275 //------------------------------hoist_to_cheaper_block-------------------------
1276 // Pick a block for node self, between early and LCA block of uses, that is a
1277 // cheaper alternative to LCA.
1278 Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
1279   Block* least       = LCA;
1280   double least_freq  = least->_freq;
1281   uint target        = get_latency_for_node(self);
1282   uint start_latency = get_latency_for_node(LCA->head());
1283   uint end_latency   = get_latency_for_node(LCA->get_node(LCA->end_idx()));
1284   bool in_latency    = (target <= start_latency);
1285   const Block* root_block = get_block_for_node(_root);
1286 
1287   // Turn off latency scheduling if scheduling is just plain off
1288   if (!C->do_scheduling())
1289     in_latency = true;
1290 
1291   // Do not hoist (to cover latency) instructions which target a
1292   // single register.  Hoisting stretches the live range of the
1293   // single register and may force spilling.
1294   MachNode* mach = self->is_Mach() ? self->as_Mach() : nullptr;
1295   if (mach && mach->out_RegMask().is_bound1() && mach->out_RegMask().is_NotEmpty())
1296     in_latency = true;
1297 
1298 #ifndef PRODUCT
1299   if (trace_opto_pipelining()) {
1300     tty->print("# Find cheaper block for latency %d: ", get_latency_for_node(self));
1301     self->dump();
1302     tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
1303       LCA->_pre_order,
1304       LCA->head()->_idx,
1305       start_latency,
1306       LCA->get_node(LCA->end_idx())->_idx,
1307       end_latency,
1308       least_freq);
1309   }
1310 #endif
1311 
1312   int cand_cnt = 0;  // number of candidates tried
1313 
1314   // Walk up the dominator tree from LCA (lowest common ancestor) to the
1315   // earliest legal location. Capture the block with the least execution frequency,
1316   // choose a random block if -XX:+StressGCM, or apply the latency-based policy.
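       //
       // A rough sketch of the walk below (illustrative pseudocode only):
       //
       //   for (Block* b = LCA->_idom; ; b = b->_idom) {
       //     if (is_cheaper_block(b, self, ...)) least = b;  // remember best so far
       //     if (b == early) break;                          // stop at the earliest legal block
       //   }
       //
       // with extra filtering: machine nodes are never hoisted into the root
       // block, and memory writers skip blocks in deeper loops than their home.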
1317   while (LCA != early) {
1318     LCA = LCA->_idom;         // Follow up the dominator tree
1319 
1320     if (LCA == nullptr) {
1321       // Bailout without retry
1322       assert(false, "graph should be schedulable");
1323       C->record_method_not_compilable("late schedule failed: LCA is null");
1324       return least;
1325     }
1326 
1327     // Don't hoist machine instructions to the root basic block
1328     if (mach && LCA == root_block)
1329       break;
1330 
1331     if (self->is_memory_writer() &&
1332         (LCA->_loop->depth() > early->_loop->depth())) {
1333       // LCA is an invalid placement for a memory writer: choosing it would
1334       // cause memory interference, as illustrated in schedule_late().
1335       continue;
1336     }
1337     verify_memory_writer_placement(LCA, self);
1338 
1339     uint start_lat = get_latency_for_node(LCA->head());
1340     uint end_idx   = LCA->end_idx();
1341     uint end_lat   = get_latency_for_node(LCA->get_node(end_idx));
1342     double LCA_freq = LCA->_freq;
1343 #ifndef PRODUCT
1344     if (trace_opto_pipelining()) {
1345       tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
1346         LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq);
1347     }
1348 #endif
1349     cand_cnt++;
1350     if (is_cheaper_block(LCA, self, target, end_lat, least_freq, cand_cnt, in_latency)) {
1351       least = LCA;            // Found cheaper block
1352       least_freq = LCA_freq;
1353       start_latency = start_lat;
1354       end_latency = end_lat;
1355       if (target <= start_lat)
1356         in_latency = true;
1357     }
1358   }
1359 
1360 #ifndef PRODUCT
1361   if (trace_opto_pipelining()) {
1362     tty->print_cr("#  Choose block B%d with start latency=%d and freq=%g",
1363       least->_pre_order, start_latency, least_freq);
1364   }
1365 #endif
1366 
1367   // See if the latency needs to be updated
1368   if (target < end_latency) {
1369 #ifndef PRODUCT
1370     if (trace_opto_pipelining()) {
1371       tty->print_cr("#  Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
1372     }
1373 #endif
1374     set_latency_for_node(self, end_latency);
1375     partial_latency_of_defs(self);
1376   }
1377 
1378   return least;
1379 }
1380 
1381 
1382 //------------------------------schedule_late-----------------------------------
1383 // Now schedule all nodes as LATE as possible.  This is the LCA in the
1384 // dominator tree of all USES of a value.  Pick the block with the least
1385 // loop nesting depth that is lowest in the dominator tree.
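     //
     // For example (an illustrative straight-line CFG, not from a real graph):
     //
     //   B1:  v = ...              <- earliest legal block (early) for v
     //   B2:  frequently executed  <- high frequency
     //   B3:  ... = use1(v)
     //   B4:  ... = use2(v)
     //
     // The dominator-tree LCA of the uses is B3, so v may be placed anywhere on
     // the path B3 -> B2 -> B1; hoist_to_cheaper_block() then picks the least
     // frequent of these blocks (here, presumably B1 or B3 rather than B2).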
1386 extern const char must_clone[];
1387 void PhaseCFG::schedule_late(VectorSet &visited, Node_Stack &stack) {
1388 #ifndef PRODUCT
1389   if (trace_opto_pipelining())
1390     tty->print("\n#---- schedule_late ----\n");
1391 #endif
1392 
1393   Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
1394   Node *self;
1395 
1396   // Walk over all the nodes from last to first
1397   while ((self = iter.next())) {
1398     Block* early = get_block_for_node(self); // Earliest legal placement
1399 
1400     if (self->is_top()) {
1401       // Top node goes in bb #2 with other constants.
1402       // It must be special-cased, because it has no out edges.
1403       early->add_inst(self);
1404       continue;
1405     }
1406 
1407     // No uses, just terminate
1408     if (self->outcnt() == 0) {
1409       assert(self->is_MachProj(), "sanity");
1410       continue;                   // Must be a dead machine projection
1411     }
1412 
1413     // If node is pinned in the block, then no scheduling can be done.
1414     if( self->pinned() )          // Pinned in block?
1415       continue;
1416 
1417 #ifdef ASSERT
1418     // Assert that memory writers (e.g. stores) have a "home" block (the block
1419     // given by their control input), and that this block corresponds to their
1420     // earliest possible placement. This guarantees that
1421     // hoist_to_cheaper_block() will always have at least one valid choice.
1422     if (self->is_memory_writer()) {
1423       assert(find_block_for_node(self->in(0)) == early,
1424              "The home of a memory writer must also be its earliest placement");
1425     }
1426 #endif
1427 
1428     MachNode* mach = self->is_Mach() ? self->as_Mach() : nullptr;
1429     if (mach) {
1430       switch (mach->ideal_Opcode()) {
1431       case Op_CreateEx:
1432         // Don't move exception creation
1433         early->add_inst(self);
1434         continue;
1436       case Op_CheckCastPP: {
1437         // Don't move CheckCastPP nodes away from their input, if the input
1438         // is a rawptr (5071820).
1439         Node *def = self->in(1);
1440         if (def != nullptr && def->bottom_type()->base() == Type::RawPtr) {
1441           early->add_inst(self);
1442 #ifdef ASSERT
1443           _raw_oops.push(def);
1444 #endif
1445           continue;
1446         }
1447         break;
1448       }
1449       default:
1450         break;
1451       }
1452       if (C->has_irreducible_loop() && self->is_memory_writer()) {
1453         // If the CFG is irreducible, place memory writers in their home block.
1454         // This prevents hoist_to_cheaper_block() from accidentally placing such
1455         // nodes into deeper loops, as in the following example:
1456         //
1457         // Home placement of store in B1 (loop L1):
1458         //
1459         // B1 (L1):
1460         //   m1 <- ..
1461         //   m2 <- store m1, ..
1462         // B2 (L2):
1463         //   jump B2
1464         // B3 (L1):
1465         //   .. <- .. m2, ..
1466         //
1467         // Wrong "hoisting" of store to B2 (in loop L2, child of L1):
1468         //
1469         // B1 (L1):
1470         //   m1 <- ..
1471         // B2 (L2):
1472         //   m2 <- store m1, ..
1473         //   # Wrong: m1 and m2 interfere at this point.
1474         //   jump B2
1475         // B3 (L1):
1476         //   .. <- .. m2, ..
1477         //
1478         // This "hoist inversion" can happen due to different factors such as
1479         // inaccurate estimation of frequencies for irreducible CFGs, and loops
1480         // with always-taken exits in reducible CFGs. In the reducible case,
1481         // hoist inversion is prevented by discarding invalid blocks (those in
1482         // deeper loops than the home block). In the irreducible case, the
1483         // invalid blocks cannot be identified due to incomplete loop nesting
1484         // information, hence a conservative solution is taken.
1485 #ifndef PRODUCT
1486         if (trace_opto_pipelining()) {
1487           tty->print_cr("# Irreducible loops: schedule in home block B%d:",
1488                         early->_pre_order);
1489           self->dump();
1490         }
1491 #endif
1492         schedule_node_into_block(self, early);
1493         continue;
1494       }
1495     }
1496 
1497     // Gather LCA of all uses
1498     Block *LCA = nullptr;
1499     {
1500       for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
1501         // For all uses, find LCA
1502         Node* use = self->fast_out(i);
1503         LCA = raise_LCA_above_use(LCA, use, self, this);
1504       }
1505       guarantee(LCA != nullptr, "There must be a LCA");
1506     }  // (Hide defs of imax, i from rest of block.)
1507 
1508     // Place temps in the block of their use.  This isn't a
1509     // requirement for correctness but it reduces useless
1510     // interference between temps and other nodes.
1511     if (mach != nullptr && mach->is_MachTemp()) {
1512       map_node_to_block(self, LCA);
1513       LCA->add_inst(self);
1514       continue;
1515     }
1516 
1517     // Check if 'self' could be anti-dependent on memory
1518     if (self->needs_anti_dependence_check()) {
1519       // Hoist LCA above possible-defs and insert anti-dependences to
1520       // defs in new LCA block.
1521       LCA = insert_anti_dependences(LCA, self);
1522     }
1523 
1524     if (early->_dom_depth > LCA->_dom_depth) {
1525       // Somehow the LCA has moved above the earliest legal point.
1526       // (One way this can happen is via memory_early_block.)
1527       if (C->subsume_loads() == true && !C->failing()) {
1528         // Retry with subsume_loads == false
1529         // If this is the first failure, the sentinel string will "stick"
1530         // to the Compile object, and the C2Compiler will see it and retry.
1531         C->record_failure(C2Compiler::retry_no_subsuming_loads());
1532       } else {
1533         // Bailout without retry when (early->_dom_depth > LCA->_dom_depth)
1534         assert(C->failure_is_artificial(), "graph should be schedulable");
1535         C->record_method_not_compilable("late schedule failed: incorrect graph" DEBUG_ONLY(COMMA true));
1536       }
1537       return;
1538     }
1539 
1540     if (self->is_memory_writer()) {
1541       // If the LCA of a memory writer is a descendant of its home loop, hoist
1542       // it into a valid placement.
1543       while (LCA->_loop->depth() > early->_loop->depth()) {
1544         LCA = LCA->_idom;
1545       }
1546       assert(LCA != nullptr, "a valid LCA must exist");
1547       verify_memory_writer_placement(LCA, self);
1548     }
1549 
1550     // If there is no opportunity to hoist, then we're done.
1551     // In stress mode, try to hoist even the single operations.
1552     bool try_to_hoist = StressGCM || (LCA != early);
1553 
1554     // Nodes that must be cloned stay next to their uses; no hoisting allowed.
1555     // Also cannot hoist nodes that alter memory or are otherwise not
1556     // allocatable (hoisting can make a value live longer, leading to
1557     // anti- and output-dependence problems which are normally resolved
1558     // by the register allocator giving everyone a different register).
1559     if (mach != nullptr && must_clone[mach->ideal_Opcode()])
1560       try_to_hoist = false;
1561 
1562     Block* late = nullptr;
1563     if (try_to_hoist) {
1564       // Now find the block with the least execution frequency.
1565       // Start at the latest schedule and work up to the earliest schedule
1566       // in the dominator tree.  Thus the Node will dominate all its uses.
1567       late = hoist_to_cheaper_block(LCA, early, self);
1568     } else {
1569       // Just use the LCA of the uses.
1570       late = LCA;
1571     }
1572 
1573     // Put the node into target block
1574     schedule_node_into_block(self, late);
1575 
1576 #ifdef ASSERT
1577     if (self->needs_anti_dependence_check()) {
1578       // since precedence edges are only inserted when we're sure they
1579       // are needed make sure that after placement in a block we don't
1580       // need any new precedence edges.
1581       verify_anti_dependences(late, self);
1582     }
1583 #endif
1584   } // Loop until all nodes have been visited
1585 
1586 } // end ScheduleLate
1587 
1588 //------------------------------GlobalCodeMotion-------------------------------
1589 void PhaseCFG::global_code_motion() {
1590   ResourceMark rm;
1591 
1592 #ifndef PRODUCT
1593   if (trace_opto_pipelining()) {
1594     tty->print("\n---- Start GlobalCodeMotion ----\n");
1595   }
1596 #endif
1597 
1598   // Initialize the node to block mapping for things on the proj_list
1599   for (uint i = 0; i < _matcher.number_of_projections(); i++) {
1600     unmap_node_from_block(_matcher.get_projection(i));
1601   }
1602 
1603   // Set the basic block for Nodes pinned into blocks
1604   VectorSet visited;
1605   schedule_pinned_nodes(visited);
1606 
1607   // Find the earliest Block any instruction can be placed in.  Some
1608   // instructions are pinned into Blocks.  Unpinned instructions can
1609   // appear in the last block in which all their inputs occur.
1610   visited.clear();
1611   Node_Stack stack((C->live_nodes() >> 2) + 16); // pre-grow
1612   if (!schedule_early(visited, stack)) {
1613     // Bailout without retry
1614     assert(false, "early schedule failed");
1615     C->record_method_not_compilable("early schedule failed");
1616     return;
1617   }
1618 
1619   // Build Def-Use edges.
1620   // Compute the latency information (via backwards walk) for all the
1621   // instructions in the graph
1622   _node_latency = new GrowableArray<uint>(); // resource_area allocation
1623 
1624   if (C->do_scheduling()) {
1625     compute_latencies_backwards(visited, stack);
1626   }
1627 
1628   // Now schedule all nodes as LATE as possible.  This is the LCA in the
1629   // dominator tree of all USES of a value.  Pick the block with the least
1630   // loop nesting depth that is lowest in the dominator tree.
1631   // ( visited.clear() called in schedule_late()->Node_Backward_Iterator() )
1632   schedule_late(visited, stack);
1633   if (C->failing()) {
1634     return;
1635   }
1636 
1637 #ifndef PRODUCT
1638   if (trace_opto_pipelining()) {
1639     tty->print("\n---- Detect implicit null checks ----\n");
1640   }
1641 #endif
1642 
1643   // Detect implicit-null-check opportunities.  Basically, find null checks
1644   // with suitable memory ops nearby.  Use the memory op to do the null check.
1645   // I can generate a memory op if there is not one nearby.
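       // For example (a sketch): a pattern such as
       //
       //   if (p == nullptr) { uncommon_trap(); } else { x = p->field; }
       //
       // can drop the explicit test: the load from p is allowed to fault, and
       // the SEGV handler routes the (rare) null case to the uncommon trap.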
1646   if (C->is_method_compilation()) {
1647     // By reversing the loop direction we get a very minor gain on mpegaudio.
1648     // Feel free to revert to a forward loop for clarity.
1649     // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
1650     for (int i = _matcher._null_check_tests.size() - 2; i >= 0; i -= 2) {
1651       Node* proj = _matcher._null_check_tests[i];
1652       Node* val  = _matcher._null_check_tests[i + 1];
1653       Block* block = get_block_for_node(proj);
1654       implicit_null_check(block, proj, val, C->allowed_deopt_reasons());
1655       // The implicit_null_check will only perform the transformation
1656       // if the null branch is truly uncommon, *and* it leads to an
1657       // uncommon trap.  Combined with the too_many_traps guards
1658       // above, this prevents SEGV storms reported in 6366351,
1659       // by recompiling offending methods without this optimization.
1660     }
1661   }
1662 
1663   bool block_size_threshold_ok = false;
1664   intptr_t *recalc_pressure_nodes = nullptr;
1665   if (OptoRegScheduling) {
1666     for (uint i = 0; i < number_of_blocks(); i++) {
1667       Block* block = get_block(i);
1668       if (block->number_of_nodes() > 10) {
1669         block_size_threshold_ok = true;
1670         break;
1671       }
1672     }
1673   }
1674 
1675   // Register-pressure-aware local scheduling is only enabled when OptoRegScheduling
1676   // is on and at least one block is large enough (see the threshold above) to benefit.
1677   PhaseChaitin regalloc(C->unique(), *this, _matcher, true);
1678   ResourceArea live_arena(mtCompiler);      // Arena for liveness
1679   ResourceMark rm_live(&live_arena);
1680   PhaseLive live(*this, regalloc._lrg_map.names(), &live_arena, true);
1681   PhaseIFG ifg(&live_arena);
1682   if (OptoRegScheduling && block_size_threshold_ok) {
1683     regalloc.mark_ssa();
1684     Compile::TracePhase tp("computeLive", &timers[_t_computeLive]);
1685     rm_live.reset_to_mark();           // Reclaim working storage
1686     IndexSet::reset_memory(C, &live_arena);
1687     uint node_size = regalloc._lrg_map.max_lrg_id();
1688     ifg.init(node_size); // Empty IFG
1689     regalloc.set_ifg(ifg);
1690     regalloc.set_live(live);
1691     regalloc.gather_lrg_masks(false);    // Collect LRG masks
1692     live.compute(node_size); // Compute liveness
1693 
1694     recalc_pressure_nodes = NEW_RESOURCE_ARRAY(intptr_t, node_size);
1695     for (uint i = 0; i < node_size; i++) {
1696       recalc_pressure_nodes[i] = 0;
1697     }
1698   }
1699   _regalloc = &regalloc;
1700 
1701 #ifndef PRODUCT
1702   if (trace_opto_pipelining()) {
1703     tty->print("\n---- Start Local Scheduling ----\n");
1704   }
1705 #endif
1706 
1707   // Schedule locally.  Right now a simple topological sort.
1708   // Later, do a real latency aware scheduler.
1709   GrowableArray<int> ready_cnt(C->unique(), C->unique(), -1);
1710   visited.reset();
1711   for (uint i = 0; i < number_of_blocks(); i++) {
1712     Block* block = get_block(i);
1713     if (!schedule_local(block, ready_cnt, visited, recalc_pressure_nodes)) {
1714       if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
1715         assert(C->failure_is_artificial(), "local schedule failed");
1716         C->record_method_not_compilable("local schedule failed" DEBUG_ONLY(COMMA true));
1717       }
1718       _regalloc = nullptr;
1719       return;
1720     }
1721   }
1722   _regalloc = nullptr;
1723 
1724   // If we inserted any instructions between a Call and its CatchNode,
1725   // clone the instructions on all paths below the Catch.
1726   for (uint i = 0; i < number_of_blocks(); i++) {
1727     Block* block = get_block(i);
1728     call_catch_cleanup(block);
1729   }
1730 
1731 #ifndef PRODUCT
1732   if (trace_opto_pipelining()) {
1733     tty->print("\n---- After GlobalCodeMotion ----\n");
1734     for (uint i = 0; i < number_of_blocks(); i++) {
1735       Block* block = get_block(i);
1736       block->dump();
1737     }
1738   }
1739 #endif
1740   // Dead.
1741   _node_latency = (GrowableArray<uint> *)((intptr_t)0xdeadbeef);
1742 }
1743 
1744 bool PhaseCFG::do_global_code_motion() {
1745 
1746   build_dominator_tree();
1747   if (C->failing()) {
1748     return false;
1749   }
1750 
1751   NOT_PRODUCT( C->verify_graph_edges(); )
1752 
1753   estimate_block_frequency();
1754 
1755   global_code_motion();
1756 
1757   if (C->failing()) {
1758     return false;
1759   }
1760 
1761   return true;
1762 }
1763 
1764 //------------------------------Estimate_Block_Frequency-----------------------
1765 // Estimate block frequencies based on IfNode probabilities.
1766 void PhaseCFG::estimate_block_frequency() {
1767 
1768   // Force conditional branches leading to uncommon traps to be unlikely,
1769   // not because we get to the uncommon_trap with less relative frequency,
1770   // but because an uncommon_trap typically causes a deopt, so we only get
1771   // there once.
1772   if (C->do_freq_based_layout()) {
1773     Block_List worklist;
1774     Block* root_blk = get_block(0);
1775     for (uint i = 1; i < root_blk->num_preds(); i++) {
1776       Block *pb = get_block_for_node(root_blk->pred(i));
1777       if (pb->has_uncommon_code()) {
1778         worklist.push(pb);
1779       }
1780     }
1781     while (worklist.size() > 0) {
1782       Block* uct = worklist.pop();
1783       if (uct == get_root_block()) {
1784         continue;
1785       }
1786       for (uint i = 1; i < uct->num_preds(); i++) {
1787         Block *pb = get_block_for_node(uct->pred(i));
1788         if (pb->_num_succs == 1) {
1789           worklist.push(pb);
1790         } else if (pb->num_fall_throughs() == 2) {
1791           pb->update_uncommon_branch(uct);
1792         }
1793       }
1794     }
1795   }
1796 
1797   // Create the loop tree and calculate loop depth.
1798   _root_loop = create_loop_tree();
1799   _root_loop->compute_loop_depth(0);
1800 
1801   // Compute block frequency of each block, relative to a single loop entry.
1802   _root_loop->compute_freq();
1803 
1804   // Adjust all frequencies to be relative to a single method entry
1805   _root_loop->_freq = 1.0;
1806   _root_loop->scale_freq();
1807 
1808   // Save the outermost loop frequency for the LRG frequency threshold
1809   _outer_loop_frequency = _root_loop->outer_loop_freq();
1810 
1811   // force paths ending at uncommon traps to be infrequent
1812   if (!C->do_freq_based_layout()) {
1813     Block_List worklist;
1814     Block* root_blk = get_block(0);
1815     for (uint i = 1; i < root_blk->num_preds(); i++) {
1816       Block *pb = get_block_for_node(root_blk->pred(i));
1817       if (pb->has_uncommon_code()) {
1818         worklist.push(pb);
1819       }
1820     }
1821     while (worklist.size() > 0) {
1822       Block* uct = worklist.pop();
1823       uct->_freq = PROB_MIN;
1824       for (uint i = 1; i < uct->num_preds(); i++) {
1825         Block *pb = get_block_for_node(uct->pred(i));
1826         if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
1827           worklist.push(pb);
1828         }
1829       }
1830     }
1831   }
1832 
1833 #ifdef ASSERT
1834   for (uint i = 0; i < number_of_blocks(); i++) {
1835     Block* b = get_block(i);
1836     assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency");
1837   }
1838 #endif
1839 
1840 #ifndef PRODUCT
1841   if (PrintCFGBlockFreq) {
1842     tty->print_cr("CFG Block Frequencies");
1843     _root_loop->dump_tree();
1844     if (Verbose) {
1845       tty->print_cr("PhaseCFG dump");
1846       dump();
1847       tty->print_cr("Node dump");
1848       _root->dump(99999);
1849     }
1850   }
1851 #endif
1852 }
1853 
1854 //----------------------------create_loop_tree--------------------------------
1855 // Create a loop tree from the CFG
1856 CFGLoop* PhaseCFG::create_loop_tree() {
1857 
1858 #ifdef ASSERT
1859   assert(get_block(0) == get_root_block(), "first block should be root block");
1860   for (uint i = 0; i < number_of_blocks(); i++) {
1861     Block* block = get_block(i);
1862     // Check that the _loop fields are clear...we could clear them if not.
1863     assert(block->_loop == nullptr, "clear _loop expected");
1864     // Sanity check that the RPO numbering is reflected in the _blocks array.
1865     // It doesn't have to be for the loop tree to be built, but if it is not,
1866     // then the blocks have been reordered since dom graph building...which
1867     // calls the RPO numbering into question.
1868     assert(block->_rpo == i, "unexpected reverse post order number");
1869   }
1870 #endif
1871 
1872   int idct = 0;
1873   CFGLoop* root_loop = new CFGLoop(idct++);
1874 
1875   Block_List worklist;
1876 
1877   // Assign blocks to loops
1878   for(uint i = number_of_blocks() - 1; i > 0; i-- ) { // skip Root block
1879     Block* block = get_block(i);
1880 
1881     if (block->head()->is_Loop()) {
1882       Block* loop_head = block;
1883       assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
1884       Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
1885       Block* tail = get_block_for_node(tail_n);
1886 
1887       // Defensively filter out Loop nodes for non-single-entry loops.
1888       // For all reasonable loops, the head occurs before the tail in RPO.
1889       if (i <= tail->_rpo) {
1890 
1891         // The tail and (recursive) predecessors of the tail
1892         // are made members of a new loop.
1893 
1894         assert(worklist.size() == 0, "nonempty worklist");
1895         CFGLoop* nloop = new CFGLoop(idct++);
1896         assert(loop_head->_loop == nullptr, "just checking");
1897         loop_head->_loop = nloop;
1898         // Add to nloop so push_pred() will skip over inner loops
1899         nloop->add_member(loop_head);
1900         nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this);
1901 
1902         while (worklist.size() > 0) {
1903           Block* member = worklist.pop();
1904           if (member != loop_head) {
1905             for (uint j = 1; j < member->num_preds(); j++) {
1906               nloop->push_pred(member, j, worklist, this);
1907             }
1908           }
1909         }
1910       }
1911     }
1912   }
1913 
1914   // Create a member list for each loop consisting
1915   // of both blocks and (immediate child) loops.
1916   for (uint i = 0; i < number_of_blocks(); i++) {
1917     Block* block = get_block(i);
1918     CFGLoop* lp = block->_loop;
1919     if (lp == nullptr) {
1920       // Not assigned to a loop. Add it to the method's pseudo loop.
1921       block->_loop = root_loop;
1922       lp = root_loop;
1923     }
1924     if (lp == root_loop || block != lp->head()) { // loop heads are already members
1925       lp->add_member(block);
1926     }
1927     if (lp != root_loop) {
1928       if (lp->parent() == nullptr) {
1929         // Not a nested loop. Make it a child of the method's pseudo loop.
1930         root_loop->add_nested_loop(lp);
1931       }
1932       if (block == lp->head()) {
1933         // Add nested loop to member list of parent loop.
1934         lp->parent()->add_member(lp);
1935       }
1936     }
1937   }
1938 
1939   return root_loop;
1940 }
1941 
1942 //------------------------------push_pred--------------------------------------
1943 void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) {
1944   Node* pred_n = blk->pred(i);
1945   Block* pred = cfg->get_block_for_node(pred_n);
1946   CFGLoop *pred_loop = pred->_loop;
1947   if (pred_loop == nullptr) {
1948     // Filter out blocks for non-single-entry loops.
1949     // For all reasonable loops, the head occurs before the tail in RPO.
1950     if (pred->_rpo > head()->_rpo) {
1951       pred->_loop = this;
1952       worklist.push(pred);
1953     }
1954   } else if (pred_loop != this) {
1955     // Nested loop.
1956     while (pred_loop->_parent != nullptr && pred_loop->_parent != this) {
1957       pred_loop = pred_loop->_parent;
1958     }
1959     // Make pred's loop be a child
1960     if (pred_loop->_parent == nullptr) {
1961       add_nested_loop(pred_loop);
1962       // Continue with loop entry predecessor.
1963       Block* pred_head = pred_loop->head();
1964       assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
1965       assert(pred_head != head(), "loop head in only one loop");
1966       push_pred(pred_head, LoopNode::EntryControl, worklist, cfg);
1967     } else {
1968       assert(pred_loop->_parent == this && _parent == nullptr, "just checking");
1969     }
1970   }
1971 }
1972 
1973 //------------------------------add_nested_loop--------------------------------
1974 // Make cl a child of the current loop in the loop tree.
1975 void CFGLoop::add_nested_loop(CFGLoop* cl) {
1976   assert(_parent == nullptr, "no parent yet");
1977   assert(cl != this, "not my own parent");
1978   cl->_parent = this;
1979   CFGLoop* ch = _child;
1980   if (ch == nullptr) {
1981     _child = cl;
1982   } else {
1983     while (ch->_sibling != nullptr) { ch = ch->_sibling; }
1984     ch->_sibling = cl;
1985   }
1986 }
1987 
1988 //------------------------------compute_loop_depth-----------------------------
1989 // Store the loop depth in each CFGLoop object.
1990 // Recursively walk the children to do the same for them.
1991 void CFGLoop::compute_loop_depth(int depth) {
1992   _depth = depth;
1993   CFGLoop* ch = _child;
1994   while (ch != nullptr) {
1995     ch->compute_loop_depth(depth + 1);
1996     ch = ch->_sibling;
1997   }
1998 }
1999 
2000 //------------------------------compute_freq-----------------------------------
2001 // Compute the frequency of each block and loop, relative to a single entry
2002 // into the dominating loop head.
2003 void CFGLoop::compute_freq() {
2004   // Bottom up traversal of loop tree (visit inner loops first.)
2005   // Set loop head frequency to 1.0, then transitively
2006   // compute frequency for all successors in the loop,
2007   // as well as for each exit edge.  Inner loops are
2008   // treated as single blocks with loop exit targets
2009   // as the successor blocks.
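       //
       // Illustrative numbers: if the loop head has frequency 1.0 and ends in a
       // two-way branch taken with probability 0.7, its in-loop successors start
       // with frequencies 0.7 and 0.3; a successor outside the loop instead
       // contributes its share to this loop's _exits list.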
2010 
2011   // Nested loops first
2012   CFGLoop* ch = _child;
2013   while (ch != nullptr) {
2014     ch->compute_freq();
2015     ch = ch->_sibling;
2016   }
2017   assert (_members.length() > 0, "no empty loops");
2018   Block* hd = head();
2019   hd->_freq = 1.0;
2020   for (int i = 0; i < _members.length(); i++) {
2021     CFGElement* s = _members.at(i);
2022     double freq = s->_freq;
2023     if (s->is_block()) {
2024       Block* b = s->as_Block();
2025       for (uint j = 0; j < b->_num_succs; j++) {
2026         Block* sb = b->_succs[j];
2027         update_succ_freq(sb, freq * b->succ_prob(j));
2028       }
2029     } else {
2030       CFGLoop* lp = s->as_CFGLoop();
2031       assert(lp->_parent == this, "immediate child");
2032       for (int k = 0; k < lp->_exits.length(); k++) {
2033         Block* eb = lp->_exits.at(k).get_target();
2034         double prob = lp->_exits.at(k).get_prob();
2035         update_succ_freq(eb, freq * prob);
2036       }
2037     }
2038   }
2039 
2040   // For all loops other than the outer, "method" loop,
2041   // sum and normalize the exit probability. The "method" loop
2042   // should keep the initial exit probability of 1, so that
2043   // inner blocks do not get erroneously scaled.
2044   if (_depth != 0) {
2045     // Total the exit probabilities for this loop.
2046     double exits_sum = 0.0f;
2047     for (int i = 0; i < _exits.length(); i++) {
2048       exits_sum += _exits.at(i).get_prob();
2049     }
2050 
2051     // Normalize the exit probabilities. Until now, the
2052     // probabilities estimate the probability of exit per
2053     // single loop iteration; afterward, they estimate
2054     // the probability of exit per loop entry.
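         // Illustrative numbers: two exits with per-iteration probabilities
         // 0.02 and 0.03 normalize to 0.4 and 0.6, and exits_sum == 0.05 is
         // saved below as _exit_prob (suggesting a trip count on the order of
         // 1/0.05 == 20 iterations per entry).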
2055     for (int i = 0; i < _exits.length(); i++) {
2056       Block* et = _exits.at(i).get_target();
2057       float new_prob = 0.0f;
2058       if (_exits.at(i).get_prob() > 0.0f) {
2059         new_prob = _exits.at(i).get_prob() / exits_sum;
2060       }
2061       BlockProbPair bpp(et, new_prob);
2062       _exits.at_put(i, bpp);
2063     }
2064 
2065     // Save the total, but guard against unreasonable probability,
2066     // as the value is used to estimate the loop trip count.
2067     // An infinite trip count would blur relative block
2068     // frequencies.
2069     if (exits_sum > 1.0f) exits_sum = 1.0;
2070     if (exits_sum < PROB_MIN) exits_sum = PROB_MIN;
2071     _exit_prob = exits_sum;
2072   }
2073 }
2074 
2075 //------------------------------succ_prob-------------------------------------
2076 // Determine the probability of reaching successor 'i' from the receiver block.
2077 float Block::succ_prob(uint i) {
2078   int eidx = end_idx();
2079   Node *n = get_node(eidx);  // Get ending Node
2080 
2081   int op = n->Opcode();
2082   if (n->is_Mach()) {
2083     if (n->is_MachNullCheck()) {
2084       // Can only reach here if called after lcm. The original Op_If is gone,
2085       // so we attempt to infer the probability from one or both of the
2086       // successor blocks.
2087       assert(_num_succs == 2, "expecting 2 successors of a null check");
2088       // If either successor has only one predecessor, then the
2089       // probability estimate can be derived using the
2090       // relative frequency of the successor and this block.
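           // Illustrative numbers: if this block has frequency 2.0 and successor
           // i (whose only predecessor is this block) has frequency 1.9, the
           // probability is estimated as 1.9 / 2.0 == 0.95.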
2091       if (_succs[i]->num_preds() == 2) {
2092         return _succs[i]->_freq / _freq;
2093       } else if (_succs[1-i]->num_preds() == 2) {
2094         return 1 - (_succs[1-i]->_freq / _freq);
2095       } else {
2096         // Estimate using both successor frequencies
2097         float freq = _succs[i]->_freq;
2098         return freq / (freq + _succs[1-i]->_freq);
2099       }
2100     }
2101     op = n->as_Mach()->ideal_Opcode();
2102   }
2103 
2104 
2105   // Switch on branch type
2106   switch( op ) {
2107   case Op_CountedLoopEnd:
2108   case Op_If: {
2109     assert (i < 2, "just checking");
2110     // Conditionals pass on only part of their frequency
2111     float prob  = n->as_MachIf()->_prob;
2112     assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
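         // Illustrative numbers: with _prob == 0.7, the IfTrue successor
         // receives 0.7 of this block's frequency and the IfFalse successor 0.3.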
2113     // If succ[i] is the FALSE branch, invert path info
2114     if( get_node(i + eidx + 1)->Opcode() == Op_IfFalse ) {
2115       return 1.0f - prob; // not taken
2116     } else {
2117       return prob; // taken
2118     }
2119   }
2120 
2121   case Op_Jump:
2122     return n->as_MachJump()->_probs[get_node(i + eidx + 1)->as_JumpProj()->_con];
2123 
2124   case Op_Catch: {
2125     const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
2126     if (ci->_con == CatchProjNode::fall_through_index) {
2127       // Fall-thru path gets the lion's share.
2128       return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
2129     } else {
2130       // Presume exceptional paths are equally unlikely
2131       return PROB_UNLIKELY_MAG(5);
2132     }
2133   }
2134 
2135   case Op_Root:
2136   case Op_Goto:
2137     // Pass frequency straight thru to target
2138     return 1.0f;
2139 
2140   case Op_NeverBranch:
2141     return 0.0f;
2142 
2143   case Op_TailCall:
2144   case Op_TailJump:
2145   case Op_ForwardException:
2146   case Op_Return:
2147   case Op_Halt:
2148   case Op_Rethrow:
2149     // Do not push out freq to root block
2150     return 0.0f;
2151 
2152   default:
2153     ShouldNotReachHere();
2154   }
2155 
2156   return 0.0f;
2157 }
2158 
2159 //------------------------------num_fall_throughs-----------------------------
2160 // Return the number of fall-through candidates for a block
2161 int Block::num_fall_throughs() {
2162   int eidx = end_idx();
2163   Node *n = get_node(eidx);  // Get ending Node
2164 
2165   int op = n->Opcode();
2166   if (n->is_Mach()) {
2167     if (n->is_MachNullCheck()) {
2168       // In theory, either side can fall through; for simplicity's sake,
2169       // assume only the false branch can.
2170       return 1;
2171     }
2172     op = n->as_Mach()->ideal_Opcode();
2173   }
2174 
2175   // Switch on branch type
2176   switch( op ) {
2177   case Op_CountedLoopEnd:
2178   case Op_If:
2179     return 2;
2180 
2181   case Op_Root:
2182   case Op_Goto:
2183     return 1;
2184 
2185   case Op_Catch: {
2186     for (uint i = 0; i < _num_succs; i++) {
2187       const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
2188       if (ci->_con == CatchProjNode::fall_through_index) {
2189         return 1;
2190       }
2191     }
2192     return 0;
2193   }
2194 
2195   case Op_Jump:
2196   case Op_NeverBranch:
2197   case Op_TailCall:
2198   case Op_TailJump:
2199   case Op_ForwardException:
2200   case Op_Return:
2201   case Op_Halt:
2202   case Op_Rethrow:
2203     return 0;
2204 
2205   default:
2206     ShouldNotReachHere();
2207   }
2208 
2209   return 0;
2210 }
2211 
2212 //------------------------------succ_fall_through-----------------------------
2213 // Return true if a specific successor could be a fall-through target.
2214 bool Block::succ_fall_through(uint i) {
2215   int eidx = end_idx();
2216   Node *n = get_node(eidx);  // Get ending Node
2217 
2218   int op = n->Opcode();
2219   if (n->is_Mach()) {
2220     if (n->is_MachNullCheck()) {
2221       // In theory, either side can fall through; for simplicity's sake,
2222       // assume only the false branch can.
2223       return get_node(i + eidx + 1)->Opcode() == Op_IfFalse;
2224     }
2225     op = n->as_Mach()->ideal_Opcode();
2226   }
2227 
2228   // Switch on branch type
2229   switch( op ) {
2230   case Op_CountedLoopEnd:
2231   case Op_If:
2232   case Op_Root:
2233   case Op_Goto:
2234     return true;
2235 
2236   case Op_Catch: {
2237     const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
2238     return ci->_con == CatchProjNode::fall_through_index;
2239   }
2240 
2241   case Op_Jump:
2242   case Op_NeverBranch:
2243   case Op_TailCall:
2244   case Op_TailJump:
2245   case Op_ForwardException:
2246   case Op_Return:
2247   case Op_Halt:
2248   case Op_Rethrow:
2249     return false;
2250 
2251   default:
2252     ShouldNotReachHere();
2253   }
2254 
2255   return false;
2256 }
2257 
2258 //------------------------------update_uncommon_branch------------------------
2259 // Update the probability of a two-branch to be uncommon
2260 void Block::update_uncommon_branch(Block* ub) {
2261   int eidx = end_idx();
2262   Node *n = get_node(eidx);  // Get ending Node
2263 
2264   int op = n->as_Mach()->ideal_Opcode();
2265 
2266   assert(op == Op_CountedLoopEnd || op == Op_If, "must be an If");
2267   assert(num_fall_throughs() == 2, "must be a two way branch block");
2268 
2269   // Which successor is ub?
2270   uint s;
2271   for (s = 0; s <_num_succs; s++) {
2272     if (_succs[s] == ub) break;
2273   }
2274   assert(s < 2, "uncommon successor must be found");
2275 
2276   // If ub is the true path, make the probability small; otherwise
2277   // ub is the false path, so make the probability large.
2278   bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse);
2279 
2280   // Get existing probability
2281   float p = n->as_MachIf()->_prob;
2282 
2283   if (invert) p = 1.0 - p;
2284   if (p > PROB_MIN) {
2285     p = PROB_MIN;
2286   }
2287   if (invert) p = 1.0 - p;
2288 
2289   n->as_MachIf()->_prob = p;
2290 }
2291 
2292 //------------------------------update_succ_freq-------------------------------
2293 // Update the appropriate frequency associated with block 'b', a successor of
2294 // a block in this loop.
2295 void CFGLoop::update_succ_freq(Block* b, double freq) {
2296   if (b->_loop == this) {
2297     if (b == head()) {
2298       // back branch within the loop
2299       // Do nothing now; the loop-carried frequency will be
2300       // adjusted later in scale_freq().
2301     } else {
2302       // simple branch within the loop
2303       b->_freq += freq;
2304     }
2305   } else if (!in_loop_nest(b)) {
2306     // branch is exit from this loop
2307     BlockProbPair bpp(b, freq);
2308     _exits.append(bpp);
2309   } else {
2310     // branch into nested loop
2311     CFGLoop* ch = b->_loop;
2312     ch->_freq += freq;
2313   }
2314 }
2315 
2316 //------------------------------in_loop_nest-----------------------------------
2317 // Determine if block b is in the receiver's loop nest.
2318 bool CFGLoop::in_loop_nest(Block* b) {
2319   int depth = _depth;
2320   CFGLoop* b_loop = b->_loop;
2321   int b_depth = b_loop->_depth;
2322   if (depth == b_depth) {
2323     return true;
2324   }
2325   while (b_depth > depth) {
2326     b_loop = b_loop->_parent;
2327     b_depth = b_loop->_depth;
2328   }
2329   return b_loop == this;
2330 }
2331 
2332 //------------------------------scale_freq-------------------------------------
2333 // Scale frequency of loops and blocks by trip counts from outer loops
2334 // Do a top down traversal of loop tree (visit outer loops first.)
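     // Illustrative numbers: a loop entered with frequency 0.5 and an estimated
     // trip count of 10 gets loop_freq == 5.0; a member block whose relative
     // frequency within the loop is 0.8 then ends up with frequency 4.0.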
2335 void CFGLoop::scale_freq() {
2336   double loop_freq = _freq * trip_count();
2337   _freq = loop_freq;
2338   for (int i = 0; i < _members.length(); i++) {
2339     CFGElement* s = _members.at(i);
2340     double block_freq = s->_freq * loop_freq;
2341     if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY)
2342       block_freq = MIN_BLOCK_FREQUENCY;
2343     s->_freq = block_freq;
2344   }
2345   CFGLoop* ch = _child;
2346   while (ch != nullptr) {
2347     ch->scale_freq();
2348     ch = ch->_sibling;
2349   }
2350 }
2351 
2352 // Frequency of outer loop
2353 double CFGLoop::outer_loop_freq() const {
2354   if (_child != nullptr) {
2355     return _child->_freq;
2356   }
2357   return _freq;
2358 }
2359 
2360 #ifndef PRODUCT
2361 //------------------------------dump_tree--------------------------------------
2362 void CFGLoop::dump_tree() const {
2363   dump();
2364   if (_child != nullptr)   _child->dump_tree();
2365   if (_sibling != nullptr) _sibling->dump_tree();
2366 }
2367 
2368 //------------------------------dump-------------------------------------------
2369 void CFGLoop::dump() const {
2370   for (int i = 0; i < _depth; i++) tty->print("   ");
2371   tty->print("%s: %d  trip_count: %6.0f freq: %6.0f\n",
2372              _depth == 0 ? "Method" : "Loop", _id, trip_count(), _freq);
2373   for (int i = 0; i < _depth; i++) tty->print("   ");
2374   tty->print("         members:");
2375   int k = 0;
2376   for (int i = 0; i < _members.length(); i++) {
2377     if (k++ >= 6) {
2378       tty->print("\n              ");
2379       for (int j = 0; j < _depth+1; j++) tty->print("   ");
2380       k = 0;
2381     }
2382     CFGElement *s = _members.at(i);
2383     if (s->is_block()) {
2384       Block *b = s->as_Block();
2385       tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq);
2386     } else {
2387       CFGLoop* lp = s->as_CFGLoop();
2388       tty->print(" L%d(%6.3f)", lp->_id, lp->_freq);
2389     }
2390   }
2391   tty->print("\n");
2392   for (int i = 0; i < _depth; i++) tty->print("   ");
2393   tty->print("         exits:  ");
2394   k = 0;
2395   for (int i = 0; i < _exits.length(); i++) {
2396     if (k++ >= 7) {
2397       tty->print("\n              ");
2398       for (int j = 0; j < _depth+1; j++) tty->print("   ");
2399       k = 0;
2400     }
2401     Block *blk = _exits.at(i).get_target();
2402     double prob = _exits.at(i).get_prob();
2403     tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100));
2404   }
2405   tty->print("\n");
2406 }
2407 #endif