1 /*
   2  * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "libadt/vectset.hpp"
  26 #include "memory/allocation.inline.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "opto/block.hpp"
  29 #include "opto/c2compiler.hpp"
  30 #include "opto/callnode.hpp"
  31 #include "opto/cfgnode.hpp"
  32 #include "opto/chaitin.hpp"
  33 #include "opto/machnode.hpp"
  34 #include "opto/opcodes.hpp"
  35 #include "opto/phaseX.hpp"
  36 #include "opto/rootnode.hpp"
  37 #include "opto/runtime.hpp"
  38 #include "runtime/deoptimization.hpp"
  39 
  40 // Portions of code courtesy of Clifford Click
  41 
  42 // Optimization - Graph Style
  43 
  44 // To avoid float value underflow
  45 #define MIN_BLOCK_FREQUENCY 1.e-35f
  46 
  47 //----------------------------schedule_node_into_block-------------------------
  48 // Insert node n into block b. Look for projections of n and make sure they
  49 // are in b also.
  50 void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
  51   // Set basic block of n, Add n to b,
  52   map_node_to_block(n, b);
  53   b->add_inst(n);
  54 
  55   // After Matching, nearly any old Node may have projections trailing it.
  56   // These are usually machine-dependent flags.  In any case, they might
  57   // float to another block below this one.  Move them up.
  58   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  59     Node*  use  = n->fast_out(i);
  60     if (use->is_Proj()) {
  61       Block* buse = get_block_for_node(use);
  62       if (buse != b) {              // In wrong block?
  63         if (buse != nullptr) {
  64           buse->find_remove(use);   // Remove from wrong block
  65         }
  66         map_node_to_block(use, b);
  67         b->add_inst(use);
  68       }
  69     }
  70   }
  71 }
  72 
  73 //----------------------------replace_block_proj_ctrl-------------------------
  74 // Nodes that have is_block_proj() nodes as their control need to use
  75 // the appropriate Region for their actual block as their control since
  76 // the projection will be in a predecessor block.
void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
  const Node *in0 = n->in(0);
  assert(in0 != nullptr, "Only control-dependent");
  const Node *p = in0->is_block_proj();
  if (p != nullptr && p != n) {    // Control from a block projection?
    assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
    // Find trailing Region
    Block *pb = get_block_for_node(in0); // Block-projection already has basic block
    uint j = 0;
    if (pb->_num_succs != 1) {  // More than 1 successor?
      // Search for successor
      // The block-ending projections are the last _num_succs nodes in the
      // block, one per successor, in the same order as _succs.
      uint max = pb->number_of_nodes();
      assert( max > 1, "" );
      uint start = max - pb->_num_succs;
      // Find which output path belongs to projection
      for (j = start; j < max; j++) {
        if( pb->get_node(j) == in0 )
          break;
      }
      assert( j < max, "must find" );
      // Change control to match head of successor basic block
      j -= start;
    }
    // Rewire n's control to the head (Region) of the successor block that
    // the projection selects, so n's control is a proper block head.
    n->set_req(0, pb->_succs[j]->head());
  }
}
 103 
// Check whether 'dom_node' dominates 'node'. Both arguments must satisfy
// is_CFG(). When the two nodes fall into different blocks, the block
// dominator tree decides; when they share a block, their relative position
// within the block is inferred from their kind (block start, block
// projection, or in-block control node).
bool PhaseCFG::is_dominator(Node* dom_node, Node* node) {
  assert(is_CFG(node) && is_CFG(dom_node), "node and dom_node must be CFG nodes");
  if (dom_node == node) {
    return true;   // A node trivially dominates itself
  }
  Block* d = find_block_for_node(dom_node);
  Block* n = find_block_for_node(node);
  assert(n != nullptr && d != nullptr, "blocks must exist");

  if (d == n) {
    // Same block: a block start precedes everything else in the block, and
    // a block projection follows everything else.
    if (dom_node->is_block_start()) {
      return true;
    }
    if (node->is_block_start()) {
      return false;
    }
    if (dom_node->is_block_proj()) {
      return false;
    }
    if (node->is_block_proj()) {
      return true;
    }

    assert(is_control_proj_or_safepoint(node), "node must be control projection or safepoint");
    assert(is_control_proj_or_safepoint(dom_node), "dom_node must be control projection or safepoint");

    // Neither 'node' nor 'dom_node' is a block start or block projection.
    // Check if 'dom_node' is above 'node' in the control graph.
    if (is_dominating_control(dom_node, node)) {
      return true;
    }

#ifdef ASSERT
    // If 'dom_node' does not dominate 'node' then 'node' has to dominate 'dom_node'
    if (!is_dominating_control(node, dom_node)) {
      node->dump();
      dom_node->dump();
      assert(false, "neither dom_node nor node dominates the other");
    }
#endif

    return false;
  }
  // Different blocks: 'd' dominates 'n' iff 'd' is the dominator-tree LCA
  // of the two blocks.
  return d->dom_lca(n) == d;
}
 149 
 150 bool PhaseCFG::is_CFG(Node* n) {
 151   return n->is_block_proj() || n->is_block_start() || is_control_proj_or_safepoint(n);
 152 }
 153 
 154 bool PhaseCFG::is_control_proj_or_safepoint(Node* n) const {
 155   bool result = n->is_ReachabilityFence() ||
 156                 (n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_SafePoint) ||
 157                 (n->is_Proj() && n->as_Proj()->bottom_type() == Type::CONTROL);
 158   assert(!n->is_Proj() ||
 159          n->as_Proj()->bottom_type() != Type::CONTROL ||
 160          n->as_Proj()->_con == 0, "If control projection, it must be projection 0");
 161   return result;
 162 }
 163 
 164 Block* PhaseCFG::find_block_for_node(Node* n) const {
 165   if (n->is_block_start() || n->is_block_proj()) {
 166     return get_block_for_node(n);
 167   } else {
 168     // Walk the control graph up if 'n' is not a block start nor a block projection. In this case 'n' must be
 169     // an unmatched control projection or a not yet matched safepoint precedence edge in the middle of a block.
 170     assert(is_control_proj_or_safepoint(n), "must be control projection or safepoint");
 171     Node* ctrl = n->in(0);
 172     while (!ctrl->is_block_start()) {
 173       ctrl = ctrl->in(0);
 174     }
 175     return get_block_for_node(ctrl);
 176   }
 177 }
 178 
 179 // Walk up the control graph from 'n' and check if 'dom_ctrl' is found.
 180 bool PhaseCFG::is_dominating_control(Node* dom_ctrl, Node* n) {
 181   Node* ctrl = n->in(0);
 182   while (!ctrl->is_block_start()) {
 183     if (ctrl == dom_ctrl) {
 184       return true;
 185     }
 186     ctrl = ctrl->in(0);
 187   }
 188   return false;
 189 }
 190 
 191 
 192 //------------------------------schedule_pinned_nodes--------------------------
 193 // Set the basic block for Nodes pinned into blocks
// Assign basic blocks to all pinned nodes, and fold precedence edges into
// the control input. 'visited' is shared with the caller so later phases can
// see which nodes were reached.
void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
  // Allocate node stack of size C->live_nodes()+8 to avoid frequent realloc
  GrowableArray <Node*> spstack(C->live_nodes() + 8);
  spstack.push(_root);
  // Depth-first walk of the graph from the root via input edges.
  while (spstack.is_nonempty()) {
    Node* node = spstack.pop();
    if (!visited.test_set(node->_idx)) { // Test node and flag it as visited
      if (node->pinned() && !has_block(node)) {  // Pinned?  Nail it down!
        assert(node->in(0), "pinned Node must have Control");
        // Before setting block replace block_proj control edge
        replace_block_proj_ctrl(node);
        // Walk the control chain up to the nearest block-starting node.
        Node* input = node->in(0);
        while (!input->is_block_start()) {
          input = input->in(0);
        }
        Block* block = get_block_for_node(input); // Basic block of controlling input
        schedule_node_into_block(node, block);
      }

      // If the node has precedence edges (added when CastPP nodes are
      // removed in final_graph_reshaping), fix the control of the
      // node to cover the precedence edges and remove the
      // dependencies.
      // 'n' accumulates the deepest (most dominated) precedence input seen.
      Node* n = nullptr;
      for (uint i = node->len()-1; i >= node->req(); i--) {
        Node* m = node->in(i);
        if (m == nullptr) continue;
        assert(is_CFG(m), "must be a CFG node");
        node->rm_prec(i);
        if (n == nullptr) {
          n = m;
        } else {
          // The precedence inputs must be linearly ordered in the dominator
          // tree; keep whichever of the two is dominated by the other.
          assert(is_dominator(n, m) || is_dominator(m, n), "one must dominate the other");
          n = is_dominator(n, m) ? m : n;
        }
      }
      if (n != nullptr) {
        assert(node->in(0), "control should have been set");
        assert(is_dominator(n, node->in(0)) || is_dominator(node->in(0), n), "one must dominate the other");
        // Tighten the control edge only if the deepest precedence input is
        // strictly below the current control in the dominator tree.
        if (!is_dominator(n, node->in(0))) {
          node->set_req(0, n);
        }
      }

      // process all inputs that are non null
      for (int i = node->len()-1; i >= 0; --i) {
        if (node->in(i) != nullptr) {
          spstack.push(node->in(i));
        }
      }
    }
  }
}
 247 
// Assert that new input b2 is dominated by all previous inputs.
// Check this by seeing that b2 is dominated by b1, the deepest
// input observed until b2.
static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
  if (b1 == nullptr)  return;   // No previous deepest input; nothing to check
  assert(b1->_dom_depth < b2->_dom_depth, "sanity");
  // Walk b2 up the idom chain: if b1 dominates b2, we must encounter b1.
  Block* tmp = b2;
  while (tmp != b1 && tmp != nullptr) {
    tmp = tmp->_idom;
  }
  if (tmp != b1) {
#ifdef ASSERT
    // Detected an unschedulable graph.  Print some nice stuff and die.
    tty->print_cr("!!! Unschedulable graph !!!");
    for (uint j=0; j<n->len(); j++) { // For all inputs
      Node* inn = n->in(j); // Get input
      if (inn == nullptr)  continue;  // Ignore null, missing inputs
      Block* inb = cfg->get_block_for_node(inn);
      tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
                 inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
      inn->dump();
    }
    tty->print("Failing node: ");
    n->dump();
    assert(false, "unschedulable graph");
#endif
    // In product builds, record a compilation bailout instead of asserting.
    cfg->C->record_failure("unschedulable graph");
  }
}
 277 
// Return the deepest (in the dominator tree) block among the blocks of all
// of n's inputs; this is the earliest block where n can legally be placed.
// Returns nullptr if the compilation has been marked as failing (the inputs
// were found not to be linearly ordered in the dominator tree).
static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
  // Find the last input dominated by all other inputs.
  Block* deepb           = nullptr;     // Deepest block so far
  int    deepb_dom_depth = 0;
  for (uint k = 0; k < n->len(); k++) { // For all inputs
    Node* inn = n->in(k);               // Get input
    if (inn == nullptr)  continue;      // Ignore null, missing inputs
    Block* inb = cfg->get_block_for_node(inn);
    assert(inb != nullptr, "must already have scheduled this input");
    if (deepb_dom_depth < (int) inb->_dom_depth) {
      // The new inb must be dominated by the previous deepb.
      // The various inputs must be linearly ordered in the dom
      // tree, or else there will not be a unique deepest block.
      assert_dom(deepb, inb, n, cfg);
      if (cfg->C->failing()) {
        return nullptr;                 // Bailout recorded by assert_dom
      }
      deepb = inb;                      // Save deepest block
      deepb_dom_depth = deepb->_dom_depth;
    }
  }
  assert(deepb != nullptr, "must be at least one input to n");
  return deepb;
}
 302 
 303 
 304 //------------------------------schedule_early---------------------------------
 305 // Find the earliest Block any instruction can be placed in.  Some instructions
 306 // are pinned into Blocks.  Unpinned instructions can appear in last block in
 307 // which all their inputs occur.
// Compute the earliest legal block for every node reachable from the roots,
// using an iterative post-order walk (explicit stack, no recursion).
// Returns false if the graph turns out to be unschedulable or the
// compilation fails along the way.
bool PhaseCFG::schedule_early(VectorSet &visited, Node_Stack &roots) {
  // Allocate stack with enough space to avoid frequent realloc
  Node_Stack nstack(roots.size() + 8);
  // _root will be processed among C->top() inputs
  roots.push(C->top(), 0);
  visited.set(C->top()->_idx);

  while (roots.size() != 0) {
    // Use local variables nstack_top_n & nstack_top_i to cache values
    // on stack's top.
    Node* parent_node = roots.node();
    uint  input_index = 0;
    roots.pop();

    while (true) {
      if (input_index == 0) {
        // First visit of this node (no inputs processed yet).
        // Fixup some control.  Constants without control get attached
        // to root and nodes that use is_block_proj() nodes should be attached
        // to the region that starts their block.
        const Node* control_input = parent_node->in(0);
        if (control_input != nullptr) {
          replace_block_proj_ctrl(parent_node);
        } else {
          // Is a constant with NO inputs?
          if (parent_node->req() == 1) {
            parent_node->set_req(0, _root);
          }
        }
      }

      // First, visit all inputs and force them to get a block.  If an
      // input is already in a block we quit following inputs (to avoid
      // cycles). Instead we put that Node on a worklist to be handled
      // later (since IT'S inputs may not have a block yet).

      // Assume all n's inputs will be processed
      bool done = true;

      while (input_index < parent_node->len()) {
        Node* in = parent_node->in(input_index++);
        if (in == nullptr) {
          continue;
        }

        int is_visited = visited.test_set(in->_idx);
        if (!has_block(in)) {
          if (is_visited) {
            // Visited but still blockless: a cycle that cannot be scheduled.
            assert(false, "graph should be schedulable");
            return false;
          }
          // Save parent node and next input's index.
          nstack.push(parent_node, input_index);
          // Process current input now.
          parent_node = in;
          input_index = 0;
          // Not all n's inputs processed.
          done = false;
          break;
        } else if (!is_visited) {
          // Visit this guy later, using worklist
          roots.push(in, 0);
        }
      }

      if (done) {
        // All of n's inputs have been processed, complete post-processing.

        // Some instructions are pinned into a block.  These include Region,
        // Phi, Start, Return, and other control-dependent instructions and
        // any projections which depend on them.
        if (!parent_node->pinned()) {
          // Set earliest legal block.
          Block* earliest_block = find_deepest_input(parent_node, this);
          if (C->failing()) {
            return false;
          }
          map_node_to_block(parent_node, earliest_block);
        } else {
          assert(get_block_for_node(parent_node) == get_block_for_node(parent_node->in(0)), "Pinned Node should be at the same block as its control edge");
        }

        if (nstack.is_empty()) {
          // Finished all nodes on stack.
          // Process next node on the worklist 'roots'.
          break;
        }
        // Get saved parent node and next input's index.
        parent_node = nstack.node();
        input_index = nstack.index();
        nstack.pop();
      }
    }
  }
  return true;
}
 403 
 404 //------------------------------dom_lca----------------------------------------
 405 // Find least common ancestor in dominator tree
 406 // LCA is a current notion of LCA, to be raised above 'this'.
 407 // As a convenient boundary condition, return 'this' if LCA is null.
 408 // Find the LCA of those two nodes.
 409 Block* Block::dom_lca(Block* LCA) {
 410   if (LCA == nullptr || LCA == this)  return this;
 411 
 412   Block* anc = this;
 413   while (anc->_dom_depth > LCA->_dom_depth)
 414     anc = anc->_idom;           // Walk up till anc is as high as LCA
 415 
 416   while (LCA->_dom_depth > anc->_dom_depth)
 417     LCA = LCA->_idom;           // Walk up till LCA is as high as anc
 418 
 419   while (LCA != anc) {          // Walk both up till they are the same
 420     LCA = LCA->_idom;
 421     anc = anc->_idom;
 422   }
 423 
 424   return LCA;
 425 }
 426 
 427 //--------------------------raise_LCA_above_use--------------------------------
 428 // We are placing a definition, and have been given a def->use edge.
 429 // The definition must dominate the use, so move the LCA upward in the
 430 // dominator tree to dominate the use.  If the use is a phi, adjust
 431 // the LCA only with the phi input paths which actually use this def.
static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) {
  Block* buse = cfg->get_block_for_node(use);
  if (buse == nullptr) return LCA;   // Unused killing Projs have no use block
  if (!use->is_Phi())  return buse->dom_lca(LCA);
  // For a Phi, the def is only "used" along the predecessor edges that
  // actually carry it, so raise the LCA above those predecessor blocks
  // rather than above the Phi's own block.
  uint pmax = use->req();       // Number of Phi inputs
  // Why does not this loop just break after finding the matching input to
  // the Phi?  Well...it's like this.  I do not have true def-use/use-def
  // chains.  Means I cannot distinguish, from the def-use direction, which
  // of many use-defs lead from the same use to the same def.  That is, this
  // Phi might have several uses of the same def.  Each use appears in a
  // different predecessor block.  But when I enter here, I cannot distinguish
  // which use-def edge I should find the predecessor block for.  So I find
  // them all.  Means I do a little extra work if a Phi uses the same value
  // more than once.
  for (uint j=1; j<pmax; j++) { // For all inputs
    if (use->in(j) == def) {    // Found matching input?
      Block* pred = cfg->get_block_for_node(buse->pred(j));
      LCA = pred->dom_lca(LCA);
    }
  }
  return LCA;
}
 454 
 455 //----------------------------raise_LCA_above_marks----------------------------
 456 // Return a new LCA that dominates LCA and any of its marked predecessors.
 457 // Search all my parents up to 'early' (exclusive), looking for predecessors
 458 // which are marked with the given index.  Return the LCA (in the dom tree)
 459 // of all marked blocks.  If there are none marked, return the original
 460 // LCA.
static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) {
  assert(early->dominates(LCA), "precondition failed");
  // Backward BFS from LCA toward 'early' over CFG predecessor edges.
  // Blocks marked with 'mark' force the LCA upward; the per-block
  // raise_LCA_visited stamp (keyed on the same 'mark') prevents revisits.
  Block_List worklist;
  worklist.push(LCA);
  while (worklist.size() > 0) {
    Block* mid = worklist.pop();
    if (mid == early)  continue;  // stop searching here

    // Test and set the visited bit.
    if (mid->raise_LCA_visited() == mark)  continue;  // already visited

    // Don't process the current LCA, otherwise the search may terminate early
    if (mid != LCA && mid->raise_LCA_mark() == mark) {
      // Raise the LCA.
      LCA = mid->dom_lca(LCA);
      if (LCA == early)  break;   // stop searching everywhere
      assert(early->dominates(LCA), "unsound LCA update");
      // Resume searching at that point, skipping intermediate levels.
      worklist.push(LCA);
      if (LCA == mid)
        continue; // Don't mark as visited to avoid early termination.
    } else {
      // Keep searching through this block's predecessors.
      for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
        Block* mid_parent = cfg->get_block_for_node(mid->pred(j));
        worklist.push(mid_parent);
      }
    }
    mid->set_raise_LCA_visited(mark);
  }
  return LCA;
}
 493 
 494 //--------------------------memory_early_block--------------------------------
 495 // This is a variation of find_deepest_input, the heart of schedule_early.
 496 // Find the "early" block for a load, if we considered only memory and
 497 // address inputs, that is, if other data inputs were ignored.
 498 //
 499 // Because a subset of edges are considered, the resulting block will
 500 // be earlier (at a shallower dom_depth) than the true schedule_early
 501 // point of the node. We compute this earlier block as a more permissive
 502 // site for anti-dependency insertion, but only if subsume_loads is enabled.
// Compute the early block for 'load' considering only its control, memory,
// base, and index inputs. Returns 'early' unchanged when the load has no
// other inputs, or nullptr on compilation failure (unschedulable inputs).
static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) {
  Node* base;
  Node* index;
  Node* store = load->in(MemNode::Memory);
  load->as_Mach()->memory_inputs(base, index);

  assert(base != NodeSentinel && index != NodeSentinel,
         "unexpected base/index inputs");

  // Collect the address/memory-relevant inputs: at most base, index,
  // memory, and (below) the control input.
  Node* mem_inputs[4];
  int mem_inputs_length = 0;
  if (base != nullptr)  mem_inputs[mem_inputs_length++] = base;
  if (index != nullptr) mem_inputs[mem_inputs_length++] = index;
  if (store != nullptr) mem_inputs[mem_inputs_length++] = store;

  // In the comparison below, add one to account for the control input,
  // which may be null, but always takes up a spot in the in array.
  if (mem_inputs_length + 1 < (int) load->req()) {
    // This "load" has more inputs than just the memory, base and index inputs.
    // For purposes of checking anti-dependences, we need to start
    // from the early block of only the address portion of the instruction,
    // and ignore other blocks that may have factored into the wider
    // schedule_early calculation.
    if (load->in(0) != nullptr) mem_inputs[mem_inputs_length++] = load->in(0);

    // Same "deepest input block" computation as find_deepest_input, but
    // restricted to the inputs collected above.
    Block* deepb           = nullptr;        // Deepest block so far
    int    deepb_dom_depth = 0;
    for (int i = 0; i < mem_inputs_length; i++) {
      Block* inb = cfg->get_block_for_node(mem_inputs[i]);
      if (deepb_dom_depth < (int) inb->_dom_depth) {
        // The new inb must be dominated by the previous deepb.
        // The various inputs must be linearly ordered in the dom
        // tree, or else there will not be a unique deepest block.
        assert_dom(deepb, inb, load, cfg);
        if (cfg->C->failing()) {
          return nullptr;
        }
        deepb = inb;                      // Save deepest block
        deepb_dom_depth = deepb->_dom_depth;
      }
    }
    early = deepb;
  }

  return early;
}
 549 
 550 // This function is used by raise_above_anti_dependences to find unrelated loads for stores in implicit null checks.
 551 bool PhaseCFG::unrelated_load_in_store_null_block(Node* store, Node* load) {
 552   // We expect an anti-dependence edge from 'load' to 'store', except when
 553   // implicit_null_check() has hoisted 'store' above its early block to
 554   // perform an implicit null check, and 'load' is placed in the null
 555   // block. In this case it is safe to ignore the anti-dependence, as the
 556   // null block is only reached if 'store' tries to write to null object and
 557   // 'load' read from non-null object (there is preceding check for that)
 558   // These objects can't be the same.
 559   Block* store_block = get_block_for_node(store);
 560   Block* load_block = get_block_for_node(load);
 561   Node* end = store_block->end();
 562   if (end->is_MachNullCheck() && (end->in(1) == store) && store_block->dominates(load_block)) {
 563     Node* if_true = end->find_out_with(Op_IfTrue);
 564     assert(if_true != nullptr, "null check without null projection");
 565     Node* null_block_region = if_true->find_out_with(Op_Region);
 566     assert(null_block_region != nullptr, "null check without null region");
 567     return get_block_for_node(null_block_region) == load_block;
 568   }
 569   return false;
 570 }
 571 
// Worklist of (memory def, memory use) pairs processed by
// PhaseCFG::raise_above_anti_dependences. It deduplicates MergeMem entries
// globally and Phi entries per-def, to avoid enqueuing the same work
// repeatedly.
class DefUseMemStatesQueue : public StackObj {
private:
  // One worklist entry: a memory state and one of its memory-modifying uses.
  class DefUsePair : public StackObj {
  private:
    Node* _def; // memory state
    Node* _use; // use of the memory state that also modifies the memory state

  public:
    DefUsePair(Node* def, Node* use) :
      _def(def), _use(use) {
    }

    // Default-constructed entries are empty (required by GrowableArray).
    DefUsePair() :
      _def(nullptr), _use(nullptr) {
    }

    Node* def() const {
      return _def;
    }

    Node* use() const {
      return _use;
    }
  };

  GrowableArray<DefUsePair> _queue;
  GrowableArray<MergeMemNode*> _worklist_visited; // visited mergemem nodes

  // Return true if (def_mem, use_phi) is already in the queue.
  bool already_enqueued(Node* def_mem, PhiNode* use_phi) const {
    // def_mem is one of the inputs of use_phi and at least one input of use_phi is
    // not def_mem. It's however possible that use_phi has def_mem as input multiple
    // times. If that happens, use_phi is recorded as a use of def_mem multiple
    // times as well. When PhaseCFG::raise_above_anti_dependences() goes over
    // uses of def_mem and enqueues them for processing, use_phi would then be
    // enqueued for processing multiple times when it only needs to be
    // processed once. The code below checks if use_phi as a use of def_mem was
    // already enqueued to avoid redundant processing of use_phi.
    int j = _queue.length()-1;
    // If there are any use of def_mem already enqueued, they were enqueued
    // last (all use of def_mem are processed in one go).
    for (; j >= 0; j--) {
      const DefUsePair& def_use_pair = _queue.at(j);
      if (def_use_pair.def() != def_mem) {
        // We're done with the uses of def_mem
        break;
      }
      if (def_use_pair.use() == use_phi) {
        return true;
      }
    }
#ifdef ASSERT
    // Sanity: no entry for def_mem may appear before the contiguous tail
    // scanned above.
    for (; j >= 0; j--) {
      const DefUsePair& def_use_pair = _queue.at(j);
      assert(def_use_pair.def() != def_mem, "Should be done with the uses of def_mem");
    }
#endif
    return false;
  }

public:
  // NOTE(review): 'area' is unused here; the growable arrays appear to
  // allocate from the current resource area — confirm against callers.
  DefUseMemStatesQueue(ResourceArea* area) {
  }

  // Enqueue (def_mem_state, use_mem_state), skipping duplicates:
  // a MergeMem is enqueued at most once overall, a Phi at most once per def.
  void push(Node* def_mem_state, Node* use_mem_state) {
    if (use_mem_state->is_MergeMem()) {
      // Be sure we don't get into combinatorial problems.
      if (!_worklist_visited.append_if_missing(use_mem_state->as_MergeMem())) {
        return; // already on work list; do not repeat
      }
    } else if (use_mem_state->is_Phi()) {
      // A Phi could have the same mem as input multiple times. If that's the case, we don't need to enqueue it
      // more than once. We otherwise allow phis to be repeated; they can merge two relevant states.
      if (already_enqueued(def_mem_state, use_mem_state->as_Phi())) {
        return;
      }
    }

    _queue.push(DefUsePair(def_mem_state, use_mem_state));
  }

  bool is_nonempty() const {
    return _queue.is_nonempty();
  }

  // Def of the most recently pushed (still pending) pair.
  Node* top_def() const {
    return _queue.top().def();
  }

  // Use of the most recently pushed (still pending) pair.
  Node* top_use() const {
    return _queue.top().use();
  }

  void pop() {
    _queue.pop();
  }
};
 668 
 669 // Enforce a scheduling of the given 'load' that ensures anti-dependent stores
 670 // do not overwrite the load's input memory state before the load executes.
 671 //
 672 // The given 'load' has a current scheduling range in the dominator tree that
 673 // starts at the load's early block (computed in schedule_early) and ends at
 674 // the given 'LCA' block for the load. However, there may still exist
 675 // anti-dependent stores between the early block and the LCA that overwrite
 676 // memory that the load must witness. For such stores, we must
 677 //
 678 //   1. raise the load's LCA to force the load to (eventually) be scheduled at
 679 //      latest in the store's block, and
 680 //   2. if the load may get scheduled in the store's block, additionally insert
 681 //      an anti-dependence edge (i.e., precedence edge) from the load to the
 682 //      store to ensure LCM schedules the load before the store within the
 683 //      block.
 684 //
 685 // For a given store, we say that the store is on a _distinct_ control-flow
 686 // path relative to the load if there are no paths from early to LCA that go
 687 // through the store's block. Such stores are not anti-dependent, and there is
 688 // no need to update the LCA nor to add anti-dependence edges.
 689 //
 690 // Due to the presence of loops, we must also raise the LCA above
 691 // anti-dependent memory Phis. We defer the details (see later comments in the
 692 // method) and for now look at an example without loops.
 693 //
 694 //          CFG               DOMINATOR TREE
 695 //
 696 //       B1 (early,L)              B1
 697 //       |\________                /\\___
 698 //       |         \              /  \   \
 699 //       B2 (L,S)   \            B2  B7  B6
 700 //      /  \         \           /\\___
 701 //     B3  B4 (S)    B7 (S)     /  \   \
 702 //      \  /         /         B3  B4  B5
 703 //       B5 (LCA,L) /
 704 //        \    ____/
 705 //         \  /
 706 //          B6
 707 //
 708 // Here, the load's scheduling range when calling raise_above_anti_dependences
 709 // is between early and LCA in the dominator tree, i.e., in block B1, B2, or B5
 710 // (indicated with "L"). However, there are a number of stores (indicated with
 711 // "S") that overwrite the memory which the load must witness. First, consider
 712 // the store in B4. We cannot legally schedule the load in B4, so an
 713 // anti-dependence edge is redundant. However, we must raise the LCA above
 714 // B4, which means that the updated LCA is B2. Now, consider the store in B2.
 715 // The LCA is already B2, so we do not need to raise it any further.
 716 // If we, eventually, decide to schedule the load in B2, it could happen that
 717 // LCM decides to place the load after the anti-dependent store in B2.
 718 // Therefore, we now need to add an anti-dependence edge between the load and
 719 // the B2 store, ensuring that the load is scheduled before the store. Finally,
 720 // the store in B7 is on a distinct control-flow path. Therefore, B7 requires
 721 // no action.
 722 //
 723 // The raise_above_anti_dependences method returns the updated LCA and ensures
 724 // there are no anti-dependent stores in any block between the load's early
 725 // block and the updated LCA. Any stores in the updated LCA will have new
 726 // anti-dependence edges back to the load. The caller may schedule the load in
 727 // the updated LCA, or it may hoist the load above the updated LCA, if the
 728 // updated LCA is not the early block.
Block* PhaseCFG::raise_above_anti_dependences(Block* LCA, Node* load, const bool verify) {
  ResourceMark rm;
  assert(load->needs_anti_dependence_check(), "must be a load of some sort");
  assert(LCA != nullptr, "");
  DEBUG_ONLY(Block* LCA_orig = LCA);

  // Compute the alias index.  Loads and stores with different alias indices
  // do not need anti-dependence edges.
  int load_alias_idx = C->get_alias_index(load->adr_type());
#ifdef ASSERT
  assert(Compile::AliasIdxTop <= load_alias_idx && load_alias_idx < C->num_alias_types(), "Invalid alias index");
  if (load_alias_idx == Compile::AliasIdxBot && C->do_aliasing() &&
      (PrintOpto || VerifyAliases ||
       (PrintMiscellaneous && (WizardMode || Verbose)))) {
    // Load nodes should not consume all of memory.
    // Reporting a bottom type indicates a bug in adlc.
    // If some particular type of node validly consumes all of memory,
    // sharpen the preceding "if" to exclude it, so we can catch bugs here.
    tty->print_cr("*** Possible Anti-Dependence Bug:  Load consumes all of memory.");
    load->dump(2);
    if (VerifyAliases)  assert(load_alias_idx != Compile::AliasIdxBot, "");
  }
#endif

  if (!C->alias_type(load_alias_idx)->is_rewritable()) {
    // It is impossible to spoil this load by putting stores before it,
    // because we know that the stores will never update the value
    // which 'load' must witness.
    return LCA;
  }

  // The load's node index serves below as the "mark" value when lazily
  // tagging blocks whose LCA must be raised (see set_raise_LCA_mark).
  node_idx_t load_index = load->_idx;

  // Record the earliest legal placement of 'load', as determined by the unique
  // point in the dominator tree where all memory effects and other inputs are
  // first available (computed by schedule_early). For normal loads, 'early' is
  // the shallowest place (dominator-tree wise) to look for anti-dependences
  // between this load and any store.
  Block* early = get_block_for_node(load);

  // If we are subsuming loads, compute an "early" block that only considers
  // memory or address inputs. This block may be different from the
  // schedule_early block when it is at an even shallower depth in the
  // dominator tree, and allow for a broader discovery of anti-dependences.
  if (C->subsume_loads()) {
    early = memory_early_block(load, early, this);
    if (C->failing()) {
      return nullptr;
    }
  }

  assert(early->dominates(LCA_orig), "precondition failed");

  // Resource area backing the transient worklist and store list created below.
  ResourceArea* area = Thread::current()->resource_area();

  // Bookkeeping of possibly anti-dependent stores that we find outside the
  // early block and that may need anti-dependence edges. Note that stores in
  // non_early_stores are not necessarily dominated by early. The search starts
  // from initial_mem, which can reside in a block that dominates early, and
  // therefore, stores we find may be in blocks that are on completely distinct
  // control-flow paths compared to early. However, in the end, only stores in
  // blocks dominated by early matter. The reason for bookkeeping not only
  // relevant stores is efficiency: we lazily record all possible
  // anti-dependent stores and add anti-dependence edges only to the relevant
  // ones at the very end of this method when we know the final updated LCA.
  Node_List non_early_stores(area);

  // Whether we must raise the LCA after the main worklist loop below.
  bool must_raise_LCA_above_marks = false;

  // The input load uses some memory state (initial_mem).
  Node* initial_mem = load->in(MemNode::Memory);
  // To find anti-dependences we must look for users of the same memory state.
  // To do this, we search the memory graph downwards from initial_mem. During
  // this search, we encounter different types of nodes that we handle
  // according to the following three categories:
  //
  // - MergeMems
  // - Memory-state-modifying nodes (informally referred to as "stores" above
  //   and below)
  // - Memory Phis
  //
  // MergeMems do not modify the memory state. Anti-dependent stores or memory
  // Phis may, however, exist downstream of MergeMems. Therefore, we must
  // permit the search to continue through MergeMems. Stores may raise the LCA
  // and may potentially also require an anti-dependence edge. Memory Phis may
  // raise the LCA but never require anti-dependence edges. See the comments
  // throughout the worklist loop below for further details.
  //
  // It may be useful to think of the anti-dependence search as traversing a
  // tree rooted at initial_mem, with internal nodes of type MergeMem and
  // memory Phis and stores as (potentially repeated) leaves.

  // We don't optimize the memory graph for pinned loads, so we may need to raise the
  // root of our search tree through the corresponding slices of MergeMem nodes to
  // get to the node that really creates the memory state for this slice.
  if (load_alias_idx >= Compile::AliasIdxRaw) {
    while (initial_mem->is_MergeMem()) {
      MergeMemNode* mm = initial_mem->as_MergeMem();
      Node* p = mm->memory_at(load_alias_idx);
      if (p != mm->base_memory()) {
        initial_mem = p;
      } else {
        break;
      }
    }
  }
  // To administer the search, we use a worklist consisting of (def,use)-pairs
  // of memory states, corresponding to edges in the search tree (and edges
  // in the memory graph). We need to keep track of search tree edges in the
  // worklist rather than individual nodes due to memory Phis (see details
  // below).
  DefUseMemStatesQueue worklist(area);
  // We start the search at initial_mem and indicate the search root with the
  // edge (nullptr, initial_mem).
  worklist.push(nullptr, initial_mem);

  // The worklist loop
  while (worklist.is_nonempty()) {
    // Pop the next edge from the worklist
    Node* def_mem_state = worklist.top_def();
    Node* use_mem_state = worklist.top_use();
    worklist.pop();

    // We are either
    // - at the root of the search with the edge (nullptr, initial_mem),
    // - just past initial_mem with the edge (initial_mem, use_mem_state), or
    // - just past a MergeMem with the edge (MergeMem, use_mem_state).
    assert(def_mem_state == nullptr || def_mem_state == initial_mem ||
           def_mem_state->is_MergeMem(),
           "unexpected memory state");

    const uint op = use_mem_state->Opcode();

#ifdef ASSERT
    // CacheWB nodes are peculiar in a sense that they both are anti-dependent and produce memory.
    // Allow them to be treated as a store.
    bool is_cache_wb = false;
    if (use_mem_state->is_Mach()) {
      int ideal_op = use_mem_state->as_Mach()->ideal_Opcode();
      is_cache_wb = (ideal_op == Op_CacheWB);
    }
    assert(!use_mem_state->needs_anti_dependence_check() || is_cache_wb, "no loads");
#endif

    // If we are either at the search root or have found a MergeMem, we step
    // past use_mem_state and populate the search worklist with edges
    // (use_mem_state, child) for use_mem_state's children.
    if (def_mem_state == nullptr // root (exclusive) of tree we are searching
        || op == Op_MergeMem     // internal node of tree we are searching
    ) {
      def_mem_state = use_mem_state;

      for (DUIterator_Fast imax, i = def_mem_state->fast_outs(imax); i < imax; i++) {
        use_mem_state = def_mem_state->fast_out(i);
        if (use_mem_state->needs_anti_dependence_check()) {
          // use_mem_state is also a kind of load (i.e.,
          // needs_anti_dependence_check), and it is not a store nor a memory
          // Phi. Hence, it is not anti-dependent on the load.
          continue;
        }
        worklist.push(def_mem_state, use_mem_state);
      }
      // Nothing more to do for the current (nullptr, initial_mem) or
      // (initial_mem/MergeMem, MergeMem) edge, move on.
      continue;
    }

    assert(!use_mem_state->is_MergeMem(),
           "use_mem_state should be either a store or a memory Phi");

    // Skip killing projections and Catch nodes; these never overwrite memory
    // that the load must witness (see the "unused killing projections" assert
    // further below).
    if (op == Op_MachProj || op == Op_Catch)   continue;

    // Compute the alias index. If the use_mem_state has an alias index
    // different from the load's, it is not anti-dependent. Wide MemBar's
    // are anti-dependent with everything (except immutable memories).
    const TypePtr* adr_type = use_mem_state->adr_type();
    if (!C->can_alias(adr_type, load_alias_idx))  continue;

    // Most slow-path runtime calls do NOT modify Java memory, but
    // they can block and so write Raw memory.
    if (use_mem_state->is_Mach()) {
      MachNode* muse = use_mem_state->as_Mach();
      if (load_alias_idx != Compile::AliasIdxRaw) {
        // Check for call into the runtime using the Java calling
        // convention (and from there into a wrapper); it has no
        // _method.  Can't do this optimization for Native calls because
        // they CAN write to Java memory.
        if (muse->ideal_Opcode() == Op_CallStaticJava) {
          assert(muse->is_MachSafePoint(), "");
          MachSafePointNode* ms = (MachSafePointNode*)muse;
          assert(ms->is_MachCallJava(), "");
          MachCallJavaNode* mcj = (MachCallJavaNode*) ms;
          if (mcj->_method == nullptr) {
            // These runtime calls do not write to Java visible memory
            // (other than Raw) and so are not anti-dependent.
            continue;
          }
        }
        // Same for SafePoints: they read/write Raw but only read otherwise.
        // This is basically a workaround for SafePoints only defining control
        // instead of control + memory.
        if (muse->ideal_Opcode() == Op_SafePoint) {
          continue;
        }
      } else {
        // Some raw memory, such as the load of "top" at an allocation,
        // can be control dependent on the previous safepoint. See
        // comments in GraphKit::allocate_heap() about control input.
        // Inserting an anti-dependence edge between such a safepoint and a use
        // creates a cycle, and will cause a subsequent failure in
        // local scheduling.  (BugId 4919904)
        // (%%% How can a control input be a safepoint and not a projection??)
        if (muse->ideal_Opcode() == Op_SafePoint && load->in(0) == muse) {
          continue;
        }
      }
    }

    // Determine the block of the use_mem_state.
    Block* use_mem_state_block = get_block_for_node(use_mem_state);
    assert(use_mem_state_block != nullptr,
           "unused killing projections skipped above");

    // For efficiency, we take a lazy approach to both raising the LCA and
    // adding anti-dependence edges. In this worklist loop, we only mark blocks
    // which we must raise the LCA above (set_raise_LCA_mark), and keep
    // track of nodes that potentially need anti-dependence edges
    // (non_early_stores). The only exceptions to this are if we
    // immediately see that we have to raise the LCA all the way to the early
    // block, and if we find stores in the early block (which always need
    // anti-dependence edges).
    //
    // After the worklist loop, we perform an efficient combined LCA-raising
    // operation over all marks and only then add anti-dependence edges where
    // strictly necessary according to the new raised LCA.

    if (use_mem_state->is_Phi()) {
      // We have reached a memory Phi node. On our search from initial_mem to
      // the Phi, we have found no anti-dependences (otherwise, we would have
      // already terminated the search along this branch). Consider the example
      // below, indicating a Phi node and its node inputs (we omit the control
      // input).
      //
      //    def_mem_state
      //          |
      //          | ? ?
      //          \ | /
      //           Phi
      //
      // We reached the Phi from def_mem_state and know that, on this
      // particular input, the memory that the load must witness is not
      // overwritten. However, for the Phi's other inputs (? in the
      // illustration), we have no information and must thus conservatively
      // assume that the load's memory is overwritten at and below the Phi.
      //
      // It is impossible to schedule the load before the Phi in
      // the same block as the Phi (use_mem_state_block), and anti-dependence
      // edges are, therefore, redundant. We must, however, find the
      // predecessor block of use_mem_state_block that corresponds to
      // def_mem_state, and raise the LCA above that block. Note that this block
      // is not necessarily def_mem_state's block! See the continuation of our
      // previous example below (now illustrating blocks instead of nodes)
      //
      //    def_mem_state's block
      //          |
      //          |
      //      pred_block
      //          |
      //          |   ?   ?
      //          |   |   |
      //      use_mem_state_block
      //
      // Here, we must raise the LCA above pred_block rather than
      // def_mem_state's block.
      //
      // Do not assert(use_mem_state_block != early, "Phi merging memory after access")
      // PhiNode may be at start of block 'early' with backedge to 'early'
      if (LCA == early) {
        // Don't bother if LCA is already raised all the way
        continue;
      }
      DEBUG_ONLY(bool found_match = false);
      // A Phi may use def_mem_state on several inputs; handle every match.
      for (uint j = PhiNode::Input, jmax = use_mem_state->req(); j < jmax; j++) {
        if (use_mem_state->in(j) == def_mem_state) {   // Found matching input?
          DEBUG_ONLY(found_match = true);
          Block* pred_block = get_block_for_node(use_mem_state_block->pred(j));
          if (pred_block != early) {
            // Lazily set the LCA mark
            pred_block->set_raise_LCA_mark(load_index);
            must_raise_LCA_above_marks = true;
          } else /* if (pred_block == early) */ {
            // We know already now that we must raise LCA all the way to early.
            LCA = early;
            // This turns off the process of gathering non_early_stores.
          }
        }
      }
      assert(found_match, "no worklist bug");
    } else if (use_mem_state_block != early) {
      // We found an anti-dependent store outside the load's 'early' block. The
      // store may be between the current LCA and the earliest possible block
      // (but it could very well also be on a distinct control-flow path).
      // Lazily set the LCA mark and push to non_early_stores.
      if (LCA == early) {
        // Don't bother if LCA is already raised all the way
        continue;
      }
      if (unrelated_load_in_store_null_block(use_mem_state, load)) {
        continue;
      }
      use_mem_state_block->set_raise_LCA_mark(load_index);
      must_raise_LCA_above_marks = true;
      non_early_stores.push(use_mem_state);
    } else /* if (use_mem_state_block == early) */ {
      // We found an anti-dependent store in the load's 'early' block.
      // Therefore, we know already now that we must raise LCA all the way to
      // early and that we need to add an anti-dependence edge to the store.
      assert(use_mem_state != load->find_exact_control(load->in(0)), "dependence cycle found");
      if (verify) {
        assert(use_mem_state->find_edge(load) != -1 || unrelated_load_in_store_null_block(use_mem_state, load),
               "missing precedence edge");
      } else {
        use_mem_state->add_prec(load);
      }
      LCA = early;
      // This turns off the process of gathering non_early_stores.
    }
  }
  // Worklist is now empty; we have visited all possible anti-dependences.

  // Finished if 'load' must be scheduled in its 'early' block.
  // If we found any stores there, they have already been given
  // anti-dependence edges.
  if (LCA == early) {
    return LCA;
  }

  // We get here only if there are no anti-dependent stores in the load's
  // 'early' block and if no memory Phi has forced LCA to the early block. Now
  // we must raise the LCA above the blocks for all the anti-dependent stores
  // and above the predecessor blocks of anti-dependent memory Phis we reached
  // during the search.
  if (must_raise_LCA_above_marks) {
    LCA = raise_LCA_above_marks(LCA, load->_idx, early, this);
  }

  // If LCA == early at this point, there were no stores that required
  // anti-dependence edges in the early block. Otherwise, we would have eagerly
  // raised the LCA to early already in the worklist loop.
  if (LCA == early) {
    return LCA;
  }

  // The raised LCA block can now be a home to anti-dependent stores for which
  // we still need to add anti-dependence edges, but no LCA predecessor block
  // contains any such stores (otherwise, we would have raised the LCA even
  // higher).
  //
  // The raised LCA will be a lower bound for placing the load, preventing the
  // load from sinking past any block containing a store that may overwrite
  // memory that the load must witness.
  //
  // Now we need to insert the necessary anti-dependence edges from 'load' to
  // each store in the non-early LCA block. We have recorded all such potential
  // stores in non_early_stores.
  //
  // If LCA->raise_LCA_mark() != load_index, it means that we raised the LCA to
  // a block in which we did not find any anti-dependent stores. So, no need to
  // search for any such stores.
  if (LCA->raise_LCA_mark() == load_index) {
    while (non_early_stores.size() > 0) {
      Node* store = non_early_stores.pop();
      Block* store_block = get_block_for_node(store);
      if (store_block == LCA) {
        // Add anti-dependence edge from the load to the store in the non-early
        // LCA.
        assert(store != load->find_exact_control(load->in(0)), "dependence cycle found");
        if (verify) {
          assert(store->find_edge(load) != -1, "missing precedence edge");
        } else {
          store->add_prec(load);
        }
      } else {
        assert(store_block->raise_LCA_mark() == load_index, "block was marked");
      }
    }
  }

  assert(LCA->dominates(LCA_orig), "unsound updated LCA");

  // Return the highest block containing stores; any stores
  // within that block have been given anti-dependence edges.
  return LCA;
}
1124 
1125 // This class is used to iterate backwards over the nodes in the graph.
1126 
// Iterates over the node graph in backward (use-before-def) order, using an
// explicit stack instead of recursion. Clients repeatedly call next() until it
// returns nullptr.
class Node_Backward_Iterator {

private:
  // Default construction is disallowed; an iterator always needs a root node.
  Node_Backward_Iterator();

public:
  // Constructor for the iterator. Clears and reuses the caller-supplied
  // 'visited' set and 'stack' for the traversal state.
  Node_Backward_Iterator(Node *root, VectorSet &visited, Node_Stack &stack, PhaseCFG &cfg);

  // Return the next node of the backward traversal, or nullptr when done.
  Node *next();

private:
  VectorSet   &_visited;  // Nodes already returned by next()
  Node_Stack  &_stack;    // Explicit traversal stack (node, out-edge index)
  PhaseCFG &_cfg;         // Used to map nodes to their blocks (RPO checks)
};
1144 
1145 // Constructor for the Node_Backward_Iterator
1146 Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_Stack &stack, PhaseCFG &cfg)
1147   : _visited(visited), _stack(stack), _cfg(cfg) {
1148   // The stack should contain exactly the root
1149   stack.clear();
1150   stack.push(root, root->outcnt());
1151 
1152   // Clear the visited bits
1153   visited.clear();
1154 }
1155 
1156 // Iterator for the Node_Backward_Iterator
// Iterator for the Node_Backward_Iterator
Node *Node_Backward_Iterator::next() {

  // If the _stack is empty, then just return null: finished.
  if ( !_stack.size() )
    return nullptr;

  // I visit unvisited not-anti-dependence users first, then anti-dependent
  // children next. I iterate backwards to support removal of nodes.
  // The stack holds states consisting of 3 values:
  // current Def node, flag which indicates 1st/2nd pass, index of current out edge
  // The 1st/2nd-pass flag is packed into bit 0 of the stored node pointer
  // (see the push at the bottom of this function), so mask it off here to
  // recover the real node pointer and the flag separately.
  Node *self = (Node*)(((uintptr_t)_stack.node()) & ~1);
  bool iterate_anti_dep = (((uintptr_t)_stack.node()) & 1);
  uint idx = MIN2(_stack.index(), self->outcnt()); // Support removal of nodes.
  _stack.pop();

  // I cycle here when I am entering a deeper level of recursion.
  // The key variable 'self' was set prior to jumping here.
  while( 1 ) {

    _visited.set(self->_idx);

    // Now schedule all uses as late as possible.
    const Node* src = self->is_Proj() ? self->in(0) : self;
    uint src_rpo = _cfg.get_block_for_node(src)->_rpo;

    // Schedule all nodes in a post-order visit
    Node *unvisited = nullptr;  // Unvisited child candidate, if any

    // Scan for unvisited nodes
    while (idx > 0) {
      // For all uses, schedule late
      Node* n = self->raw_out(--idx); // Use

      // Skip already visited children
      if ( _visited.test(n->_idx) )
        continue;

      // do not traverse backward control edges
      Node *use = n->is_Proj() ? n->in(0) : n;
      uint use_rpo = _cfg.get_block_for_node(use)->_rpo;

      if ( use_rpo < src_rpo )
        continue;

      // Phi nodes always precede uses in a basic block
      if ( use_rpo == src_rpo && use->is_Phi() )
        continue;

      // Record this node as a candidate even if it does not match the current
      // pass; if the scan below finds no pass-matching node, the last such
      // candidate is visited anyway (see the push further down).
      unvisited = n;      // Found unvisited

      // Check for possible-anti-dependent
      // 1st pass: No such nodes, 2nd pass: Only such nodes.
      if (n->needs_anti_dependence_check() == iterate_anti_dep) {
        unvisited = n;      // Found unvisited
        break;
      }
    }

    // Did I find an unvisited not-anti-dependent Node?
    if (!unvisited) {
      if (!iterate_anti_dep) {
        // 2nd pass: Iterate over nodes which needs_anti_dependence_check.
        iterate_anti_dep = true;
        idx = self->outcnt();
        continue;
      }
      break;                  // All done with children; post-visit 'self'
    }

    // Visit the unvisited Node.  Contains the obvious push to
    // indicate I'm entering a deeper level of recursion.  I push the
    // old state onto the _stack and set a new state and loop (recurse).
    // The pass flag is folded into bit 0 of the pushed node pointer.
    _stack.push((Node*)((uintptr_t)self | (uintptr_t)iterate_anti_dep), idx);
    self = unvisited;
    iterate_anti_dep = false;
    idx = self->outcnt();
  } // End recursion loop

  return self;
}
1237 
1238 //------------------------------ComputeLatenciesBackwards----------------------
1239 // Compute the latency of all the instructions.
1240 void PhaseCFG::compute_latencies_backwards(VectorSet &visited, Node_Stack &stack) {
1241 #ifndef PRODUCT
1242   if (trace_opto_pipelining())
1243     tty->print("\n#---- ComputeLatenciesBackwards ----\n");
1244 #endif
1245 
1246   Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
1247   Node *n;
1248 
1249   // Walk over all the nodes from last to first
1250   while ((n = iter.next())) {
1251     // Set the latency for the definitions of this instruction
1252     partial_latency_of_defs(n);
1253   }
1254 } // end ComputeLatenciesBackwards
1255 
1256 //------------------------------partial_latency_of_defs------------------------
1257 // Compute the latency impact of this node on all defs.  This computes
1258 // a number that increases as we approach the beginning of the routine.
1259 void PhaseCFG::partial_latency_of_defs(Node *n) {
1260   // Set the latency for this instruction
1261 #ifndef PRODUCT
1262   if (trace_opto_pipelining()) {
1263     tty->print("# latency_to_inputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
1264     dump();
1265   }
1266 #endif
1267 
1268   if (n->is_Proj()) {
1269     n = n->in(0);
1270   }
1271 
1272   if (n->is_Root()) {
1273     return;
1274   }
1275 
1276   uint nlen = n->len();
1277   uint use_latency = get_latency_for_node(n);
1278   uint use_pre_order = get_block_for_node(n)->_pre_order;
1279 
1280   for (uint j = 0; j < nlen; j++) {
1281     Node *def = n->in(j);
1282 
1283     if (!def || def == n) {
1284       continue;
1285     }
1286 
1287     // Walk backwards thru projections
1288     if (def->is_Proj()) {
1289       def = def->in(0);
1290     }
1291 
1292 #ifndef PRODUCT
1293     if (trace_opto_pipelining()) {
1294       tty->print("#    in(%2d): ", j);
1295       def->dump();
1296     }
1297 #endif
1298 
1299     // If the defining block is not known, assume it is ok
1300     Block *def_block = get_block_for_node(def);
1301     uint def_pre_order = def_block ? def_block->_pre_order : 0;
1302 
1303     if ((use_pre_order <  def_pre_order) || (use_pre_order == def_pre_order && n->is_Phi())) {
1304       continue;
1305     }
1306 
1307     uint delta_latency = n->latency(j);
1308     uint current_latency = delta_latency + use_latency;
1309 
1310     if (get_latency_for_node(def) < current_latency) {
1311       set_latency_for_node(def, current_latency);
1312     }
1313 
1314 #ifndef PRODUCT
1315     if (trace_opto_pipelining()) {
1316       tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d", use_latency, j, delta_latency, current_latency, def->_idx, get_latency_for_node(def));
1317     }
1318 #endif
1319   }
1320 }
1321 
1322 //------------------------------latency_from_use-------------------------------
1323 // Compute the latency of a specific use
1324 int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
1325   // If self-reference, return no latency
1326   if (use == n || use->is_Root()) {
1327     return 0;
1328   }
1329 
1330   uint def_pre_order = get_block_for_node(def)->_pre_order;
1331   uint latency = 0;
1332 
1333   // If the use is not a projection, then it is simple...
1334   if (!use->is_Proj()) {
1335 #ifndef PRODUCT
1336     if (trace_opto_pipelining()) {
1337       tty->print("#    out(): ");
1338       use->dump();
1339     }
1340 #endif
1341 
1342     uint use_pre_order = get_block_for_node(use)->_pre_order;
1343 
1344     if (use_pre_order < def_pre_order)
1345       return 0;
1346 
1347     if (use_pre_order == def_pre_order && use->is_Phi())
1348       return 0;
1349 
1350     uint nlen = use->len();
1351     uint nl = get_latency_for_node(use);
1352 
1353     for ( uint j=0; j<nlen; j++ ) {
1354       if (use->in(j) == n) {
1355         // Change this if we want local latencies
1356         uint ul = use->latency(j);
1357         uint  l = ul + nl;
1358         if (latency < l) latency = l;
1359 #ifndef PRODUCT
1360         if (trace_opto_pipelining()) {
1361           tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, latency = %d",
1362                         nl, j, ul, l, latency);
1363         }
1364 #endif
1365       }
1366     }
1367   } else {
1368     // This is a projection, just grab the latency of the use(s)
1369     for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
1370       uint l = latency_from_use(use, def, use->fast_out(j));
1371       if (latency < l) latency = l;
1372     }
1373   }
1374 
1375   return latency;
1376 }
1377 
1378 //------------------------------latency_from_uses------------------------------
1379 // Compute the latency of this instruction relative to all of it's uses.
1380 // This computes a number that increases as we approach the beginning of the
1381 // routine.
1382 void PhaseCFG::latency_from_uses(Node *n) {
1383   // Set the latency for this instruction
1384 #ifndef PRODUCT
1385   if (trace_opto_pipelining()) {
1386     tty->print("# latency_from_outputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
1387     dump();
1388   }
1389 #endif
1390   uint latency=0;
1391   const Node *def = n->is_Proj() ? n->in(0): n;
1392 
1393   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1394     uint l = latency_from_use(n, def, n->fast_out(i));
1395 
1396     if (latency < l) latency = l;
1397   }
1398 
1399   set_latency_for_node(n, latency);
1400 }
1401 
1402 //------------------------------is_cheaper_block-------------------------
1403 // Check if a block between early and LCA block of uses is cheaper by
1404 // frequency-based policy, latency-based policy and random-based policy
1405 bool PhaseCFG::is_cheaper_block(Block* LCA, Node* self, uint target_latency,
1406                                 uint end_latency, double least_freq,
1407                                 int cand_cnt, bool in_latency) {
1408   if (StressGCM) {
1409     // Should be randomly accepted in stress mode
1410     return C->randomized_select(cand_cnt);
1411   }
1412 
1413   const double delta = 1 + PROB_UNLIKELY_MAG(4);
1414 
1415   // Better Frequency. Add a small delta to the comparison to not needlessly
1416   // hoist because of, e.g., small numerical inaccuracies.
1417   if (LCA->_freq * delta < least_freq) {
1418     return true;
1419   }
1420 
1421   // Otherwise, choose with latency
1422   if (!in_latency                     &&  // No block containing latency
1423       LCA->_freq < least_freq * delta &&  // No worse frequency
1424       target_latency >= end_latency   &&  // within latency range
1425       !self->is_iteratively_computed()    // But don't hoist IV increments
1426             // because they may end up above other uses of their phi forcing
1427             // their result register to be different from their input.
1428   ) {
1429     return true;
1430   }
1431 
1432   return false;
1433 }
1434 
1435 //------------------------------hoist_to_cheaper_block-------------------------
1436 // Pick a block for node self, between early and LCA block of uses, that is a
1437 // cheaper alternative to LCA.
Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
  // 'least' tracks the best placement found so far; start with LCA itself.
  Block* least       = LCA;
  double least_freq  = least->_freq;
  uint target        = get_latency_for_node(self);
  uint start_latency = get_latency_for_node(LCA->head());
  uint end_latency   = get_latency_for_node(LCA->get_node(LCA->end_idx()));
  // 'in_latency' means the current candidate block already covers the node's
  // target latency; once true, frequency alone decides further hoisting.
  bool in_latency    = (target <= start_latency);
  const Block* root_block = get_block_for_node(_root);

  // Turn off latency scheduling if scheduling is just plain off
  if (!C->do_scheduling())
    in_latency = true;

  // Do not hoist (to cover latency) instructions which target a
  // single register.  Hoisting stretches the live range of the
  // single register and may force spilling.
  MachNode* mach = self->is_Mach() ? self->as_Mach() : nullptr;
  if (mach != nullptr && mach->out_RegMask().is_bound1() && !mach->out_RegMask().is_empty()) {
    in_latency = true;
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# Find cheaper block for latency %d: ", get_latency_for_node(self));
    self->dump();
    tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
      LCA->_pre_order,
      LCA->head()->_idx,
      start_latency,
      LCA->get_node(LCA->end_idx())->_idx,
      end_latency,
      least_freq);
  }
#endif

  int cand_cnt = 0;  // number of candidates tried

  // Walk up the dominator tree from LCA (Lowest common ancestor) to
  // the earliest legal location. Capture the least execution frequency,
  // or choose a random block if -XX:+StressGCM, or using latency-based policy
  while (LCA != early) {
    LCA = LCA->_idom;         // Follow up the dominator tree

    if (LCA == nullptr) {
      // Bailout without retry
      assert(false, "graph should be schedulable");
      C->record_method_not_compilable("late schedule failed: LCA is null");
      return least;
    }

    // Don't hoist machine instructions to the root basic block
    if (mach != nullptr && LCA == root_block)
      break;

    if (self->is_memory_writer() &&
        (LCA->_loop->depth() > early->_loop->depth())) {
      // LCA is an invalid placement for a memory writer: choosing it would
      // cause memory interference, as illustrated in schedule_late().
      continue;
    }
    verify_memory_writer_placement(LCA, self);

    uint start_lat = get_latency_for_node(LCA->head());
    uint end_idx   = LCA->end_idx();
    uint end_lat   = get_latency_for_node(LCA->get_node(end_idx));
    double LCA_freq = LCA->_freq;
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
        LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq);
    }
#endif
    cand_cnt++;
    // Delegate the accept/reject decision (stress mode, frequency, latency
    // policy) to is_cheaper_block(); on accept, this block becomes 'least'.
    if (is_cheaper_block(LCA, self, target, end_lat, least_freq, cand_cnt, in_latency)) {
      least = LCA;            // Found cheaper block
      least_freq = LCA_freq;
      start_latency = start_lat;
      end_latency = end_lat;
      if (target <= start_lat)
        in_latency = true;
    }
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print_cr("#  Choose block B%d with start latency=%d and freq=%g",
      least->_pre_order, start_latency, least_freq);
  }
#endif

  // See if the latency needs to be updated
  if (target < end_latency) {
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("#  Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
    }
#endif
    // Raise this node's latency to the chosen block's end latency and
    // propagate the change to its defs.
    set_latency_for_node(self, end_latency);
    partial_latency_of_defs(self);
  }

  return least;
}
1541 
1542 
1543 //------------------------------schedule_late-----------------------------------
1544 // Now schedule all codes as LATE as possible.  This is the LCA in the
1545 // dominator tree of all USES of a value.  Pick the block with the least
1546 // loop nesting depth that is lowest in the dominator tree.
1547 extern const char must_clone[];
1548 void PhaseCFG::schedule_late(VectorSet &visited, Node_Stack &stack) {
1549 #ifndef PRODUCT
1550   if (trace_opto_pipelining())
1551     tty->print("\n#---- schedule_late ----\n");
1552 #endif
1553 
1554   Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
1555   Node *self;
1556 
1557   // Walk over all the nodes from last to first
1558   while ((self = iter.next())) {
1559     Block* early = get_block_for_node(self); // Earliest legal placement
1560 
1561     if (self->is_top()) {
1562       // Top node goes in bb #2 with other constants.
1563       // It must be special-cased, because it has no out edges.
1564       early->add_inst(self);
1565       continue;
1566     }
1567 
1568     // No uses, just terminate
1569     if (self->outcnt() == 0) {
1570       assert(self->is_MachProj(), "sanity");
1571       continue;                   // Must be a dead machine projection
1572     }
1573 
1574     // If node is pinned in the block, then no scheduling can be done.
1575     if( self->pinned() )          // Pinned in block?
1576       continue;
1577 
1578 #ifdef ASSERT
1579     // Assert that memory writers (e.g. stores) have a "home" block (the block
1580     // given by their control input), and that this block corresponds to their
1581     // earliest possible placement. This guarantees that
1582     // hoist_to_cheaper_block() will always have at least one valid choice.
1583     if (self->is_memory_writer()) {
1584       assert(find_block_for_node(self->in(0)) == early,
1585              "The home of a memory writer must also be its earliest placement");
1586     }
1587 #endif
1588 
1589     MachNode* mach = self->is_Mach() ? self->as_Mach() : nullptr;
1590     if (mach) {
1591       switch (mach->ideal_Opcode()) {
1592       case Op_CreateEx:
1593         // Don't move exception creation
1594         early->add_inst(self);
1595         continue;
1596         break;
1597       case Op_CastI2N:
1598         early->add_inst(self);
1599         continue;
1600       case Op_CheckCastPP: {
1601         // Don't move CheckCastPP nodes away from their input, if the input
1602         // is a rawptr (5071820).
1603         Node *def = self->in(1);
1604         if (def != nullptr && def->bottom_type()->base() == Type::RawPtr) {
1605           early->add_inst(self);
1606 #ifdef ASSERT
1607           _raw_oops.push(def);
1608 #endif
1609           continue;
1610         }
1611         break;
1612       }
1613       default:
1614         break;
1615       }
1616       if (C->has_irreducible_loop() && self->is_memory_writer()) {
1617         // If the CFG is irreducible, place memory writers in their home block.
1618         // This prevents hoist_to_cheaper_block() from accidentally placing such
1619         // nodes into deeper loops, as in the following example:
1620         //
1621         // Home placement of store in B1 (loop L1):
1622         //
1623         // B1 (L1):
1624         //   m1 <- ..
1625         //   m2 <- store m1, ..
1626         // B2 (L2):
1627         //   jump B2
1628         // B3 (L1):
1629         //   .. <- .. m2, ..
1630         //
1631         // Wrong "hoisting" of store to B2 (in loop L2, child of L1):
1632         //
1633         // B1 (L1):
1634         //   m1 <- ..
1635         // B2 (L2):
1636         //   m2 <- store m1, ..
1637         //   # Wrong: m1 and m2 interfere at this point.
1638         //   jump B2
1639         // B3 (L1):
1640         //   .. <- .. m2, ..
1641         //
1642         // This "hoist inversion" can happen due to different factors such as
1643         // inaccurate estimation of frequencies for irreducible CFGs, and loops
1644         // with always-taken exits in reducible CFGs. In the reducible case,
1645         // hoist inversion is prevented by discarding invalid blocks (those in
1646         // deeper loops than the home block). In the irreducible case, the
1647         // invalid blocks cannot be identified due to incomplete loop nesting
1648         // information, hence a conservative solution is taken.
1649 #ifndef PRODUCT
1650         if (trace_opto_pipelining()) {
1651           tty->print_cr("# Irreducible loops: schedule in home block B%d:",
1652                         early->_pre_order);
1653           self->dump();
1654         }
1655 #endif
1656         schedule_node_into_block(self, early);
1657         continue;
1658       }
1659     }
1660 
1661     // Gather LCA of all uses
1662     Block *LCA = nullptr;
1663     {
1664       for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
1665         // For all uses, find LCA
1666         Node* use = self->fast_out(i);
1667         LCA = raise_LCA_above_use(LCA, use, self, this);
1668       }
1669       guarantee(LCA != nullptr, "There must be a LCA");
1670     }  // (Hide defs of imax, i from rest of block.)
1671 
1672     // Place temps in the block of their use.  This isn't a
1673     // requirement for correctness but it reduces useless
1674     // interference between temps and other nodes.
1675     if (mach != nullptr && mach->is_MachTemp()) {
1676       map_node_to_block(self, LCA);
1677       LCA->add_inst(self);
1678       continue;
1679     }
1680 
1681     // Check if 'self' could be anti-dependent on memory
1682     if (self->needs_anti_dependence_check()) {
1683       // Hoist LCA above possible-defs and insert anti-dependences to
1684       // defs in new LCA block.
1685       LCA = raise_above_anti_dependences(LCA, self);
1686       if (C->failing()) {
1687         return;
1688       }
1689     }
1690 
1691     if (early->_dom_depth > LCA->_dom_depth) {
1692       // Somehow the LCA has moved above the earliest legal point.
1693       // (One way this can happen is via memory_early_block.)
1694       if (C->subsume_loads() == true && !C->failing()) {
1695         // Retry with subsume_loads == false
1696         // If this is the first failure, the sentinel string will "stick"
1697         // to the Compile object, and the C2Compiler will see it and retry.
1698         C->record_failure(C2Compiler::retry_no_subsuming_loads());
1699       } else {
1700         // Bailout without retry when (early->_dom_depth > LCA->_dom_depth)
1701         assert(C->failure_is_artificial(), "graph should be schedulable");
1702         C->record_method_not_compilable("late schedule failed: incorrect graph" DEBUG_ONLY(COMMA true));
1703       }
1704       return;
1705     }
1706 
1707     if (self->is_memory_writer()) {
1708       // If the LCA of a memory writer is a descendant of its home loop, hoist
1709       // it into a valid placement.
1710       while (LCA->_loop->depth() > early->_loop->depth()) {
1711         LCA = LCA->_idom;
1712       }
1713       assert(LCA != nullptr, "a valid LCA must exist");
1714       verify_memory_writer_placement(LCA, self);
1715     }
1716 
1717     // If there is no opportunity to hoist, then we're done.
1718     // In stress mode, try to hoist even the single operations.
1719     bool try_to_hoist = StressGCM || (LCA != early);
1720 
1721     // Must clone guys stay next to use; no hoisting allowed.
1722     // Also cannot hoist guys that alter memory or are otherwise not
1723     // allocatable (hoisting can make a value live longer, leading to
1724     // anti and output dependency problems which are normally resolved
1725     // by the register allocator giving everyone a different register).
1726     if (mach != nullptr && must_clone[mach->ideal_Opcode()])
1727       try_to_hoist = false;
1728 
1729     Block* late = nullptr;
1730     if (try_to_hoist) {
1731       // Now find the block with the least execution frequency.
1732       // Start at the latest schedule and work up to the earliest schedule
1733       // in the dominator tree.  Thus the Node will dominate all its uses.
1734       late = hoist_to_cheaper_block(LCA, early, self);
1735     } else {
1736       // Just use the LCA of the uses.
1737       late = LCA;
1738     }
1739 
1740     // Put the node into target block
1741     schedule_node_into_block(self, late);
1742 
1743 #ifdef ASSERT
1744     if (self->needs_anti_dependence_check()) {
1745       // since precedence edges are only inserted when we're sure they
1746       // are needed make sure that after placement in a block we don't
1747       // need any new precedence edges.
1748       verify_anti_dependences(late, self);
1749       if (C->failing()) {
1750         return;
1751       }
1752     }
1753 #endif
1754   } // Loop until all nodes have been visited
1755 
1756 } // end ScheduleLate
1757 
1758 //------------------------------GlobalCodeMotion-------------------------------
void PhaseCFG::global_code_motion() {
  ResourceMark rm;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Start GlobalCodeMotion ----\n");
  }
#endif

  // Initialize the node to block mapping for things on the proj_list
  for (uint i = 0; i < _matcher.number_of_projections(); i++) {
    unmap_node_from_block(_matcher.get_projection(i));
  }

  // Set the basic block for Nodes pinned into blocks
  VectorSet visited;
  schedule_pinned_nodes(visited);

  // Find the earliest Block any instruction can be placed in.  Some
  // instructions are pinned into Blocks.  Unpinned instructions can
  // appear in last block in which all their inputs occur.
  visited.clear();
  Node_Stack stack((C->live_nodes() >> 2) + 16); // pre-grow
  if (!schedule_early(visited, stack)) {
    // Bailout without retry
    assert(C->failure_is_artificial(), "early schedule failed");
    C->record_method_not_compilable("early schedule failed" DEBUG_ONLY(COMMA true));
    return;
  }

  // Build Def-Use edges.
  // Compute the latency information (via backwards walk) for all the
  // instructions in the graph
  _node_latency = new GrowableArray<uint>(); // resource_area allocation

  if (C->do_scheduling()) {
    compute_latencies_backwards(visited, stack);
  }

  // Now schedule all codes as LATE as possible.  This is the LCA in the
  // dominator tree of all USES of a value.  Pick the block with the least
  // loop nesting depth that is lowest in the dominator tree.
  // ( visited.clear() called in schedule_late()->Node_Backward_Iterator() )
  schedule_late(visited, stack);
  if (C->failing()) {
    return;
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Detect implicit null checks ----\n");
  }
#endif

  // Detect implicit-null-check opportunities.  Basically, find null checks
  // with suitable memory ops nearby.  Use the memory op to do the null check.
  // I can generate a memory op if there is not one nearby.
  if (C->is_method_compilation()) {
    // By reversing the loop direction we get a very minor gain on mpegaudio.
    // Feel free to revert to a forward loop for clarity.
    // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
    // _null_check_tests holds (proj, val) pairs, hence the stride of 2.
    for (int i = _matcher._null_check_tests.size() - 2; i >= 0; i -= 2) {
      Node* proj = _matcher._null_check_tests[i];
      Node* val  = _matcher._null_check_tests[i + 1];
      Block* block = get_block_for_node(proj);
      implicit_null_check(block, proj, val, C->allowed_deopt_reasons());
      // The implicit_null_check will only perform the transformation
      // if the null branch is truly uncommon, *and* it leads to an
      // uncommon trap.  Combined with the too_many_traps guards
      // above, this prevents SEGV storms reported in 6366351,
      // by recompiling offending methods without this optimization.
      if (C->failing()) {
        return;
      }
    }
  }

  // Register-pressure-aware local scheduling is only worthwhile if at least
  // one block is big enough (more than 10 nodes).
  bool block_size_threshold_ok = false;
  intptr_t *recalc_pressure_nodes = nullptr;
  if (OptoRegScheduling) {
    for (uint i = 0; i < number_of_blocks(); i++) {
      Block* block = get_block(i);
      if (block->number_of_nodes() > 10) {
        block_size_threshold_ok = true;
        break;
      }
    }
  }

  // Enabling the scheduler for register pressure plus finding blocks of size to schedule for it
  // is key to enabling this feature.
  PhaseChaitin regalloc(C->unique(), *this, _matcher, true);
  ResourceArea live_arena(mtCompiler, Arena::Tag::tag_reglive);      // Arena for liveness
  ResourceMark rm_live(&live_arena);
  PhaseLive live(*this, regalloc._lrg_map.names(), &live_arena, true);
  PhaseIFG ifg(&live_arena);
  if (OptoRegScheduling && block_size_threshold_ok) {
    // Compute SSA liveness information so schedule_local() can track
    // register pressure per block.
    regalloc.mark_ssa();
    Compile::TracePhase tp(_t_computeLive);
    rm_live.reset_to_mark();           // Reclaim working storage
    IndexSet::reset_memory(C, &live_arena);
    uint node_size = regalloc._lrg_map.max_lrg_id();
    ifg.init(node_size); // Empty IFG
    regalloc.set_ifg(ifg);
    regalloc.set_live(live);
    regalloc.gather_lrg_masks(false);    // Collect LRG masks
    live.compute(node_size); // Compute liveness

    recalc_pressure_nodes = NEW_RESOURCE_ARRAY(intptr_t, node_size);
    for (uint i = 0; i < node_size; i++) {
      recalc_pressure_nodes[i] = 0;
    }
  }
  _regalloc = &regalloc;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Start Local Scheduling ----\n");
  }
#endif

  // Schedule locally.  Right now a simple topological sort.
  // Later, do a real latency aware scheduler.
  GrowableArray<int> ready_cnt(C->unique(), C->unique(), -1);
  visited.reset();
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    if (!schedule_local(block, ready_cnt, visited, recalc_pressure_nodes)) {
      if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
        assert(C->failure_is_artificial(), "local schedule failed");
        C->record_method_not_compilable("local schedule failed" DEBUG_ONLY(COMMA true));
      }
      // Clear _regalloc before returning: 'regalloc' is stack-allocated and
      // about to go out of scope.
      _regalloc = nullptr;
      return;
    }
  }
  _regalloc = nullptr;

  // If we inserted any instructions between a Call and his CatchNode,
  // clone the instructions on all paths below the Catch.
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    call_catch_cleanup(block);
    if (C->failing()) {
      return;
    }
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- After GlobalCodeMotion ----\n");
    for (uint i = 0; i < number_of_blocks(); i++) {
      Block* block = get_block(i);
      block->dump();
    }
  }
#endif
  // Dead.
  // Poison the latency array pointer so any later use is caught quickly.
  _node_latency = (GrowableArray<uint> *)((intptr_t)0xdeadbeef);
}
1919 
1920 bool PhaseCFG::do_global_code_motion() {
1921 
1922   build_dominator_tree();
1923   if (C->failing()) {
1924     return false;
1925   }
1926 
1927   NOT_PRODUCT( C->verify_graph_edges(); )
1928 
1929   estimate_block_frequency();
1930 
1931   global_code_motion();
1932 
1933   if (C->failing()) {
1934     return false;
1935   }
1936 
1937   return true;
1938 }
1939 
1940 //------------------------------Estimate_Block_Frequency-----------------------
1941 // Estimate block frequencies based on IfNode probabilities.
void PhaseCFG::estimate_block_frequency() {

  // Force conditional branches leading to uncommon traps to be unlikely,
  // not because we get to the uncommon_trap with less relative frequency,
  // but because an uncommon_trap typically causes a deopt, so we only get
  // there once.
  if (C->do_freq_based_layout()) {
    // Worklist-propagate "uncommon" backwards from root predecessors that
    // contain uncommon code, adjusting branch probabilities before the loop
    // tree and frequencies are computed.
    Block_List worklist;
    Block* root_blk = get_block(0);
    for (uint i = 1; i < root_blk->num_preds(); i++) {
      Block *pb = get_block_for_node(root_blk->pred(i));
      if (pb->has_uncommon_code()) {
        worklist.push(pb);
      }
    }
    while (worklist.size() > 0) {
      Block* uct = worklist.pop();
      if (uct == get_root_block()) {
        continue;
      }
      for (uint i = 1; i < uct->num_preds(); i++) {
        Block *pb = get_block_for_node(uct->pred(i));
        if (pb->_num_succs == 1) {
          worklist.push(pb);
        } else if (pb->num_fall_throughs() == 2) {
          pb->update_uncommon_branch(uct);
        }
      }
    }
  }

  // Create the loop tree and calculate loop depth.
  _root_loop = create_loop_tree();
  _root_loop->compute_loop_depth(0);

  // Compute block frequency of each block, relative to a single loop entry.
  _root_loop->compute_freq();

  // Adjust all frequencies to be relative to a single method entry
  _root_loop->_freq = 1.0;
  _root_loop->scale_freq();

  // Save outmost loop frequency for LRG frequency threshold
  _outer_loop_frequency = _root_loop->outer_loop_freq();

  // force paths ending at uncommon traps to be infrequent
  if (!C->do_freq_based_layout()) {
    // Alternative to the pre-pass above: after frequencies are computed,
    // directly clamp blocks on uncommon paths down to PROB_MIN.
    Block_List worklist;
    Block* root_blk = get_block(0);
    for (uint i = 1; i < root_blk->num_preds(); i++) {
      Block *pb = get_block_for_node(root_blk->pred(i));
      if (pb->has_uncommon_code()) {
        worklist.push(pb);
      }
    }
    while (worklist.size() > 0) {
      Block* uct = worklist.pop();
      uct->_freq = PROB_MIN;
      for (uint i = 1; i < uct->num_preds(); i++) {
        Block *pb = get_block_for_node(uct->pred(i));
        if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
          worklist.push(pb);
        }
      }
    }
  }

#ifdef ASSERT
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* b = get_block(i);
    assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency");
  }
#endif

#ifndef PRODUCT
  if (PrintCFGBlockFreq) {
    tty->print_cr("CFG Block Frequencies");
    _root_loop->dump_tree();
    if (Verbose) {
      tty->print_cr("PhaseCFG dump");
      dump();
      tty->print_cr("Node dump");
      _root->dump(99999);
    }
  }
#endif
}
2029 
2030 //----------------------------create_loop_tree--------------------------------
2031 // Create a loop tree from the CFG
CFGLoop* PhaseCFG::create_loop_tree() {

#ifdef ASSERT
  assert(get_block(0) == get_root_block(), "first block should be root block");
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    // Check that _loop field are clear...we could clear them if not.
    assert(block->_loop == nullptr, "clear _loop expected");
    // Sanity check that the RPO numbering is reflected in the _blocks array.
    // It doesn't have to be for the loop tree to be built, but if it is not,
    // then the blocks have been reordered since dom graph building...which
    // may question the RPO numbering
    assert(block->_rpo == i, "unexpected reverse post order number");
  }
#endif

  // idct assigns a unique id to each CFGLoop; id 0 is the method's
  // pseudo (root) loop.
  int idct = 0;
  CFGLoop* root_loop = new CFGLoop(idct++);

  Block_List worklist;

  // Assign blocks to loops
  // Iterate in reverse RPO so inner loops are discovered before the
  // outer loops that contain them.
  for(uint i = number_of_blocks() - 1; i > 0; i-- ) { // skip Root block
    Block* block = get_block(i);

    if (block->head()->is_Loop()) {
      Block* loop_head = block;
      assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
      Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
      Block* tail = get_block_for_node(tail_n);

      // Defensively filter out Loop nodes for non-single-entry loops.
      // For all reasonable loops, the head occurs before the tail in RPO.
      if (i <= tail->_rpo) {

        // The tail and (recursive) predecessors of the tail
        // are made members of a new loop.

        assert(worklist.size() == 0, "nonempty worklist");
        CFGLoop* nloop = new CFGLoop(idct++);
        assert(loop_head->_loop == nullptr, "just checking");
        loop_head->_loop = nloop;
        // Add to nloop so push_pred() will skip over inner loops
        nloop->add_member(loop_head);
        nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this);

        // Walk predecessors backwards from the tail until the head is
        // reached, claiming each unassigned block for nloop.
        while (worklist.size() > 0) {
          Block* member = worklist.pop();
          if (member != loop_head) {
            for (uint j = 1; j < member->num_preds(); j++) {
              nloop->push_pred(member, j, worklist, this);
            }
          }
        }
      }
    }
  }

  // Create a member list for each loop consisting
  // of both blocks and (immediate child) loops.
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    CFGLoop* lp = block->_loop;
    if (lp == nullptr) {
      // Not assigned to a loop. Add it to the method's pseudo loop.
      block->_loop = root_loop;
      lp = root_loop;
    }
    if (lp == root_loop || block != lp->head()) { // loop heads are already members
      lp->add_member(block);
    }
    if (lp != root_loop) {
      if (lp->parent() == nullptr) {
        // Not a nested loop. Make it a child of the method's pseudo loop.
        root_loop->add_nested_loop(lp);
      }
      if (block == lp->head()) {
        // Add nested loop to member list of parent loop.
        lp->parent()->add_member(lp);
      }
    }
  }

  return root_loop;
}
2117 
2118 //------------------------------push_pred--------------------------------------
// Examine predecessor 'i' of block 'blk'. If the predecessor is not yet
// assigned to a loop, claim it for this loop and queue it on the worklist;
// if it already belongs to a (different) inner loop, attach that loop as a
// nested child and continue the walk from the inner loop's entry edge.
void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) {
  Node* pred_n = blk->pred(i);
  Block* pred = cfg->get_block_for_node(pred_n);
  CFGLoop *pred_loop = pred->_loop;
  if (pred_loop == nullptr) {
    // Filter out blocks for non-single-entry loops.
    // For all reasonable loops, the head occurs before the tail in RPO.
    if (pred->_rpo > head()->_rpo) {
      pred->_loop = this;
      worklist.push(pred);
    }
  } else if (pred_loop != this) {
    // Nested loop.
    // Climb to pred's outermost loop that is not already a child of this one.
    while (pred_loop->_parent != nullptr && pred_loop->_parent != this) {
      pred_loop = pred_loop->_parent;
    }
    // Make pred's loop be a child
    if (pred_loop->_parent == nullptr) {
      add_nested_loop(pred_loop);
      // Continue with loop entry predecessor.
      Block* pred_head = pred_loop->head();
      assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
      assert(pred_head != head(), "loop head in only one loop");
      push_pred(pred_head, LoopNode::EntryControl, worklist, cfg);
    } else {
      assert(pred_loop->_parent == this && _parent == nullptr, "just checking");
    }
  }
}
2148 
2149 //------------------------------add_nested_loop--------------------------------
2150 // Make cl a child of the current loop in the loop tree.
2151 void CFGLoop::add_nested_loop(CFGLoop* cl) {
2152   assert(_parent == nullptr, "no parent yet");
2153   assert(cl != this, "not my own parent");
2154   cl->_parent = this;
2155   CFGLoop* ch = _child;
2156   if (ch == nullptr) {
2157     _child = cl;
2158   } else {
2159     while (ch->_sibling != nullptr) { ch = ch->_sibling; }
2160     ch->_sibling = cl;
2161   }
2162 }
2163 
2164 //------------------------------compute_loop_depth-----------------------------
2165 // Store the loop depth in each CFGLoop object.
2166 // Recursively walk the children to do the same for them.
2167 void CFGLoop::compute_loop_depth(int depth) {
2168   _depth = depth;
2169   CFGLoop* ch = _child;
2170   while (ch != nullptr) {
2171     ch->compute_loop_depth(depth + 1);
2172     ch = ch->_sibling;
2173   }
2174 }
2175 
2176 //------------------------------compute_freq-----------------------------------
2177 // Compute the frequency of each block and loop, relative to a single entry
2178 // into the dominating loop head.
void CFGLoop::compute_freq() {
  // Bottom up traversal of loop tree (visit inner loops first.)
  // Set loop head frequency to 1.0, then transitively
  // compute frequency for all successors in the loop,
  // as well as for each exit edge.  Inner loops are
  // treated as single blocks with loop exit targets
  // as the successor blocks.

  // Nested loops first
  CFGLoop* ch = _child;
  while (ch != nullptr) {
    ch->compute_freq();
    ch = ch->_sibling;
  }
  assert (_members.length() > 0, "no empty loops");
  Block* hd = head();
  hd->_freq = 1.0;
  // Propagate frequency forward through the member list: each member's
  // frequency flows to its successors weighted by branch probability.
  for (int i = 0; i < _members.length(); i++) {
    CFGElement* s = _members.at(i);
    double freq = s->_freq;
    if (s->is_block()) {
      Block* b = s->as_Block();
      for (uint j = 0; j < b->_num_succs; j++) {
        Block* sb = b->_succs[j];
        update_succ_freq(sb, freq * b->succ_prob(j));
      }
    } else {
      // An inner loop is treated as a single node; its frequency flows
      // out along its (already normalized) exit edges.
      CFGLoop* lp = s->as_CFGLoop();
      assert(lp->_parent == this, "immediate child");
      for (int k = 0; k < lp->_exits.length(); k++) {
        Block* eb = lp->_exits.at(k).get_target();
        double prob = lp->_exits.at(k).get_prob();
        update_succ_freq(eb, freq * prob);
      }
    }
  }

  // For all loops other than the outer, "method" loop,
  // sum and normalize the exit probability. The "method" loop
  // should keep the initial exit probability of 1, so that
  // inner blocks do not get erroneously scaled.
  if (_depth != 0) {
    // Total the exit probabilities for this loop.
    double exits_sum = 0.0f;
    for (int i = 0; i < _exits.length(); i++) {
      exits_sum += _exits.at(i).get_prob();
    }

    // Normalize the exit probabilities. Until now, the
    // probabilities estimate the possibility of exit per
    // a single loop iteration; afterward, they estimate
    // the probability of exit per loop entry.
    for (int i = 0; i < _exits.length(); i++) {
      Block* et = _exits.at(i).get_target();
      float new_prob = 0.0f;
      if (_exits.at(i).get_prob() > 0.0f) {
        new_prob = _exits.at(i).get_prob() / exits_sum;
      }
      BlockProbPair bpp(et, new_prob);
      _exits.at_put(i, bpp);
    }

    // Save the total, but guard against unreasonable probability,
    // as the value is used to estimate the loop trip count.
    // An infinite trip count would blur relative block
    // frequencies.
    if (exits_sum > 1.0f) exits_sum = 1.0;
    if (exits_sum < PROB_MIN) exits_sum = PROB_MIN;
    _exit_prob = exits_sum;
  }
}
2250 
2251 //------------------------------succ_prob-------------------------------------
2252 // Determine the probability of reaching successor 'i' from the receiver block.
2253 float Block::succ_prob(uint i) {
2254   int eidx = end_idx();
2255   Node *n = get_node(eidx);  // Get ending Node
2256 
2257   int op = n->Opcode();
2258   if (n->is_Mach()) {
2259     if (n->is_MachNullCheck()) {
2260       // Can only reach here if called after lcm. The original Op_If is gone,
2261       // so we attempt to infer the probability from one or both of the
2262       // successor blocks.
2263       assert(_num_succs == 2, "expecting 2 successors of a null check");
2264       // If either successor has only one predecessor, then the
2265       // probability estimate can be derived using the
2266       // relative frequency of the successor and this block.
2267       if (_succs[i]->num_preds() == 2) {
2268         return _succs[i]->_freq / _freq;
2269       } else if (_succs[1-i]->num_preds() == 2) {
2270         return 1 - (_succs[1-i]->_freq / _freq);
2271       } else {
2272         // Estimate using both successor frequencies
2273         float freq = _succs[i]->_freq;
2274         return freq / (freq + _succs[1-i]->_freq);
2275       }
2276     }
2277     op = n->as_Mach()->ideal_Opcode();
2278   }
2279 
2280 
2281   // Switch on branch type
2282   switch( op ) {
2283   case Op_CountedLoopEnd:
2284   case Op_If: {
2285     assert (i < 2, "just checking");
2286     // Conditionals pass on only part of their frequency
2287     float prob  = n->as_MachIf()->_prob;
2288     assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
2289     // If succ[i] is the FALSE branch, invert path info
2290     if( get_node(i + eidx + 1)->Opcode() == Op_IfFalse ) {
2291       return 1.0f - prob; // not taken
2292     } else {
2293       return prob; // taken
2294     }
2295   }
2296 
2297   case Op_Jump:
2298     return n->as_MachJump()->_probs[get_node(i + eidx + 1)->as_JumpProj()->_con];
2299 
2300   case Op_Catch: {
2301     const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
2302     if (ci->_con == CatchProjNode::fall_through_index) {
2303       // Fall-thru path gets the lion's share.
2304       return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
2305     } else {
2306       // Presume exceptional paths are equally unlikely
2307       return PROB_UNLIKELY_MAG(5);
2308     }
2309   }
2310 
2311   case Op_Root:
2312   case Op_Goto:
2313     // Pass frequency straight thru to target
2314     return 1.0f;
2315 
2316   case Op_NeverBranch: {
2317     Node* succ = n->as_NeverBranch()->proj_out(0)->unique_ctrl_out();
2318     if (_succs[i]->head() == succ) {
2319       return 1.0f;
2320     }
2321     return 0.0f;
2322   }
2323 
2324   case Op_TailCall:
2325   case Op_TailJump:
2326   case Op_ForwardException:
2327   case Op_Return:
2328   case Op_Halt:
2329   case Op_Rethrow:
2330     // Do not push out freq to root block
2331     return 0.0f;
2332 
2333   default:
2334     ShouldNotReachHere();
2335   }
2336 
2337   return 0.0f;
2338 }
2339 
2340 //------------------------------num_fall_throughs-----------------------------
2341 // Return the number of fall-through candidates for a block
2342 int Block::num_fall_throughs() {
2343   int eidx = end_idx();
2344   Node *n = get_node(eidx);  // Get ending Node
2345 
2346   int op = n->Opcode();
2347   if (n->is_Mach()) {
2348     if (n->is_MachNullCheck()) {
2349       // In theory, either side can fall-thru, for simplicity sake,
2350       // let's say only the false branch can now.
2351       return 1;
2352     }
2353     op = n->as_Mach()->ideal_Opcode();
2354   }
2355 
2356   // Switch on branch type
2357   switch( op ) {
2358   case Op_CountedLoopEnd:
2359   case Op_If:
2360     return 2;
2361 
2362   case Op_Root:
2363   case Op_Goto:
2364     return 1;
2365 
2366   case Op_Catch: {
2367     for (uint i = 0; i < _num_succs; i++) {
2368       const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
2369       if (ci->_con == CatchProjNode::fall_through_index) {
2370         return 1;
2371       }
2372     }
2373     return 0;
2374   }
2375 
2376   case Op_Jump:
2377   case Op_NeverBranch:
2378   case Op_TailCall:
2379   case Op_TailJump:
2380   case Op_ForwardException:
2381   case Op_Return:
2382   case Op_Halt:
2383   case Op_Rethrow:
2384     return 0;
2385 
2386   default:
2387     ShouldNotReachHere();
2388   }
2389 
2390   return 0;
2391 }
2392 
2393 //------------------------------succ_fall_through-----------------------------
2394 // Return true if a specific successor could be fall-through target.
2395 bool Block::succ_fall_through(uint i) {
2396   int eidx = end_idx();
2397   Node *n = get_node(eidx);  // Get ending Node
2398 
2399   int op = n->Opcode();
2400   if (n->is_Mach()) {
2401     if (n->is_MachNullCheck()) {
2402       // In theory, either side can fall-thru, for simplicity sake,
2403       // let's say only the false branch can now.
2404       return get_node(i + eidx + 1)->Opcode() == Op_IfFalse;
2405     }
2406     op = n->as_Mach()->ideal_Opcode();
2407   }
2408 
2409   // Switch on branch type
2410   switch( op ) {
2411   case Op_CountedLoopEnd:
2412   case Op_If:
2413   case Op_Root:
2414   case Op_Goto:
2415     return true;
2416 
2417   case Op_Catch: {
2418     const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
2419     return ci->_con == CatchProjNode::fall_through_index;
2420   }
2421 
2422   case Op_Jump:
2423   case Op_NeverBranch:
2424   case Op_TailCall:
2425   case Op_TailJump:
2426   case Op_ForwardException:
2427   case Op_Return:
2428   case Op_Halt:
2429   case Op_Rethrow:
2430     return false;
2431 
2432   default:
2433     ShouldNotReachHere();
2434   }
2435 
2436   return false;
2437 }
2438 
2439 //------------------------------update_uncommon_branch------------------------
2440 // Update the probability of a two-branch to be uncommon
2441 void Block::update_uncommon_branch(Block* ub) {
2442   int eidx = end_idx();
2443   Node *n = get_node(eidx);  // Get ending Node
2444 
2445   int op = n->as_Mach()->ideal_Opcode();
2446 
2447   assert(op == Op_CountedLoopEnd || op == Op_If, "must be a If");
2448   assert(num_fall_throughs() == 2, "must be a two way branch block");
2449 
2450   // Which successor is ub?
2451   uint s;
2452   for (s = 0; s <_num_succs; s++) {
2453     if (_succs[s] == ub) break;
2454   }
2455   assert(s < 2, "uncommon successor must be found");
2456 
2457   // If ub is the true path, make the proability small, else
2458   // ub is the false path, and make the probability large
2459   bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse);
2460 
2461   // Get existing probability
2462   float p = n->as_MachIf()->_prob;
2463 
2464   if (invert) p = 1.0 - p;
2465   if (p > PROB_MIN) {
2466     p = PROB_MIN;
2467   }
2468   if (invert) p = 1.0 - p;
2469 
2470   n->as_MachIf()->_prob = p;
2471 }
2472 
2473 //------------------------------update_succ_freq-------------------------------
2474 // Update the appropriate frequency associated with block 'b', a successor of
2475 // a block in this loop.
2476 void CFGLoop::update_succ_freq(Block* b, double freq) {
2477   if (b->_loop == this) {
2478     if (b == head()) {
2479       // back branch within the loop
2480       // Do nothing now, the loop carried frequency will be
2481       // adjust later in scale_freq().
2482     } else {
2483       // simple branch within the loop
2484       b->_freq += freq;
2485     }
2486   } else if (!in_loop_nest(b)) {
2487     // branch is exit from this loop
2488     BlockProbPair bpp(b, freq);
2489     _exits.append(bpp);
2490   } else {
2491     // branch into nested loop
2492     CFGLoop* ch = b->_loop;
2493     ch->_freq += freq;
2494   }
2495 }
2496 
2497 //------------------------------in_loop_nest-----------------------------------
2498 // Determine if block b is in the receiver's loop nest.
2499 bool CFGLoop::in_loop_nest(Block* b) {
2500   int depth = _depth;
2501   CFGLoop* b_loop = b->_loop;
2502   int b_depth = b_loop->_depth;
2503   if (depth == b_depth) {
2504     return true;
2505   }
2506   while (b_depth > depth) {
2507     b_loop = b_loop->_parent;
2508     b_depth = b_loop->_depth;
2509   }
2510   return b_loop == this;
2511 }
2512 
2513 //------------------------------scale_freq-------------------------------------
2514 // Scale frequency of loops and blocks by trip counts from outer loops
2515 // Do a top down traversal of loop tree (visit outer loops first.)
2516 void CFGLoop::scale_freq() {
2517   double loop_freq = _freq * trip_count();
2518   _freq = loop_freq;
2519   for (int i = 0; i < _members.length(); i++) {
2520     CFGElement* s = _members.at(i);
2521     double block_freq = s->_freq * loop_freq;
2522     if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY)
2523       block_freq = MIN_BLOCK_FREQUENCY;
2524     s->_freq = block_freq;
2525   }
2526   CFGLoop* ch = _child;
2527   while (ch != nullptr) {
2528     ch->scale_freq();
2529     ch = ch->_sibling;
2530   }
2531 }
2532 
2533 // Frequency of outer loop
2534 double CFGLoop::outer_loop_freq() const {
2535   if (_child != nullptr) {
2536     return _child->_freq;
2537   }
2538   return _freq;
2539 }
2540 
2541 #ifndef PRODUCT
2542 //------------------------------dump_tree--------------------------------------
2543 void CFGLoop::dump_tree() const {
2544   dump();
2545   if (_child != nullptr)   _child->dump_tree();
2546   if (_sibling != nullptr) _sibling->dump_tree();
2547 }
2548 
2549 //------------------------------dump-------------------------------------------
//------------------------------dump-------------------------------------------
// Debug print of one loop: header line with id/trip count/frequency, then the
// member blocks/inner loops, then the exit edges with their probabilities.
// Output is indented by _depth so nesting is visible in dump_tree() output.
void CFGLoop::dump() const {
  // Header: "Method" for the depth-0 pseudo-loop, "Loop" otherwise.
  for (int i = 0; i < _depth; i++) tty->print("   ");
  tty->print("%s: %d  trip_count: %6.0f freq: %6.0f\n",
             _depth == 0 ? "Method" : "Loop", _id, trip_count(), _freq);
  for (int i = 0; i < _depth; i++) tty->print("   ");
  tty->print("         members:");
  // k counts entries on the current output line; wrap after 6 members.
  int k = 0;
  for (int i = 0; i < _members.length(); i++) {
    if (k++ >= 6) {
      tty->print("\n              ");
      for (int j = 0; j < _depth+1; j++) tty->print("   ");
      k = 0;
    }
    CFGElement *s = _members.at(i);
    if (s->is_block()) {
      // Member is a basic block: print pre-order number and frequency.
      Block *b = s->as_Block();
      tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq);
    } else {
      // Member is a nested loop: print loop id and frequency.
      CFGLoop* lp = s->as_CFGLoop();
      tty->print(" L%d(%6.3f)", lp->_id, lp->_freq);
    }
  }
  tty->print("\n");
  for (int i = 0; i < _depth; i++) tty->print("   ");
  tty->print("         exits:  ");
  // Same wrapping scheme for exits, but 7 per line.
  k = 0;
  for (int i = 0; i < _exits.length(); i++) {
    if (k++ >= 7) {
      tty->print("\n              ");
      for (int j = 0; j < _depth+1; j++) tty->print("   ");
      k = 0;
    }
    // Each exit: target block's pre-order number and exit probability in %.
    Block *blk = _exits.at(i).get_target();
    double prob = _exits.at(i).get_prob();
    tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100));
  }
  tty->print("\n");
}
2588 #endif