/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "opto/block.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/machnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "runtime/deoptimization.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

// To avoid float value underflow
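// (block frequencies are products of branch probabilities, so deeply nested
// control flow can otherwise push them into the denormal range)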
#define MIN_BLOCK_FREQUENCY 1.e-35f

//----------------------------schedule_node_into_block-------------------------
// Insert node n into block b. Look for projections of n and make sure they
// are in b also.
void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
  // Set the basic block of n and add n to b.
  map_node_to_block(n, b);
  b->add_inst(n);

  // After Matching, nearly any old Node may have projections trailing it.
  // These are usually machine-dependent flags.  In any case, they might
  // float to another block below this one.  Move them up.
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node*  use  = n->fast_out(i);
    if (use->is_Proj()) {
      Block* buse = get_block_for_node(use);
      if (buse != b) {              // In wrong block?
        if (buse != nullptr) {
          buse->find_remove(use);   // Remove from wrong block
        }
        map_node_to_block(use, b);
        b->add_inst(use);
      }
    }
  }
}

//----------------------------replace_block_proj_ctrl-------------------------
// Nodes that have is_block_proj() nodes as their control need to use
// the appropriate Region for their actual block as their control since
// the projection will be in a predecessor block.
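// For example, a node whose control input is the IfTrue projection ending a
// predecessor block really belongs to the successor block selected by that
// projection, so its control input must be rewired to that block's head.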
void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
  const Node *in0 = n->in(0);
  assert(in0 != nullptr, "Only control-dependent");
  const Node *p = in0->is_block_proj();
  if (p != nullptr && p != n) {    // Control from a block projection?
    assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
    // Find trailing Region
    Block *pb = get_block_for_node(in0); // Block-projection already has basic block
    uint j = 0;
    if (pb->_num_succs != 1) {  // More than 1 successor?
      // Search for successor
      uint max = pb->number_of_nodes();
      assert( max > 1, "" );
      uint start = max - pb->_num_succs;
      // Find which output path belongs to projection
      for (j = start; j < max; j++) {
        if( pb->get_node(j) == in0 )
          break;
      }
      assert( j < max, "must find" );
      // Change control to match head of successor basic block
      j -= start;
    }
    n->set_req(0, pb->_succs[j]->head());
  }
}

bool PhaseCFG::is_dominator(Node* dom_node, Node* node) {
  assert(is_CFG(node) && is_CFG(dom_node), "node and dom_node must be CFG nodes");
  if (dom_node == node) {
    return true;
  }
  Block* d = find_block_for_node(dom_node);
  Block* n = find_block_for_node(node);
  assert(n != nullptr && d != nullptr, "blocks must exist");

  if (d == n) {
    if (dom_node->is_block_start()) {
      return true;
    }
    if (node->is_block_start()) {
      return false;
    }
    if (dom_node->is_block_proj()) {
      return false;
    }
    if (node->is_block_proj()) {
      return true;
    }

    assert(is_control_proj_or_safepoint(node), "node must be control projection or safepoint");
    assert(is_control_proj_or_safepoint(dom_node), "dom_node must be control projection or safepoint");

    // Neither 'node' nor 'dom_node' is a block start or block projection.
    // Check if 'dom_node' is above 'node' in the control graph.
    if (is_dominating_control(dom_node, node)) {
      return true;
    }

#ifdef ASSERT
    // If 'dom_node' does not dominate 'node' then 'node' has to dominate 'dom_node'
    if (!is_dominating_control(node, dom_node)) {
      node->dump();
      dom_node->dump();
      assert(false, "neither dom_node nor node dominates the other");
    }
#endif

    return false;
  }
  return d->dom_lca(n) == d;
}

bool PhaseCFG::is_CFG(Node* n) {
  return n->is_block_proj() || n->is_block_start() || is_control_proj_or_safepoint(n);
}

bool PhaseCFG::is_control_proj_or_safepoint(Node* n) const {
  bool result = n->is_ReachabilityFence() ||
                (n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_SafePoint) ||
                (n->is_Proj() && n->as_Proj()->bottom_type() == Type::CONTROL);
  assert(!n->is_Proj() ||
         n->as_Proj()->bottom_type() != Type::CONTROL ||
         n->as_Proj()->_con == 0, "If control projection, it must be projection 0");
  return result;
}

Block* PhaseCFG::find_block_for_node(Node* n) const {
  if (n->is_block_start() || n->is_block_proj()) {
    return get_block_for_node(n);
  } else {
    // Walk the control graph up if 'n' is not a block start nor a block projection. In this case 'n' must be
    // an unmatched control projection or a not yet matched safepoint precedence edge in the middle of a block.
    assert(is_control_proj_or_safepoint(n), "must be control projection or safepoint");
    Node* ctrl = n->in(0);
    while (!ctrl->is_block_start()) {
      ctrl = ctrl->in(0);
    }
    return get_block_for_node(ctrl);
  }
}

// Walk up the control graph from 'n' and check if 'dom_ctrl' is found.
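// Note: this is only called for two nodes already known to share a basic
// block, so the walk terminates at the block's start at the latest.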
bool PhaseCFG::is_dominating_control(Node* dom_ctrl, Node* n) {
  Node* ctrl = n->in(0);
  while (!ctrl->is_block_start()) {
    if (ctrl == dom_ctrl) {
      return true;
    }
    ctrl = ctrl->in(0);
  }
  return false;
}


//------------------------------schedule_pinned_nodes--------------------------
// Set the basic block for Nodes pinned into blocks
void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
  // Allocate node stack of size C->live_nodes()+8 to avoid frequent realloc
  GrowableArray <Node*> spstack(C->live_nodes() + 8);
  spstack.push(_root);
  while (spstack.is_nonempty()) {
    Node* node = spstack.pop();
    if (!visited.test_set(node->_idx)) { // Test node and flag it as visited
      if (node->pinned() && !has_block(node)) {  // Pinned?  Nail it down!
        assert(node->in(0), "pinned Node must have Control");
        // Before setting block replace block_proj control edge
        replace_block_proj_ctrl(node);
        Node* input = node->in(0);
        while (!input->is_block_start()) {
          input = input->in(0);
        }
        Block* block = get_block_for_node(input); // Basic block of controlling input
        schedule_node_into_block(node, block);
      }

      // If the node has precedence edges (added when CastPP nodes are
      // removed in final_graph_reshaping), fix the control of the
      // node to cover the precedence edges and remove the
      // dependencies.
      Node* n = nullptr;
      for (uint i = node->len()-1; i >= node->req(); i--) {
        Node* m = node->in(i);
        if (m == nullptr) continue;
        assert(is_CFG(m), "must be a CFG node");
        node->rm_prec(i);
        if (n == nullptr) {
          n = m;
        } else {
          assert(is_dominator(n, m) || is_dominator(m, n), "one must dominate the other");
          n = is_dominator(n, m) ? m : n;
        }
      }
      if (n != nullptr) {
        assert(node->in(0), "control should have been set");
        assert(is_dominator(n, node->in(0)) || is_dominator(node->in(0), n), "one must dominate the other");
        if (!is_dominator(n, node->in(0))) {
          node->set_req(0, n);
        }
      }
      // Process all inputs that are non-null
      for (int i = node->req()-1; i >= 0; --i) {
        if (node->in(i) != nullptr) {
          spstack.push(node->in(i));
        }
      }
    }
  }
}

// Assert that new input b2 is dominated by all previous inputs.
// Check this by seeing that it is dominated by b1, the deepest
// input observed before b2.
static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
  if (b1 == nullptr)  return;
  assert(b1->_dom_depth < b2->_dom_depth, "sanity");
  Block* tmp = b2;
  while (tmp != b1 && tmp != nullptr) {
    tmp = tmp->_idom;
  }
  if (tmp != b1) {
#ifdef ASSERT
    // Detected an unschedulable graph.  Print some nice stuff and die.
    tty->print_cr("!!! Unschedulable graph !!!");
    for (uint j=0; j<n->len(); j++) { // For all inputs
      Node* inn = n->in(j); // Get input
      if (inn == nullptr)  continue;  // Ignore null, missing inputs
      Block* inb = cfg->get_block_for_node(inn);
      tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
                 inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
      inn->dump();
    }
    tty->print("Failing node: ");
    n->dump();
    assert(false, "unschedulable graph");
#endif
    cfg->C->record_failure("unschedulable graph");
  }
}

static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
  // Find the last input dominated by all other inputs.
  Block* deepb           = nullptr;     // Deepest block so far
  int    deepb_dom_depth = 0;
  for (uint k = 0; k < n->len(); k++) { // For all inputs
    Node* inn = n->in(k);               // Get input
    if (inn == nullptr)  continue;      // Ignore null, missing inputs
    Block* inb = cfg->get_block_for_node(inn);
    assert(inb != nullptr, "must already have scheduled this input");
    if (deepb_dom_depth < (int) inb->_dom_depth) {
      // The new inb must be dominated by the previous deepb.
      // The various inputs must be linearly ordered in the dom
      // tree, or else there will not be a unique deepest block.
      assert_dom(deepb, inb, n, cfg);
      if (cfg->C->failing()) {
        return nullptr;
      }
      deepb = inb;                      // Save deepest block
      deepb_dom_depth = deepb->_dom_depth;
    }
  }
  assert(deepb != nullptr, "must be at least one input to n");
  return deepb;
}


//------------------------------schedule_early---------------------------------
// Find the earliest Block any instruction can be placed in.  Some instructions
// are pinned into Blocks.  Unpinned instructions can appear in the last block
// in which all their inputs occur.
bool PhaseCFG::schedule_early(VectorSet &visited, Node_Stack &roots) {
  // Allocate stack with enough space to avoid frequent realloc
  Node_Stack nstack(roots.size() + 8);
  // _root will be processed among C->top() inputs
  roots.push(C->top(), 0);
  visited.set(C->top()->_idx);

  while (roots.size() != 0) {
    // Pop the next node off the 'roots' worklist and cache it in parent_node;
    // input processing starts at input_index 0.
    Node* parent_node = roots.node();
    uint  input_index = 0;
    roots.pop();

    while (true) {
      if (input_index == 0) {
        // Fixup some control.  Constants without control get attached
        // to root and nodes that use is_block_proj() nodes should be attached
        // to the region that starts their block.
        const Node* control_input = parent_node->in(0);
        if (control_input != nullptr) {
          replace_block_proj_ctrl(parent_node);
        } else {
          // Is a constant with NO inputs?
          if (parent_node->req() == 1) {
            parent_node->set_req(0, _root);
          }
        }
      }

      // First, visit all inputs and force them to get a block.  If an
      // input is already in a block we quit following inputs (to avoid
      // cycles). Instead we put that Node on a worklist to be handled
      // later (since its inputs may not have a block yet).

      // Assume all of parent_node's inputs will be processed
      bool done = true;

      while (input_index < parent_node->len()) {
        Node* in = parent_node->in(input_index++);
        if (in == nullptr) {
          continue;
        }

        int is_visited = visited.test_set(in->_idx);
        if (!has_block(in)) {
          if (is_visited) {
            assert(false, "graph should be schedulable");
            return false;
          }
          // Save parent node and next input's index.
          nstack.push(parent_node, input_index);
          // Process current input now.
          parent_node = in;
          input_index = 0;
          // Not all of parent_node's inputs processed.
          done = false;
          break;
        } else if (!is_visited) {
          // Visit this guy later, using worklist
          roots.push(in, 0);
        }
      }

      if (done) {
        // All of parent_node's inputs have been processed, complete post-processing.

        // Some instructions are pinned into a block.  These include Region,
        // Phi, Start, Return, and other control-dependent instructions and
        // any projections which depend on them.
        if (!parent_node->pinned()) {
          // Set earliest legal block.
          Block* earliest_block = find_deepest_input(parent_node, this);
          if (C->failing()) {
            return false;
          }
          map_node_to_block(parent_node, earliest_block);
        } else {
          assert(get_block_for_node(parent_node) == get_block_for_node(parent_node->in(0)), "Pinned Node should be at the same block as its control edge");
        }

        if (nstack.is_empty()) {
          // Finished all nodes on stack.
          // Process next node on the worklist 'roots'.
          break;
        }
        // Get saved parent node and next input's index.
        parent_node = nstack.node();
        input_index = nstack.index();
        nstack.pop();
      }
    }
  }
  return true;
}

//------------------------------dom_lca----------------------------------------
// Find least common ancestor in dominator tree
// LCA is a current notion of LCA, to be raised above 'this'.
// As a convenient boundary condition, return 'this' if LCA is null.
// Find the LCA of those two nodes.
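// For example, if 'this' sits at dom depth 5 and LCA at depth 3, 'this' is
// first walked up two levels so both are at equal depth, and then both are
// walked up in lockstep until they meet at the common ancestor.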
Block* Block::dom_lca(Block* LCA) {
  if (LCA == nullptr || LCA == this)  return this;

  Block* anc = this;
  while (anc->_dom_depth > LCA->_dom_depth)
    anc = anc->_idom;           // Walk up till anc is as high as LCA

  while (LCA->_dom_depth > anc->_dom_depth)
    LCA = LCA->_idom;           // Walk up till LCA is as high as anc

  while (LCA != anc) {          // Walk both up till they are the same
    LCA = LCA->_idom;
    anc = anc->_idom;
  }

  return LCA;
}

//--------------------------raise_LCA_above_use--------------------------------
// We are placing a definition, and have been given a def->use edge.
// The definition must dominate the use, so move the LCA upward in the
// dominator tree to dominate the use.  If the use is a phi, adjust
// the LCA only with the phi input paths which actually use this def.
static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) {
  Block* buse = cfg->get_block_for_node(use);
  if (buse == nullptr) return LCA;   // Unused killing Projs have no use block
  if (!use->is_Phi())  return buse->dom_lca(LCA);
  uint pmax = use->req();       // Number of Phi inputs
  // Why doesn't this loop just break after finding the matching input to
  // the Phi?  Well...it's like this.  I do not have true def-use/use-def
  // chains.  This means I cannot distinguish, from the def-use direction,
  // which of many use-defs lead from the same use to the same def.  That is,
  // this Phi might have several uses of the same def.  Each use appears in a
  // different predecessor block.  But when I enter here, I cannot distinguish
  // which use-def edge I should find the predecessor block for.  So I find
  // them all.  This means I do a little extra work if a Phi uses the same
  // value more than once.
  for (uint j=1; j<pmax; j++) { // For all inputs
    if (use->in(j) == def) {    // Found matching input?
      Block* pred = cfg->get_block_for_node(buse->pred(j));
      LCA = pred->dom_lca(LCA);
    }
  }
  return LCA;
}

//----------------------------raise_LCA_above_marks----------------------------
// Return a new LCA that dominates LCA and any of its marked predecessors.
// Search all my parents up to 'early' (exclusive), looking for predecessors
// which are marked with the given index.  Return the LCA (in the dom tree)
// of all marked blocks.  If there are none marked, return the original
// LCA.
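// Note that the visited state is keyed by the mark (the load's node index),
// so no per-query clearing of raise_LCA_visited is needed.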
static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) {
  assert(early->dominates(LCA), "precondition failed");
  Block_List worklist;
  worklist.push(LCA);
  while (worklist.size() > 0) {
    Block* mid = worklist.pop();
    if (mid == early)  continue;  // stop searching here

    // Test and set the visited bit.
    if (mid->raise_LCA_visited() == mark)  continue;  // already visited

    // Don't process the current LCA, otherwise the search may terminate early
    if (mid != LCA && mid->raise_LCA_mark() == mark) {
      // Raise the LCA.
      LCA = mid->dom_lca(LCA);
      if (LCA == early)  break;   // stop searching everywhere
      assert(early->dominates(LCA), "unsound LCA update");
      // Resume searching at that point, skipping intermediate levels.
      worklist.push(LCA);
      if (LCA == mid)
        continue; // Don't mark as visited to avoid early termination.
    } else {
      // Keep searching through this block's predecessors.
      for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
        Block* mid_parent = cfg->get_block_for_node(mid->pred(j));
        worklist.push(mid_parent);
      }
    }
    mid->set_raise_LCA_visited(mark);
  }
  return LCA;
}

//--------------------------memory_early_block--------------------------------
// This is a variation of find_deepest_input, the heart of schedule_early.
// Find the "early" block for a load, if we considered only memory and
// address inputs, that is, if other data inputs were ignored.
//
// Because only a subset of edges is considered, the resulting block will
// be earlier (at a shallower dom_depth) than the true schedule_early
// point of the node. We compute this earlier block as a more permissive
// site for anti-dependency insertion, but only if subsume_loads is enabled.
static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) {
  Node* base;
  Node* index;
  Node* store = load->in(MemNode::Memory);
  load->as_Mach()->memory_inputs(base, index);

  assert(base != NodeSentinel && index != NodeSentinel,
         "unexpected base/index inputs");

  Node* mem_inputs[4];
  int mem_inputs_length = 0;
  if (base != nullptr)  mem_inputs[mem_inputs_length++] = base;
  if (index != nullptr) mem_inputs[mem_inputs_length++] = index;
  if (store != nullptr) mem_inputs[mem_inputs_length++] = store;

  // In the comparison below, add one to account for the control input,
  // which may be null, but always takes up a spot in the in array.
  if (mem_inputs_length + 1 < (int) load->req()) {
    // This "load" has more inputs than just the memory, base and index inputs.
    // For purposes of checking anti-dependences, we need to start
    // from the early block of only the address portion of the instruction,
    // and ignore other blocks that may have factored into the wider
    // schedule_early calculation.
    if (load->in(0) != nullptr) mem_inputs[mem_inputs_length++] = load->in(0);

    Block* deepb           = nullptr;        // Deepest block so far
    int    deepb_dom_depth = 0;
    for (int i = 0; i < mem_inputs_length; i++) {
      Block* inb = cfg->get_block_for_node(mem_inputs[i]);
      if (deepb_dom_depth < (int) inb->_dom_depth) {
        // The new inb must be dominated by the previous deepb.
        // The various inputs must be linearly ordered in the dom
        // tree, or else there will not be a unique deepest block.
        assert_dom(deepb, inb, load, cfg);
        if (cfg->C->failing()) {
          return nullptr;
        }
        deepb = inb;                      // Save deepest block
        deepb_dom_depth = deepb->_dom_depth;
      }
    }
    early = deepb;
  }

  return early;
}

// This function is used by raise_above_anti_dependences to find unrelated loads for stores in implicit null checks.
bool PhaseCFG::unrelated_load_in_store_null_block(Node* store, Node* load) {
  // We expect an anti-dependence edge from 'load' to 'store', except when
  // implicit_null_check() has hoisted 'store' above its early block to
  // perform an implicit null check, and 'load' is placed in the null
  // block. In this case it is safe to ignore the anti-dependence, as the
  // null block is only reached if 'store' tries to write to a null object and
  // 'load' reads from a non-null object (there is a preceding check for that).
  // These objects can't be the same.
  Block* store_block = get_block_for_node(store);
  Block* load_block = get_block_for_node(load);
  Node* end = store_block->end();
  if (end->is_MachNullCheck() && (end->in(1) == store) && store_block->dominates(load_block)) {
    Node* if_true = end->find_out_with(Op_IfTrue);
    assert(if_true != nullptr, "null check without null projection");
    Node* null_block_region = if_true->find_out_with(Op_Region);
    assert(null_block_region != nullptr, "null check without null region");
    return get_block_for_node(null_block_region) == load_block;
  }
  return false;
}

class DefUseMemStatesQueue : public StackObj {
private:
  class DefUsePair : public StackObj {
  private:
    Node* _def; // memory state
    Node* _use; // use of the memory state that also modifies the memory state

  public:
    DefUsePair(Node* def, Node* use) :
      _def(def), _use(use) {
    }

    DefUsePair() :
      _def(nullptr), _use(nullptr) {
    }

    Node* def() const {
      return _def;
    }

    Node* use() const {
      return _use;
    }
  };

  GrowableArray<DefUsePair> _queue;
  GrowableArray<MergeMemNode*> _worklist_visited; // visited mergemem nodes

  bool already_enqueued(Node* def_mem, PhiNode* use_phi) const {
    // def_mem is one of the inputs of use_phi and at least one input of use_phi is
    // not def_mem. It's however possible that use_phi has def_mem as input multiple
    // times. If that happens, use_phi is recorded as a use of def_mem multiple
    // times as well. When PhaseCFG::raise_above_anti_dependences() goes over
    // uses of def_mem and enqueues them for processing, use_phi would then be
    // enqueued for processing multiple times when it only needs to be
    // processed once. The code below checks if use_phi as a use of def_mem was
    // already enqueued to avoid redundant processing of use_phi.
    int j = _queue.length()-1;
    // If there are any uses of def_mem already enqueued, they were enqueued
    // last (all uses of def_mem are processed in one go).
    for (; j >= 0; j--) {
      const DefUsePair& def_use_pair = _queue.at(j);
      if (def_use_pair.def() != def_mem) {
        // We're done with the uses of def_mem
        break;
      }
      if (def_use_pair.use() == use_phi) {
        return true;
      }
    }
#ifdef ASSERT
    for (; j >= 0; j--) {
      const DefUsePair& def_use_pair = _queue.at(j);
      assert(def_use_pair.def() != def_mem, "Should be done with the uses of def_mem");
    }
#endif
    return false;
  }

public:
  DefUseMemStatesQueue(ResourceArea* area) {
  }

  void push(Node* def_mem_state, Node* use_mem_state) {
    if (use_mem_state->is_MergeMem()) {
      // Be sure we don't get into combinatorial problems.
      if (!_worklist_visited.append_if_missing(use_mem_state->as_MergeMem())) {
        return; // already on work list; do not repeat
      }
    } else if (use_mem_state->is_Phi()) {
      // A Phi could have the same mem as input multiple times. If that's the case, we don't need to enqueue it
      // more than once. We otherwise allow phis to be repeated; they can merge two relevant states.
      if (already_enqueued(def_mem_state, use_mem_state->as_Phi())) {
        return;
      }
    }

    _queue.push(DefUsePair(def_mem_state, use_mem_state));
  }

  bool is_nonempty() const {
    return _queue.is_nonempty();
  }

  Node* top_def() const {
    return _queue.top().def();
  }

  Node* top_use() const {
    return _queue.top().use();
  }

  void pop() {
    _queue.pop();
  }
};

// Enforce a scheduling of the given 'load' that ensures anti-dependent stores
// do not overwrite the load's input memory state before the load executes.
//
// The given 'load' has a current scheduling range in the dominator tree that
// starts at the load's early block (computed in schedule_early) and ends at
// the given 'LCA' block for the load. However, there may still exist
// anti-dependent stores between the early block and the LCA that overwrite
// memory that the load must witness. For such stores, we must
//
//   1. raise the load's LCA to force the load to (eventually) be scheduled at
//      latest in the store's block, and
//   2. if the load may get scheduled in the store's block, additionally insert
//      an anti-dependence edge (i.e., precedence edge) from the load to the
//      store to ensure LCM schedules the load before the store within the
//      block.
//
// For a given store, we say that the store is on a _distinct_ control-flow
// path relative to the load if there are no paths from early to LCA that go
// through the store's block. Such stores are not anti-dependent, and there is
// no need to update the LCA nor to add anti-dependence edges.
//
// Due to the presence of loops, we must also raise the LCA above
// anti-dependent memory Phis. We defer the details (see later comments in the
// method) and for now look at an example without loops.
//
//          CFG               DOMINATOR TREE
//
//       B1 (early,L)              B1
//       |\________                /\\___
//       |         \              /  \   \
//       B2 (L,S)   \            B2  B7  B6
//      /  \         \           /\\___
//     B3  B4 (S)    B7 (S)     /  \   \
//      \  /         /         B3  B4  B5
//       B5 (LCA,L) /
//        \    ____/
//         \  /
//          B6
//
// Here, the load's scheduling range when calling raise_above_anti_dependences
// is between early and LCA in the dominator tree, i.e., in block B1, B2, or B5
// (indicated with "L"). However, there are a number of stores (indicated with
// "S") that overwrite the memory which the load must witness. First, consider
// the store in B4. We cannot legally schedule the load in B4, so an
// anti-dependence edge is redundant. However, we must raise the LCA above
// B4, which means that the updated LCA is B2. Now, consider the store in B2.
// The LCA is already B2, so we do not need to raise it any further.
// If we, eventually, decide to schedule the load in B2, it could happen that
// LCM decides to place the load after the anti-dependent store in B2.
// Therefore, we now need to add an anti-dependence edge between the load and
// the B2 store, ensuring that the load is scheduled before the store. Finally,
// the store in B7 is on a distinct control-flow path. Therefore, B7 requires
// no action.
//
// The raise_above_anti_dependences method returns the updated LCA and ensures
// there are no anti-dependent stores in any block between the load's early
// block and the updated LCA. Any stores in the updated LCA will have new
// anti-dependence edges back to the load. The caller may schedule the load in
// the updated LCA, or it may hoist the load above the updated LCA, if the
// updated LCA is not the early block.
Block* PhaseCFG::raise_above_anti_dependences(Block* LCA, Node* load, const bool verify) {
  ResourceMark rm;
  assert(load->needs_anti_dependence_check(), "must be a load of some sort");
  assert(LCA != nullptr, "");
  DEBUG_ONLY(Block* LCA_orig = LCA);

  // Compute the alias index.  Loads and stores with different alias indices
  // do not need anti-dependence edges.
  int load_alias_idx = C->get_alias_index(load->adr_type());
#ifdef ASSERT
  assert(Compile::AliasIdxTop <= load_alias_idx && load_alias_idx < C->num_alias_types(), "Invalid alias index");
  if (load_alias_idx == Compile::AliasIdxBot && C->do_aliasing() &&
      (PrintOpto || VerifyAliases ||
       (PrintMiscellaneous && (WizardMode || Verbose)))) {
    // Load nodes should not consume all of memory.
    // Reporting a bottom type indicates a bug in adlc.
    // If some particular type of node validly consumes all of memory,
    // sharpen the preceding "if" to exclude it, so we can catch bugs here.
    tty->print_cr("*** Possible Anti-Dependence Bug:  Load consumes all of memory.");
    load->dump(2);
    if (VerifyAliases)  assert(load_alias_idx != Compile::AliasIdxBot, "");
  }
#endif

  if (!C->alias_type(load_alias_idx)->is_rewritable()) {
    // It is impossible to spoil this load by putting stores before it,
    // because we know that the stores will never update the value
    // which 'load' must witness.
    return LCA;
  }

  node_idx_t load_index = load->_idx;

  // Record the earliest legal placement of 'load', as determined by the unique
  // point in the dominator tree where all memory effects and other inputs are
  // first available (computed by schedule_early). For normal loads, 'early' is
  // the shallowest place (dominator-tree wise) to look for anti-dependences
  // between this load and any store.
  Block* early = get_block_for_node(load);

  // If we are subsuming loads, compute an "early" block that only considers
  // memory or address inputs. This block may be different from the
  // schedule_early block when it is at an even shallower depth in the
  // dominator tree, allowing for a broader discovery of anti-dependences.
  if (C->subsume_loads()) {
    early = memory_early_block(load, early, this);
    if (C->failing()) {
      return nullptr;
    }
  }

  assert(early->dominates(LCA_orig), "precondition failed");

  ResourceArea* area = Thread::current()->resource_area();

  // Bookkeeping of possibly anti-dependent stores that we find outside the
  // early block and that may need anti-dependence edges. Note that stores in
  // non_early_stores are not necessarily dominated by early. The search starts
  // from initial_mem, which can reside in a block that dominates early, and
  // therefore, stores we find may be in blocks that are on completely distinct
  // control-flow paths compared to early. However, in the end, only stores in
  // blocks dominated by early matter. The reason for bookkeeping not only
  // relevant stores is efficiency: we lazily record all possible
  // anti-dependent stores and add anti-dependence edges only to the relevant
  // ones at the very end of this method when we know the final updated LCA.
  Node_List non_early_stores(area);

  // Whether we must raise the LCA after the main worklist loop below.
  bool must_raise_LCA_above_marks = false;

  // The input load uses some memory state (initial_mem).
  Node* initial_mem = load->in(MemNode::Memory);
  // To find anti-dependences we must look for users of the same memory state.
  // To do this, we search the memory graph downwards from initial_mem. During
  // this search, we encounter different types of nodes that we handle
  // according to the following three categories:
  //
  // - MergeMems
  // - Memory-state-modifying nodes (informally referred to as "stores" above
  //   and below)
  // - Memory Phis
  //
  // MergeMems do not modify the memory state. Anti-dependent stores or memory
  // Phis may, however, exist downstream of MergeMems. Therefore, we must
  // permit the search to continue through MergeMems. Stores may raise the LCA
  // and may potentially also require an anti-dependence edge. Memory Phis may
  // raise the LCA but never require anti-dependence edges. See the comments
  // throughout the worklist loop below for further details.
  //
  // It may be useful to think of the anti-dependence search as traversing a
  // tree rooted at initial_mem, with internal nodes of type MergeMem and
  // memory Phis and stores as (potentially repeated) leaves.

  // We don't optimize the memory graph for pinned loads, so we may need to raise the
  // root of our search tree through the corresponding slices of MergeMem nodes to
  // get to the node that really creates the memory state for this slice.
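  // The loop below follows the load's slice through MergeMems: as long as the
  // MergeMem records a slice-specific input (one different from its base
  // memory), step down to that input; once the slice merely sees the base
  // memory, the MergeMem adds nothing for this slice and the walk stops.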
  if (load_alias_idx >= Compile::AliasIdxRaw) {
    while (initial_mem->is_MergeMem()) {
      MergeMemNode* mm = initial_mem->as_MergeMem();
      Node* p = mm->memory_at(load_alias_idx);
      if (p != mm->base_memory()) {
        initial_mem = p;
      } else {
        break;
      }
    }
  }
  // To administer the search, we use a worklist consisting of (def,use)-pairs
  // of memory states, corresponding to edges in the search tree (and edges
  // in the memory graph). We need to keep track of search tree edges in the
  // worklist rather than individual nodes due to memory Phis (see details
  // below).
  DefUseMemStatesQueue worklist(area);
  // We start the search at initial_mem and indicate the search root with the
  // edge (nullptr, initial_mem).
  worklist.push(nullptr, initial_mem);

  // The worklist loop
  while (worklist.is_nonempty()) {
    // Pop the next edge from the worklist
    Node* def_mem_state = worklist.top_def();
    Node* use_mem_state = worklist.top_use();
    worklist.pop();

    // We are either
    // - at the root of the search with the edge (nullptr, initial_mem),
    // - just past initial_mem with the edge (initial_mem, use_mem_state), or
    // - just past a MergeMem with the edge (MergeMem, use_mem_state).
    assert(def_mem_state == nullptr || def_mem_state == initial_mem ||
           def_mem_state->is_MergeMem(),
           "unexpected memory state");

    const uint op = use_mem_state->Opcode();

#ifdef ASSERT
    // CacheWB nodes are peculiar in the sense that they are both anti-dependent
    // and produce memory. Allow them to be treated as a store.
    bool is_cache_wb = false;
    if (use_mem_state->is_Mach()) {
      int ideal_op = use_mem_state->as_Mach()->ideal_Opcode();
      is_cache_wb = (ideal_op == Op_CacheWB);
    }
    assert(!use_mem_state->needs_anti_dependence_check() || is_cache_wb, "no loads");
#endif

    // If we are either at the search root or have found a MergeMem, we step
    // past use_mem_state and populate the search worklist with edges
    // (use_mem_state, child) for use_mem_state's children.
    if (def_mem_state == nullptr // root (exclusive) of tree we are searching
        || op == Op_MergeMem     // internal node of tree we are searching
    ) {
      def_mem_state = use_mem_state;

      for (DUIterator_Fast imax, i = def_mem_state->fast_outs(imax); i < imax; i++) {
        use_mem_state = def_mem_state->fast_out(i);
        if (use_mem_state->needs_anti_dependence_check()) {
          // use_mem_state is also a kind of load (i.e.,
          // needs_anti_dependence_check), and it is not a store nor a memory
          // Phi. Hence, it is not anti-dependent on the load.
          continue;
        }
        worklist.push(def_mem_state, use_mem_state);
      }
      // Nothing more to do for the current (nullptr, initial_mem) or
      // (initial_mem/MergeMem, MergeMem) edge, move on.
      continue;
    }

    assert(!use_mem_state->is_MergeMem(),
           "use_mem_state should be either a store or a memory Phi");

    if (op == Op_MachProj || op == Op_Catch)   continue;

    // Compute the alias index. If the use_mem_state has an alias index
    // different from the load's, it is not anti-dependent. Wide MemBars
    // are anti-dependent on everything (except immutable memories).
    const TypePtr* adr_type = use_mem_state->adr_type();
    if (!C->can_alias(adr_type, load_alias_idx))  continue;

    // Most slow-path runtime calls do NOT modify Java memory, but
    // they can block and so write Raw memory.
    if (use_mem_state->is_Mach()) {
      MachNode* muse = use_mem_state->as_Mach();
      if (load_alias_idx != Compile::AliasIdxRaw) {
        // Check for call into the runtime using the Java calling
        // convention (and from there into a wrapper); it has no
        // _method.  Can't do this optimization for Native calls because
        // they CAN write to Java memory.
        if (muse->ideal_Opcode() == Op_CallStaticJava) {
          assert(muse->is_MachSafePoint(), "");
          MachSafePointNode* ms = (MachSafePointNode*)muse;
          assert(ms->is_MachCallJava(), "");
          MachCallJavaNode* mcj = (MachCallJavaNode*) ms;
          if (mcj->_method == nullptr) {
            // These runtime calls do not write to Java visible memory
            // (other than Raw) and so are not anti-dependent.
            continue;
          }
        }
        // Same for SafePoints: they read/write Raw but only read otherwise.
        // This is basically a workaround for SafePoints only defining control
        // instead of control + memory.
        if (muse->ideal_Opcode() == Op_SafePoint) {
          continue;
        }
      } else {
        // Some raw memory, such as the load of "top" at an allocation,
        // can be control dependent on the previous safepoint. See
        // comments in GraphKit::allocate_heap() about control input.
        // Inserting an anti-dependence edge between such a safepoint and a use
        // creates a cycle, and will cause a subsequent failure in
        // local scheduling.  (BugId 4919904)
        // (%%% How can a control input be a safepoint and not a projection??)
        if (muse->ideal_Opcode() == Op_SafePoint && load->in(0) == muse) {
          continue;
        }
      }
    }

    // Determine the block of the use_mem_state.
    Block* use_mem_state_block = get_block_for_node(use_mem_state);
    assert(use_mem_state_block != nullptr,
           "unused killing projections skipped above");

    // For efficiency, we take a lazy approach to both raising the LCA and
    // adding anti-dependence edges. In this worklist loop, we only mark blocks
    // which we must raise the LCA above (set_raise_LCA_mark), and keep
    // track of nodes that potentially need anti-dependence edges
    // (non_early_stores). The only exceptions to this are if we
    // immediately see that we have to raise the LCA all the way to the early
    // block, and if we find stores in the early block (which always need
    // anti-dependence edges).
    //
    // After the worklist loop, we perform an efficient combined LCA-raising
    // operation over all marks and only then add anti-dependence edges where
    // strictly necessary according to the new raised LCA.

    if (use_mem_state->is_Phi()) {
      // We have reached a memory Phi node. On our search from initial_mem to
      // the Phi, we have found no anti-dependences (otherwise, we would have
      // already terminated the search along this branch). Consider the example
      // below, indicating a Phi node and its node inputs (we omit the control
      // input).
      //
      //    def_mem_state
      //          |
      //          | ? ?
      //          \ | /
      //           Phi
      //
      // We reached the Phi from def_mem_state and know that, on this
      // particular input, the memory that the load must witness is not
      // overwritten. However, for the Phi's other inputs (? in the
      // illustration), we have no information and must thus conservatively
      // assume that the load's memory is overwritten at and below the Phi.
      //
      // It is impossible to schedule the load before the Phi in
      // the same block as the Phi (use_mem_state_block), and anti-dependence
      // edges are, therefore, redundant. We must, however, find the
      // predecessor block of use_mem_state_block that corresponds to
      // def_mem_state, and raise the LCA above that block. Note that this block
      // is not necessarily def_mem_state's block! See the continuation of our
      // previous example below (now illustrating blocks instead of nodes)
      //
      //    def_mem_state's block
      //          |
      //          |
      //      pred_block
      //          |
      //          |   ?   ?
      //          |   |   |
      //      use_mem_state_block
      //
      // Here, we must raise the LCA above pred_block rather than
      // def_mem_state's block.
      //
      // Do not assert(use_mem_state_block != early, "Phi merging memory after access")
      // PhiNode may be at start of block 'early' with backedge to 'early'
      if (LCA == early) {
        // Don't bother if LCA is already raised all the way
        continue;
      }
      DEBUG_ONLY(bool found_match = false);
      for (uint j = PhiNode::Input, jmax = use_mem_state->req(); j < jmax; j++) {
        if (use_mem_state->in(j) == def_mem_state) {   // Found matching input?
          DEBUG_ONLY(found_match = true);
          Block* pred_block = get_block_for_node(use_mem_state_block->pred(j));
          if (pred_block != early) {
            // Lazily set the LCA mark
            pred_block->set_raise_LCA_mark(load_index);
            must_raise_LCA_above_marks = true;
          } else /* if (pred_block == early) */ {
            // We already know at this point that we must raise LCA all the
            // way to early.
            LCA = early;
            // This turns off the process of gathering non_early_stores.
          }
        }
      }
      assert(found_match, "no worklist bug");
    } else if (use_mem_state_block != early) {
      // We found an anti-dependent store outside the load's 'early' block. The
      // store may be between the current LCA and the earliest possible block
      // (but it could very well also be on a distinct control-flow path).
      // Lazily set the LCA mark and push to non_early_stores.
      if (LCA == early) {
        // Don't bother if LCA is already raised all the way
        continue;
      }
      if (unrelated_load_in_store_null_block(use_mem_state, load)) {
        continue;
      }
      use_mem_state_block->set_raise_LCA_mark(load_index);
      must_raise_LCA_above_marks = true;
      non_early_stores.push(use_mem_state);
    } else /* if (use_mem_state_block == early) */ {
      // We found an anti-dependent store in the load's 'early' block.
      // Therefore, we already know that we must raise LCA all the way to
      // early and that we need to add an anti-dependence edge to the store.
      assert(use_mem_state != load->find_exact_control(load->in(0)), "dependence cycle found");
      if (verify) {
        assert(use_mem_state->find_edge(load) != -1 || unrelated_load_in_store_null_block(use_mem_state, load),
               "missing precedence edge");
      } else {
        use_mem_state->add_prec(load);
      }
      LCA = early;
      // This turns off the process of gathering non_early_stores.
    }
  }
  // Worklist is now empty; we have visited all possible anti-dependences.

  // Finished if 'load' must be scheduled in its 'early' block.
  // If we found any stores there, they have already been given
  // anti-dependence edges.
  if (LCA == early) {
    return LCA;
  }

  // We get here only if there are no anti-dependent stores in the load's
  // 'early' block and if no memory Phi has forced LCA to the early block. Now
  // we must raise the LCA above the blocks for all the anti-dependent stores
  // and above the predecessor blocks of anti-dependent memory Phis we reached
  // during the search.
  if (must_raise_LCA_above_marks) {
    LCA = raise_LCA_above_marks(LCA, load->_idx, early, this);
  }

  // If LCA == early at this point, there were no stores that required
  // anti-dependence edges in the early block. Otherwise, we would have eagerly
  // raised the LCA to early already in the worklist loop.
  if (LCA == early) {
    return LCA;
  }

  // The raised LCA block can now be a home to anti-dependent stores for which
  // we still need to add anti-dependence edges, but no LCA predecessor block
  // contains any such stores (otherwise, we would have raised the LCA even
  // higher).
  //
  // The raised LCA will be a lower bound for placing the load, preventing the
  // load from sinking past any block containing a store that may overwrite
  // memory that the load must witness.
  //
  // Now we need to insert the necessary anti-dependence edges from 'load' to
  // each store in the non-early LCA block. We have recorded all such potential
  // stores in non_early_stores.
  //
  // If LCA->raise_LCA_mark() != load_index, it means that we raised the LCA to
  // a block in which we did not find any anti-dependent stores. So, no need to
  // search for any such stores.
  if (LCA->raise_LCA_mark() == load_index) {
    while (non_early_stores.size() > 0) {
      Node* store = non_early_stores.pop();
      Block* store_block = get_block_for_node(store);
      if (store_block == LCA) {
        // Add anti-dependence edge from the load to the store in the non-early
        // LCA.
        assert(store != load->find_exact_control(load->in(0)), "dependence cycle found");
        if (verify) {
          assert(store->find_edge(load) != -1, "missing precedence edge");
        } else {
          store->add_prec(load);
        }
      } else {
        assert(store_block->raise_LCA_mark() == load_index, "block was marked");
      }
    }
  }

  assert(LCA->dominates(LCA_orig), "unsound updated LCA");

  // Return the highest block containing stores; any stores
  // within that block have been given anti-dependence edges.
  return LCA;
}

// This class is used to iterate backwards over the nodes in the graph.

class Node_Backward_Iterator {

private:
  Node_Backward_Iterator();

public:
  // Constructor for the iterator
  Node_Backward_Iterator(Node *root, VectorSet &visited, Node_Stack &stack, PhaseCFG &cfg);

  // Advance the iteration and return the next node (null when finished)
  Node *next();

private:
  VectorSet   &_visited;
  Node_Stack  &_stack;
  PhaseCFG &_cfg;
};

// Constructor for the Node_Backward_Iterator
Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_Stack &stack, PhaseCFG &cfg)
  : _visited(visited), _stack(stack), _cfg(cfg) {
  // The stack should contain exactly the root
  stack.clear();
  stack.push(root, root->outcnt());

  // Clear the visited bits
  visited.clear();
}

// Iteration step for the Node_Backward_Iterator
Node *Node_Backward_Iterator::next() {

  // If the _stack is empty, then just return null: finished.
  if ( !_stack.size() )
    return nullptr;

  // I visit unvisited not-anti-dependence users first, then anti-dependent
  // children next. I iterate backwards to support removal of nodes.
  // The stack holds states consisting of 3 values:
  // current Def node, flag which indicates 1st/2nd pass, index of current out edge
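  // The pass flag is packed into bit 0 of the saved Node pointer, which is
  // safe because Node objects are at least word-aligned.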
  Node *self = (Node*)(((uintptr_t)_stack.node()) & ~1);
  bool iterate_anti_dep = (((uintptr_t)_stack.node()) & 1);
  uint idx = MIN2(_stack.index(), self->outcnt()); // Support removal of nodes.
  _stack.pop();

  // I cycle here when I am entering a deeper level of recursion.
  // The key variable 'self' was set prior to jumping here.
  while( 1 ) {

    _visited.set(self->_idx);

    // Now schedule all uses as late as possible.
    const Node* src = self->is_Proj() ? self->in(0) : self;
    uint src_rpo = _cfg.get_block_for_node(src)->_rpo;

    // Schedule all nodes in a post-order visit
    Node *unvisited = nullptr;  // Unvisited anti-dependent Node, if any

    // Scan for unvisited nodes
    while (idx > 0) {
      // For all uses, schedule late
      Node* n = self->raw_out(--idx); // Use

      // Skip already visited children
      if ( _visited.test(n->_idx) )
        continue;

      // do not traverse backward control edges
      Node *use = n->is_Proj() ? n->in(0) : n;
      uint use_rpo = _cfg.get_block_for_node(use)->_rpo;

      if ( use_rpo < src_rpo )
        continue;

      // Phi nodes always precede uses in a basic block
      if ( use_rpo == src_rpo && use->is_Phi() )
        continue;

      unvisited = n;      // Found unvisited

      // Check for possible-anti-dependent
      // 1st pass: No such nodes, 2nd pass: Only such nodes.
      if (n->needs_anti_dependence_check() == iterate_anti_dep) {
        unvisited = n;      // Found unvisited
        break;
      }
    }

    // Did I find an unvisited not-anti-dependent Node?
    if (!unvisited) {
      if (!iterate_anti_dep) {
        // 2nd pass: Iterate over nodes which need anti-dependence checks.
1219         iterate_anti_dep = true;
1220         idx = self->outcnt();
1221         continue;
1222       }
1223       break;                  // All done with children; post-visit 'self'
1224     }
1225 
1226     // Visit the unvisited Node.  Contains the obvious push to
1227     // indicate I'm entering a deeper level of recursion.  I push the
1228     // old state onto the _stack and set a new state and loop (recurse).
1229     _stack.push((Node*)((uintptr_t)self | (uintptr_t)iterate_anti_dep), idx);
1230     self = unvisited;
1231     iterate_anti_dep = false;
1232     idx = self->outcnt();
1233   } // End recursion loop
1234 
1235   return self;
1236 }
1237 
1238 //------------------------------ComputeLatenciesBackwards----------------------
1239 // Compute the latency of all the instructions.
1240 void PhaseCFG::compute_latencies_backwards(VectorSet &visited, Node_Stack &stack) {
1241 #ifndef PRODUCT
1242   if (trace_opto_pipelining())
1243     tty->print("\n#---- ComputeLatenciesBackwards ----\n");
1244 #endif
1245 
1246   Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
1247   Node *n;
1248 
1249   // Walk over all the nodes from last to first
1250   while ((n = iter.next())) {
1251     // Set the latency for the definitions of this instruction
1252     partial_latency_of_defs(n);
1253   }
1254 } // end ComputeLatenciesBackwards
1255 
1256 //------------------------------partial_latency_of_defs------------------------
1257 // Compute the latency impact of this node on all defs.  This computes
1258 // a number that increases as we approach the beginning of the routine.
1259 void PhaseCFG::partial_latency_of_defs(Node *n) {
1260   // Set the latency for this instruction
1261 #ifndef PRODUCT
1262   if (trace_opto_pipelining()) {
1263     tty->print("# latency_to_inputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
1264     dump();
1265   }
1266 #endif
1267 
1268   if (n->is_Proj()) {
1269     n = n->in(0);
1270   }
1271 
1272   if (n->is_Root()) {
1273     return;
1274   }
1275 
1276   uint nlen = n->len();
1277   uint use_latency = get_latency_for_node(n);
1278   uint use_pre_order = get_block_for_node(n)->_pre_order;
1279 
1280   for (uint j = 0; j < nlen; j++) {
1281     Node *def = n->in(j);
1282 
1283     if (!def || def == n) {
1284       continue;
1285     }
1286 
1287     // Walk backwards thru projections
1288     if (def->is_Proj()) {
1289       def = def->in(0);
1290     }
1291 
1292 #ifndef PRODUCT
1293     if (trace_opto_pipelining()) {
1294       tty->print("#    in(%2d): ", j);
1295       def->dump();
1296     }
1297 #endif
1298 
1299     // If the defining block is not known, assume it is ok
1300     Block *def_block = get_block_for_node(def);
1301     uint def_pre_order = def_block ? def_block->_pre_order : 0;
1302 
1303     if ((use_pre_order <  def_pre_order) || (use_pre_order == def_pre_order && n->is_Phi())) {
1304       continue;
1305     }
1306 
1307     uint delta_latency = n->latency(j);
1308     uint current_latency = delta_latency + use_latency;
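    // E.g. (illustrative numbers): with use_latency == 7 and an edge
    // latency n->latency(j) == 2, current_latency == 9; below, the def's
    // latency is raised to 9 only if its recorded latency was smaller.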
1309 
1310     if (get_latency_for_node(def) < current_latency) {
1311       set_latency_for_node(def, current_latency);
1312     }
1313 
1314 #ifndef PRODUCT
1315     if (trace_opto_pipelining()) {
1316       tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d", use_latency, j, delta_latency, current_latency, def->_idx, get_latency_for_node(def));
1317     }
1318 #endif
1319   }
1320 }
1321 
1322 //------------------------------latency_from_use-------------------------------
1323 // Compute the latency of a specific use
1324 int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
1325   // If self-reference, return no latency
1326   if (use == n || use->is_Root()) {
1327     return 0;
1328   }
1329 
1330   uint def_pre_order = get_block_for_node(def)->_pre_order;
1331   uint latency = 0;
1332 
1333   // If the use is not a projection, then it is simple...
1334   if (!use->is_Proj()) {
1335 #ifndef PRODUCT
1336     if (trace_opto_pipelining()) {
1337       tty->print("#    out(): ");
1338       use->dump();
1339     }
1340 #endif
1341 
1342     uint use_pre_order = get_block_for_node(use)->_pre_order;
1343 
1344     if (use_pre_order < def_pre_order)
1345       return 0;
1346 
1347     if (use_pre_order == def_pre_order && use->is_Phi())
1348       return 0;
1349 
1350     uint nlen = use->len();
1351     uint nl = get_latency_for_node(use);
1352 
1353     for ( uint j=0; j<nlen; j++ ) {
1354       if (use->in(j) == n) {
1355         // Change this if we want local latencies
1356         uint ul = use->latency(j);
1357         uint  l = ul + nl;
1358         if (latency < l) latency = l;
1359 #ifndef PRODUCT
1360         if (trace_opto_pipelining()) {
1361           tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, latency = %d",
1362                         nl, j, ul, l, latency);
1363         }
1364 #endif
1365       }
1366     }
1367   } else {
1368     // This is a projection, just grab the latency of the use(s)
1369     for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
1370       uint l = latency_from_use(use, def, use->fast_out(j));
1371       if (latency < l) latency = l;
1372     }
1373   }
1374 
1375   return latency;
1376 }
1377 
1378 //------------------------------latency_from_uses------------------------------
// Compute the latency of this instruction relative to all of its uses.
1380 // This computes a number that increases as we approach the beginning of the
1381 // routine.
1382 void PhaseCFG::latency_from_uses(Node *n) {
1383   // Set the latency for this instruction
1384 #ifndef PRODUCT
1385   if (trace_opto_pipelining()) {
1386     tty->print("# latency_from_outputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
1387     dump();
1388   }
1389 #endif
1390   uint latency=0;
1391   const Node *def = n->is_Proj() ? n->in(0): n;
1392 
1393   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1394     uint l = latency_from_use(n, def, n->fast_out(i));
1395 
1396     if (latency < l) latency = l;
1397   }
1398 
1399   set_latency_for_node(n, latency);
1400 }
1401 
1402 //------------------------------is_cheaper_block-------------------------
// Check if a block between the early block and the LCA block of the uses is
// a cheaper placement than the current candidate, by frequency-based policy,
// latency-based policy, or (under StressGCM) random-based policy.
1405 bool PhaseCFG::is_cheaper_block(Block* LCA, Node* self, uint target_latency,
1406                                 uint end_latency, double least_freq,
1407                                 int cand_cnt, bool in_latency) {
1408   if (StressGCM) {
1409     // Should be randomly accepted in stress mode
1410     return C->randomized_select(cand_cnt);
1411   }
1412 
1413   const double delta = 1 + PROB_UNLIKELY_MAG(4);
1414 
1415   // Better Frequency. Add a small delta to the comparison to not needlessly
1416   // hoist because of, e.g., small numerical inaccuracies.
1417   if (LCA->_freq * delta < least_freq) {
1418     return true;
1419   }
1420 
1421   // Otherwise, choose with latency
  if (!in_latency                     &&  // Latency not covered by candidate
1423       LCA->_freq < least_freq * delta &&  // No worse frequency
1424       target_latency >= end_latency   &&  // within latency range
1425       !self->is_iteratively_computed()    // But don't hoist IV increments
1426             // because they may end up above other uses of their phi forcing
1427             // their result register to be different from their input.
1428   ) {
1429     return true;
1430   }
1431 
1432   return false;
1433 }
1434 
1435 //------------------------------hoist_to_cheaper_block-------------------------
1436 // Pick a block for node self, between early and LCA block of uses, that is a
1437 // cheaper alternative to LCA.
1438 Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
1439   Block* least       = LCA;
1440   double least_freq  = least->_freq;
1441   uint target        = get_latency_for_node(self);
1442   uint start_latency = get_latency_for_node(LCA->head());
1443   uint end_latency   = get_latency_for_node(LCA->get_node(LCA->end_idx()));
1444   bool in_latency    = (target <= start_latency);
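  // 'in_latency' records whether self's latency requirement is already
  // covered at the start of the current candidate block.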
1445   const Block* root_block = get_block_for_node(_root);
1446 
1447   // Turn off latency scheduling if scheduling is just plain off
1448   if (!C->do_scheduling())
1449     in_latency = true;
1450 
1451   // Do not hoist (to cover latency) instructions which target a
1452   // single register.  Hoisting stretches the live range of the
1453   // single register and may force spilling.
1454   MachNode* mach = self->is_Mach() ? self->as_Mach() : nullptr;
1455   if (mach != nullptr && mach->out_RegMask().is_bound1() && !mach->out_RegMask().is_empty()) {
1456     in_latency = true;
1457   }
1458 
1459 #ifndef PRODUCT
1460   if (trace_opto_pipelining()) {
1461     tty->print("# Find cheaper block for latency %d: ", get_latency_for_node(self));
1462     self->dump();
1463     tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
1464       LCA->_pre_order,
1465       LCA->head()->_idx,
1466       start_latency,
1467       LCA->get_node(LCA->end_idx())->_idx,
1468       end_latency,
1469       least_freq);
1470   }
1471 #endif
1472 
1473   int cand_cnt = 0;  // number of candidates tried
1474 
  // Walk up the dominator tree from the LCA (lowest common ancestor) to
  // the earliest legal location. Capture the block with the least execution
  // frequency, choose a random block with -XX:+StressGCM, or apply the
  // latency-based policy.
1478   while (LCA != early) {
1479     LCA = LCA->_idom;         // Follow up the dominator tree
1480 
1481     if (LCA == nullptr) {
1482       // Bailout without retry
1483       assert(false, "graph should be schedulable");
1484       C->record_method_not_compilable("late schedule failed: LCA is null");
1485       return least;
1486     }
1487 
1488     // Don't hoist machine instructions to the root basic block
1489     if (mach != nullptr && LCA == root_block)
1490       break;
1491 
1492     if (self->is_memory_writer() &&
1493         (LCA->_loop->depth() > early->_loop->depth())) {
1494       // LCA is an invalid placement for a memory writer: choosing it would
1495       // cause memory interference, as illustrated in schedule_late().
1496       continue;
1497     }
1498     verify_memory_writer_placement(LCA, self);
1499 
1500     uint start_lat = get_latency_for_node(LCA->head());
1501     uint end_idx   = LCA->end_idx();
1502     uint end_lat   = get_latency_for_node(LCA->get_node(end_idx));
1503     double LCA_freq = LCA->_freq;
1504 #ifndef PRODUCT
1505     if (trace_opto_pipelining()) {
1506       tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
1507         LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq);
1508     }
1509 #endif
1510     cand_cnt++;
1511     if (is_cheaper_block(LCA, self, target, end_lat, least_freq, cand_cnt, in_latency)) {
1512       least = LCA;            // Found cheaper block
1513       least_freq = LCA_freq;
1514       start_latency = start_lat;
1515       end_latency = end_lat;
1516       if (target <= start_lat)
1517         in_latency = true;
1518     }
1519   }
1520 
1521 #ifndef PRODUCT
1522   if (trace_opto_pipelining()) {
1523     tty->print_cr("#  Choose block B%d with start latency=%d and freq=%g",
1524       least->_pre_order, start_latency, least_freq);
1525   }
1526 #endif
1527 
1528   // See if the latency needs to be updated
1529   if (target < end_latency) {
1530 #ifndef PRODUCT
1531     if (trace_opto_pipelining()) {
1532       tty->print_cr("#  Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
1533     }
1534 #endif
1535     set_latency_for_node(self, end_latency);
1536     partial_latency_of_defs(self);
1537   }
1538 
1539   return least;
1540 }
1541 
1542 
1543 //------------------------------schedule_late-----------------------------------
// Now schedule all code as LATE as possible.  This is the LCA in the
1545 // dominator tree of all USES of a value.  Pick the block with the least
1546 // loop nesting depth that is lowest in the dominator tree.
1547 extern const char must_clone[];
1548 void PhaseCFG::schedule_late(VectorSet &visited, Node_Stack &stack) {
1549 #ifndef PRODUCT
1550   if (trace_opto_pipelining())
1551     tty->print("\n#---- schedule_late ----\n");
1552 #endif
1553 
1554   Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
1555   Node *self;
1556 
1557   // Walk over all the nodes from last to first
1558   while ((self = iter.next())) {
1559     Block* early = get_block_for_node(self); // Earliest legal placement
1560 
1561     if (self->is_top()) {
1562       // Top node goes in bb #2 with other constants.
1563       // It must be special-cased, because it has no out edges.
1564       early->add_inst(self);
1565       continue;
1566     }
1567 
1568     // No uses, just terminate
1569     if (self->outcnt() == 0) {
1570       assert(self->is_MachProj(), "sanity");
1571       continue;                   // Must be a dead machine projection
1572     }
1573 
1574     // If node is pinned in the block, then no scheduling can be done.
1575     if( self->pinned() )          // Pinned in block?
1576       continue;
1577 
1578 #ifdef ASSERT
1579     // Assert that memory writers (e.g. stores) have a "home" block (the block
1580     // given by their control input), and that this block corresponds to their
1581     // earliest possible placement. This guarantees that
1582     // hoist_to_cheaper_block() will always have at least one valid choice.
1583     if (self->is_memory_writer()) {
1584       assert(find_block_for_node(self->in(0)) == early,
1585              "The home of a memory writer must also be its earliest placement");
1586     }
1587 #endif
1588 
1589     MachNode* mach = self->is_Mach() ? self->as_Mach() : nullptr;
1590     if (mach) {
1591       switch (mach->ideal_Opcode()) {
      case Op_CreateEx:
        // Don't move exception creation
        early->add_inst(self);
        continue;
1597       case Op_CheckCastPP: {
1598         // Don't move CheckCastPP nodes away from their input, if the input
1599         // is a rawptr (5071820).
1600         Node *def = self->in(1);
1601         if (def != nullptr && def->bottom_type()->base() == Type::RawPtr) {
1602           early->add_inst(self);
1603 #ifdef ASSERT
1604           _raw_oops.push(def);
1605 #endif
1606           continue;
1607         }
1608         break;
1609       }
1610       default:
1611         break;
1612       }
1613       if (C->has_irreducible_loop() && self->is_memory_writer()) {
1614         // If the CFG is irreducible, place memory writers in their home block.
1615         // This prevents hoist_to_cheaper_block() from accidentally placing such
1616         // nodes into deeper loops, as in the following example:
1617         //
1618         // Home placement of store in B1 (loop L1):
1619         //
1620         // B1 (L1):
1621         //   m1 <- ..
1622         //   m2 <- store m1, ..
1623         // B2 (L2):
1624         //   jump B2
1625         // B3 (L1):
1626         //   .. <- .. m2, ..
1627         //
1628         // Wrong "hoisting" of store to B2 (in loop L2, child of L1):
1629         //
1630         // B1 (L1):
1631         //   m1 <- ..
1632         // B2 (L2):
1633         //   m2 <- store m1, ..
1634         //   # Wrong: m1 and m2 interfere at this point.
1635         //   jump B2
1636         // B3 (L1):
1637         //   .. <- .. m2, ..
1638         //
1639         // This "hoist inversion" can happen due to different factors such as
1640         // inaccurate estimation of frequencies for irreducible CFGs, and loops
1641         // with always-taken exits in reducible CFGs. In the reducible case,
1642         // hoist inversion is prevented by discarding invalid blocks (those in
1643         // deeper loops than the home block). In the irreducible case, the
1644         // invalid blocks cannot be identified due to incomplete loop nesting
1645         // information, hence a conservative solution is taken.
1646 #ifndef PRODUCT
1647         if (trace_opto_pipelining()) {
1648           tty->print_cr("# Irreducible loops: schedule in home block B%d:",
1649                         early->_pre_order);
1650           self->dump();
1651         }
1652 #endif
1653         schedule_node_into_block(self, early);
1654         continue;
1655       }
1656     }
1657 
1658     // Gather LCA of all uses
1659     Block *LCA = nullptr;
1660     {
1661       for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
1662         // For all uses, find LCA
1663         Node* use = self->fast_out(i);
1664         LCA = raise_LCA_above_use(LCA, use, self, this);
1665       }
1666       guarantee(LCA != nullptr, "There must be a LCA");
1667     }  // (Hide defs of imax, i from rest of block.)
1668 
1669     // Place temps in the block of their use.  This isn't a
1670     // requirement for correctness but it reduces useless
1671     // interference between temps and other nodes.
1672     if (mach != nullptr && mach->is_MachTemp()) {
1673       map_node_to_block(self, LCA);
1674       LCA->add_inst(self);
1675       continue;
1676     }
1677 
1678     // Check if 'self' could be anti-dependent on memory
1679     if (self->needs_anti_dependence_check()) {
1680       // Hoist LCA above possible-defs and insert anti-dependences to
1681       // defs in new LCA block.
1682       LCA = raise_above_anti_dependences(LCA, self);
1683       if (C->failing()) {
1684         return;
1685       }
1686     }
1687 
1688     if (early->_dom_depth > LCA->_dom_depth) {
1689       // Somehow the LCA has moved above the earliest legal point.
1690       // (One way this can happen is via memory_early_block.)
      if (C->subsume_loads() && !C->failing()) {
1692         // Retry with subsume_loads == false
1693         // If this is the first failure, the sentinel string will "stick"
1694         // to the Compile object, and the C2Compiler will see it and retry.
1695         C->record_failure(C2Compiler::retry_no_subsuming_loads());
1696       } else {
1697         // Bailout without retry when (early->_dom_depth > LCA->_dom_depth)
1698         assert(C->failure_is_artificial(), "graph should be schedulable");
1699         C->record_method_not_compilable("late schedule failed: incorrect graph" DEBUG_ONLY(COMMA true));
1700       }
1701       return;
1702     }
1703 
1704     if (self->is_memory_writer()) {
1705       // If the LCA of a memory writer is a descendant of its home loop, hoist
1706       // it into a valid placement.
1707       while (LCA->_loop->depth() > early->_loop->depth()) {
1708         LCA = LCA->_idom;
1709       }
1710       assert(LCA != nullptr, "a valid LCA must exist");
1711       verify_memory_writer_placement(LCA, self);
1712     }
1713 
1714     // If there is no opportunity to hoist, then we're done.
1715     // In stress mode, try to hoist even the single operations.
1716     bool try_to_hoist = StressGCM || (LCA != early);
1717 
    // Nodes that must be cloned stay next to their uses; no hoisting allowed.
    // Also cannot hoist nodes that alter memory or are otherwise not
    // allocatable (hoisting can make a value live longer, leading to
    // anti- and output-dependency problems which are normally resolved
    // by the register allocator giving everyone a different register).
1723     if (mach != nullptr && must_clone[mach->ideal_Opcode()])
1724       try_to_hoist = false;
1725 
1726     Block* late = nullptr;
1727     if (try_to_hoist) {
1728       // Now find the block with the least execution frequency.
1729       // Start at the latest schedule and work up to the earliest schedule
1730       // in the dominator tree.  Thus the Node will dominate all its uses.
1731       late = hoist_to_cheaper_block(LCA, early, self);
1732     } else {
1733       // Just use the LCA of the uses.
1734       late = LCA;
1735     }
1736 
1737     // Put the node into target block
1738     schedule_node_into_block(self, late);
1739 
1740 #ifdef ASSERT
1741     if (self->needs_anti_dependence_check()) {
1742       // since precedence edges are only inserted when we're sure they
1743       // are needed make sure that after placement in a block we don't
1744       // need any new precedence edges.
1745       verify_anti_dependences(late, self);
1746       if (C->failing()) {
1747         return;
1748       }
1749     }
1750 #endif
1751   } // Loop until all nodes have been visited
1752 
1753 } // end ScheduleLate
1754 
1755 //------------------------------GlobalCodeMotion-------------------------------
1756 void PhaseCFG::global_code_motion() {
1757   ResourceMark rm;
1758 
1759 #ifndef PRODUCT
1760   if (trace_opto_pipelining()) {
1761     tty->print("\n---- Start GlobalCodeMotion ----\n");
1762   }
1763 #endif
1764 
1765   // Initialize the node to block mapping for things on the proj_list
1766   for (uint i = 0; i < _matcher.number_of_projections(); i++) {
1767     unmap_node_from_block(_matcher.get_projection(i));
1768   }
1769 
1770   // Set the basic block for Nodes pinned into blocks
1771   VectorSet visited;
1772   schedule_pinned_nodes(visited);
1773 
1774   // Find the earliest Block any instruction can be placed in.  Some
1775   // instructions are pinned into Blocks.  Unpinned instructions can
  // appear in the last block in which all their inputs occur.
1777   visited.clear();
1778   Node_Stack stack((C->live_nodes() >> 2) + 16); // pre-grow
1779   if (!schedule_early(visited, stack)) {
1780     // Bailout without retry
1781     assert(C->failure_is_artificial(), "early schedule failed");
1782     C->record_method_not_compilable("early schedule failed" DEBUG_ONLY(COMMA true));
1783     return;
1784   }
1785 
1786   // Build Def-Use edges.
1787   // Compute the latency information (via backwards walk) for all the
1788   // instructions in the graph
1789   _node_latency = new GrowableArray<uint>(); // resource_area allocation
1790 
1791   if (C->do_scheduling()) {
1792     compute_latencies_backwards(visited, stack);
1793   }
1794 
  // Now schedule all code as LATE as possible.  This is the LCA in the
1796   // dominator tree of all USES of a value.  Pick the block with the least
1797   // loop nesting depth that is lowest in the dominator tree.
1798   // ( visited.clear() called in schedule_late()->Node_Backward_Iterator() )
1799   schedule_late(visited, stack);
1800   if (C->failing()) {
1801     return;
1802   }
1803 
1804 #ifndef PRODUCT
1805   if (trace_opto_pipelining()) {
1806     tty->print("\n---- Detect implicit null checks ----\n");
1807   }
1808 #endif
1809 
1810   // Detect implicit-null-check opportunities.  Basically, find null checks
1811   // with suitable memory ops nearby.  Use the memory op to do the null check.
1812   // I can generate a memory op if there is not one nearby.
1813   if (C->is_method_compilation()) {
1814     // By reversing the loop direction we get a very minor gain on mpegaudio.
1815     // Feel free to revert to a forward loop for clarity.
1816     // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
1817     for (int i = _matcher._null_check_tests.size() - 2; i >= 0; i -= 2) {
1818       Node* proj = _matcher._null_check_tests[i];
1819       Node* val  = _matcher._null_check_tests[i + 1];
1820       Block* block = get_block_for_node(proj);
1821       implicit_null_check(block, proj, val, C->allowed_deopt_reasons());
1822       // The implicit_null_check will only perform the transformation
1823       // if the null branch is truly uncommon, *and* it leads to an
1824       // uncommon trap.  Combined with the too_many_traps guards
1825       // above, this prevents SEGV storms reported in 6366351,
1826       // by recompiling offending methods without this optimization.
1827       if (C->failing()) {
1828         return;
1829       }
1830     }
1831   }
1832 
1833   bool block_size_threshold_ok = false;
1834   intptr_t *recalc_pressure_nodes = nullptr;
1835   if (OptoRegScheduling) {
1836     for (uint i = 0; i < number_of_blocks(); i++) {
1837       Block* block = get_block(i);
1838       if (block->number_of_nodes() > 10) {
1839         block_size_threshold_ok = true;
1840         break;
1841       }
1842     }
1843   }
1844 
  // Register-pressure-aware local scheduling is used only when
  // OptoRegScheduling is enabled and at least one block is large enough
  // to be worth scheduling (see block_size_threshold_ok above).
1847   PhaseChaitin regalloc(C->unique(), *this, _matcher, true);
1848   ResourceArea live_arena(mtCompiler, Arena::Tag::tag_reglive);      // Arena for liveness
1849   ResourceMark rm_live(&live_arena);
1850   PhaseLive live(*this, regalloc._lrg_map.names(), &live_arena, true);
1851   PhaseIFG ifg(&live_arena);
1852   if (OptoRegScheduling && block_size_threshold_ok) {
1853     regalloc.mark_ssa();
1854     Compile::TracePhase tp(_t_computeLive);
1855     rm_live.reset_to_mark();           // Reclaim working storage
1856     IndexSet::reset_memory(C, &live_arena);
1857     uint node_size = regalloc._lrg_map.max_lrg_id();
1858     ifg.init(node_size); // Empty IFG
1859     regalloc.set_ifg(ifg);
1860     regalloc.set_live(live);
1861     regalloc.gather_lrg_masks(false);    // Collect LRG masks
1862     live.compute(node_size); // Compute liveness
1863 
1864     recalc_pressure_nodes = NEW_RESOURCE_ARRAY(intptr_t, node_size);
1865     for (uint i = 0; i < node_size; i++) {
1866       recalc_pressure_nodes[i] = 0;
1867     }
1868   }
1869   _regalloc = &regalloc;
1870 
1871 #ifndef PRODUCT
1872   if (trace_opto_pipelining()) {
1873     tty->print("\n---- Start Local Scheduling ----\n");
1874   }
1875 #endif
1876 
1877   // Schedule locally.  Right now a simple topological sort.
1878   // Later, do a real latency aware scheduler.
1879   GrowableArray<int> ready_cnt(C->unique(), C->unique(), -1);
1880   visited.reset();
1881   for (uint i = 0; i < number_of_blocks(); i++) {
1882     Block* block = get_block(i);
1883     if (!schedule_local(block, ready_cnt, visited, recalc_pressure_nodes)) {
1884       if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
1885         assert(C->failure_is_artificial(), "local schedule failed");
1886         C->record_method_not_compilable("local schedule failed" DEBUG_ONLY(COMMA true));
1887       }
1888       _regalloc = nullptr;
1889       return;
1890     }
1891   }
1892   _regalloc = nullptr;
1893 
  // If we inserted any instructions between a Call and its CatchNode,
1895   // clone the instructions on all paths below the Catch.
1896   for (uint i = 0; i < number_of_blocks(); i++) {
1897     Block* block = get_block(i);
1898     call_catch_cleanup(block);
1899     if (C->failing()) {
1900       return;
1901     }
1902   }
1903 
1904 #ifndef PRODUCT
1905   if (trace_opto_pipelining()) {
1906     tty->print("\n---- After GlobalCodeMotion ----\n");
1907     for (uint i = 0; i < number_of_blocks(); i++) {
1908       Block* block = get_block(i);
1909       block->dump();
1910     }
1911   }
1912 #endif
  // Dead. Poison _node_latency so any stale access fails fast.
1914   _node_latency = (GrowableArray<uint> *)((intptr_t)0xdeadbeef);
1915 }
1916 
1917 bool PhaseCFG::do_global_code_motion() {
1918 
1919   build_dominator_tree();
1920   if (C->failing()) {
1921     return false;
1922   }
1923 
1924   NOT_PRODUCT( C->verify_graph_edges(); )
1925 
1926   estimate_block_frequency();
1927 
1928   global_code_motion();
1929 
1930   if (C->failing()) {
1931     return false;
1932   }
1933 
1934   return true;
1935 }
1936 
1937 //------------------------------Estimate_Block_Frequency-----------------------
1938 // Estimate block frequencies based on IfNode probabilities.
1939 void PhaseCFG::estimate_block_frequency() {
1940 
1941   // Force conditional branches leading to uncommon traps to be unlikely,
1942   // not because we get to the uncommon_trap with less relative frequency,
1943   // but because an uncommon_trap typically causes a deopt, so we only get
1944   // there once.
1945   if (C->do_freq_based_layout()) {
1946     Block_List worklist;
1947     Block* root_blk = get_block(0);
1948     for (uint i = 1; i < root_blk->num_preds(); i++) {
1949       Block *pb = get_block_for_node(root_blk->pred(i));
1950       if (pb->has_uncommon_code()) {
1951         worklist.push(pb);
1952       }
1953     }
1954     while (worklist.size() > 0) {
1955       Block* uct = worklist.pop();
1956       if (uct == get_root_block()) {
1957         continue;
1958       }
1959       for (uint i = 1; i < uct->num_preds(); i++) {
1960         Block *pb = get_block_for_node(uct->pred(i));
1961         if (pb->_num_succs == 1) {
1962           worklist.push(pb);
1963         } else if (pb->num_fall_throughs() == 2) {
1964           pb->update_uncommon_branch(uct);
1965         }
1966       }
1967     }
1968   }
1969 
1970   // Create the loop tree and calculate loop depth.
1971   _root_loop = create_loop_tree();
1972   _root_loop->compute_loop_depth(0);
1973 
1974   // Compute block frequency of each block, relative to a single loop entry.
1975   _root_loop->compute_freq();
1976 
1977   // Adjust all frequencies to be relative to a single method entry
1978   _root_loop->_freq = 1.0;
1979   _root_loop->scale_freq();
1980 
1981   // Save outmost loop frequency for LRG frequency threshold
1982   _outer_loop_frequency = _root_loop->outer_loop_freq();
1983 
1984   // force paths ending at uncommon traps to be infrequent
1985   if (!C->do_freq_based_layout()) {
1986     Block_List worklist;
1987     Block* root_blk = get_block(0);
1988     for (uint i = 1; i < root_blk->num_preds(); i++) {
1989       Block *pb = get_block_for_node(root_blk->pred(i));
1990       if (pb->has_uncommon_code()) {
1991         worklist.push(pb);
1992       }
1993     }
1994     while (worklist.size() > 0) {
1995       Block* uct = worklist.pop();
1996       uct->_freq = PROB_MIN;
1997       for (uint i = 1; i < uct->num_preds(); i++) {
1998         Block *pb = get_block_for_node(uct->pred(i));
1999         if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
2000           worklist.push(pb);
2001         }
2002       }
2003     }
2004   }
2005 
2006 #ifdef ASSERT
2007   for (uint i = 0; i < number_of_blocks(); i++) {
2008     Block* b = get_block(i);
2009     assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency");
2010   }
2011 #endif
2012 
2013 #ifndef PRODUCT
2014   if (PrintCFGBlockFreq) {
2015     tty->print_cr("CFG Block Frequencies");
2016     _root_loop->dump_tree();
2017     if (Verbose) {
2018       tty->print_cr("PhaseCFG dump");
2019       dump();
2020       tty->print_cr("Node dump");
2021       _root->dump(99999);
2022     }
2023   }
2024 #endif
2025 }
2026 
2027 //----------------------------create_loop_tree--------------------------------
2028 // Create a loop tree from the CFG
2029 CFGLoop* PhaseCFG::create_loop_tree() {
2030 
2031 #ifdef ASSERT
2032   assert(get_block(0) == get_root_block(), "first block should be root block");
2033   for (uint i = 0; i < number_of_blocks(); i++) {
2034     Block* block = get_block(i);
    // Check that the _loop fields are clear... we could clear them if not.
2036     assert(block->_loop == nullptr, "clear _loop expected");
2037     // Sanity check that the RPO numbering is reflected in the _blocks array.
2038     // It doesn't have to be for the loop tree to be built, but if it is not,
2039     // then the blocks have been reordered since dom graph building...which
    // calls the RPO numbering into question.
2041     assert(block->_rpo == i, "unexpected reverse post order number");
2042   }
2043 #endif
2044 
2045   int idct = 0;
2046   CFGLoop* root_loop = new CFGLoop(idct++);
2047 
2048   Block_List worklist;
2049 
2050   // Assign blocks to loops
  for (uint i = number_of_blocks() - 1; i > 0; i--) { // skip Root block
2052     Block* block = get_block(i);
2053 
2054     if (block->head()->is_Loop()) {
2055       Block* loop_head = block;
2056       assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
2057       Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
2058       Block* tail = get_block_for_node(tail_n);
2059 
2060       // Defensively filter out Loop nodes for non-single-entry loops.
2061       // For all reasonable loops, the head occurs before the tail in RPO.
2062       if (i <= tail->_rpo) {
2063 
2064         // The tail and (recursive) predecessors of the tail
2065         // are made members of a new loop.
2066 
2067         assert(worklist.size() == 0, "nonempty worklist");
2068         CFGLoop* nloop = new CFGLoop(idct++);
2069         assert(loop_head->_loop == nullptr, "just checking");
2070         loop_head->_loop = nloop;
2071         // Add to nloop so push_pred() will skip over inner loops
2072         nloop->add_member(loop_head);
2073         nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this);
2074 
2075         while (worklist.size() > 0) {
2076           Block* member = worklist.pop();
2077           if (member != loop_head) {
2078             for (uint j = 1; j < member->num_preds(); j++) {
2079               nloop->push_pred(member, j, worklist, this);
2080             }
2081           }
2082         }
2083       }
2084     }
2085   }
2086 
2087   // Create a member list for each loop consisting
2088   // of both blocks and (immediate child) loops.
2089   for (uint i = 0; i < number_of_blocks(); i++) {
2090     Block* block = get_block(i);
2091     CFGLoop* lp = block->_loop;
2092     if (lp == nullptr) {
2093       // Not assigned to a loop. Add it to the method's pseudo loop.
2094       block->_loop = root_loop;
2095       lp = root_loop;
2096     }
2097     if (lp == root_loop || block != lp->head()) { // loop heads are already members
2098       lp->add_member(block);
2099     }
2100     if (lp != root_loop) {
2101       if (lp->parent() == nullptr) {
2102         // Not a nested loop. Make it a child of the method's pseudo loop.
2103         root_loop->add_nested_loop(lp);
2104       }
2105       if (block == lp->head()) {
2106         // Add nested loop to member list of parent loop.
2107         lp->parent()->add_member(lp);
2108       }
2109     }
2110   }
2111 
2112   return root_loop;
2113 }
2114 
2115 //------------------------------push_pred--------------------------------------
2116 void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) {
2117   Node* pred_n = blk->pred(i);
2118   Block* pred = cfg->get_block_for_node(pred_n);
2119   CFGLoop *pred_loop = pred->_loop;
2120   if (pred_loop == nullptr) {
2121     // Filter out blocks for non-single-entry loops.
2122     // For all reasonable loops, the head occurs before the tail in RPO.
2123     if (pred->_rpo > head()->_rpo) {
2124       pred->_loop = this;
2125       worklist.push(pred);
2126     }
2127   } else if (pred_loop != this) {
2128     // Nested loop.
2129     while (pred_loop->_parent != nullptr && pred_loop->_parent != this) {
2130       pred_loop = pred_loop->_parent;
2131     }
2132     // Make pred's loop be a child
2133     if (pred_loop->_parent == nullptr) {
2134       add_nested_loop(pred_loop);
2135       // Continue with loop entry predecessor.
2136       Block* pred_head = pred_loop->head();
2137       assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
2138       assert(pred_head != head(), "loop head in only one loop");
2139       push_pred(pred_head, LoopNode::EntryControl, worklist, cfg);
2140     } else {
2141       assert(pred_loop->_parent == this && _parent == nullptr, "just checking");
2142     }
2143   }
2144 }
2145 
2146 //------------------------------add_nested_loop--------------------------------
2147 // Make cl a child of the current loop in the loop tree.
2148 void CFGLoop::add_nested_loop(CFGLoop* cl) {
2149   assert(_parent == nullptr, "no parent yet");
2150   assert(cl != this, "not my own parent");
2151   cl->_parent = this;
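  // Child loops form a singly-linked list through _sibling; append cl at
  // the tail so children stay in discovery order.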
2152   CFGLoop* ch = _child;
2153   if (ch == nullptr) {
2154     _child = cl;
2155   } else {
2156     while (ch->_sibling != nullptr) { ch = ch->_sibling; }
2157     ch->_sibling = cl;
2158   }
2159 }
2160 
2161 //------------------------------compute_loop_depth-----------------------------
2162 // Store the loop depth in each CFGLoop object.
2163 // Recursively walk the children to do the same for them.
2164 void CFGLoop::compute_loop_depth(int depth) {
2165   _depth = depth;
2166   CFGLoop* ch = _child;
2167   while (ch != nullptr) {
2168     ch->compute_loop_depth(depth + 1);
2169     ch = ch->_sibling;
2170   }
2171 }
2172 
2173 //------------------------------compute_freq-----------------------------------
2174 // Compute the frequency of each block and loop, relative to a single entry
2175 // into the dominating loop head.
2176 void CFGLoop::compute_freq() {
2177   // Bottom up traversal of loop tree (visit inner loops first.)
2178   // Set loop head frequency to 1.0, then transitively
2179   // compute frequency for all successors in the loop,
2180   // as well as for each exit edge.  Inner loops are
2181   // treated as single blocks with loop exit targets
2182   // as the successor blocks.
2183 
2184   // Nested loops first
2185   CFGLoop* ch = _child;
2186   while (ch != nullptr) {
2187     ch->compute_freq();
2188     ch = ch->_sibling;
2189   }
  assert(_members.length() > 0, "no empty loops");
2191   Block* hd = head();
2192   hd->_freq = 1.0;
2193   for (int i = 0; i < _members.length(); i++) {
2194     CFGElement* s = _members.at(i);
2195     double freq = s->_freq;
2196     if (s->is_block()) {
2197       Block* b = s->as_Block();
2198       for (uint j = 0; j < b->_num_succs; j++) {
2199         Block* sb = b->_succs[j];
2200         update_succ_freq(sb, freq * b->succ_prob(j));
2201       }
2202     } else {
2203       CFGLoop* lp = s->as_CFGLoop();
2204       assert(lp->_parent == this, "immediate child");
2205       for (int k = 0; k < lp->_exits.length(); k++) {
2206         Block* eb = lp->_exits.at(k).get_target();
2207         double prob = lp->_exits.at(k).get_prob();
2208         update_succ_freq(eb, freq * prob);
2209       }
2210     }
2211   }
2212 
2213   // For all loops other than the outer, "method" loop,
2214   // sum and normalize the exit probability. The "method" loop
2215   // should keep the initial exit probability of 1, so that
2216   // inner blocks do not get erroneously scaled.
2217   if (_depth != 0) {
2218     // Total the exit probabilities for this loop.
    double exits_sum = 0.0;
2220     for (int i = 0; i < _exits.length(); i++) {
2221       exits_sum += _exits.at(i).get_prob();
2222     }
2223 
2224     // Normalize the exit probabilities. Until now, the
2225     // probabilities estimate the possibility of exit per
2226     // a single loop iteration; afterward, they estimate
2227     // the probability of exit per loop entry.
2228     for (int i = 0; i < _exits.length(); i++) {
2229       Block* et = _exits.at(i).get_target();
2230       float new_prob = 0.0f;
2231       if (_exits.at(i).get_prob() > 0.0f) {
2232         new_prob = _exits.at(i).get_prob() / exits_sum;
2233       }
2234       BlockProbPair bpp(et, new_prob);
2235       _exits.at_put(i, bpp);
2236     }
2237 
2238     // Save the total, but guard against unreasonable probability,
2239     // as the value is used to estimate the loop trip count.
2240     // An infinite trip count would blur relative block
2241     // frequencies.
    if (exits_sum > 1.0) exits_sum = 1.0;
2243     if (exits_sum < PROB_MIN) exits_sum = PROB_MIN;
2244     _exit_prob = exits_sum;
2245   }
2246 }
2247 
2248 //------------------------------succ_prob-------------------------------------
2249 // Determine the probability of reaching successor 'i' from the receiver block.
2250 float Block::succ_prob(uint i) {
2251   int eidx = end_idx();
2252   Node *n = get_node(eidx);  // Get ending Node
2253 
2254   int op = n->Opcode();
2255   if (n->is_Mach()) {
2256     if (n->is_MachNullCheck()) {
2257       // Can only reach here if called after lcm. The original Op_If is gone,
2258       // so we attempt to infer the probability from one or both of the
2259       // successor blocks.
2260       assert(_num_succs == 2, "expecting 2 successors of a null check");
2261       // If either successor has only one predecessor, then the
2262       // probability estimate can be derived using the
2263       // relative frequency of the successor and this block.
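      // E.g. (illustrative numbers): if this block has _freq == 10.0 and
      // _succs[i] has a single CFG predecessor with _freq == 9.0, the
      // estimated probability is 9.0 / 10.0 == 0.9.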
2264       if (_succs[i]->num_preds() == 2) {
2265         return _succs[i]->_freq / _freq;
2266       } else if (_succs[1-i]->num_preds() == 2) {
2267         return 1 - (_succs[1-i]->_freq / _freq);
2268       } else {
2269         // Estimate using both successor frequencies
2270         float freq = _succs[i]->_freq;
2271         return freq / (freq + _succs[1-i]->_freq);
2272       }
2273     }
2274     op = n->as_Mach()->ideal_Opcode();
2275   }
2276 
2278   // Switch on branch type
2279   switch( op ) {
2280   case Op_CountedLoopEnd:
2281   case Op_If: {
2282     assert (i < 2, "just checking");
2283     // Conditionals pass on only part of their frequency
2284     float prob  = n->as_MachIf()->_prob;
2285     assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
2286     // If succ[i] is the FALSE branch, invert path info
2287     if( get_node(i + eidx + 1)->Opcode() == Op_IfFalse ) {
2288       return 1.0f - prob; // not taken
2289     } else {
2290       return prob; // taken
2291     }
2292   }
2293 
2294   case Op_Jump:
2295     return n->as_MachJump()->_probs[get_node(i + eidx + 1)->as_JumpProj()->_con];
2296 
2297   case Op_Catch: {
2298     const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
2299     if (ci->_con == CatchProjNode::fall_through_index) {
2300       // Fall-thru path gets the lion's share.
2301       return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
2302     } else {
2303       // Presume exceptional paths are equally unlikely
2304       return PROB_UNLIKELY_MAG(5);
2305     }
2306   }
2307 
2308   case Op_Root:
2309   case Op_Goto:
2310     // Pass frequency straight thru to target
2311     return 1.0f;
2312 
2313   case Op_NeverBranch: {
2314     Node* succ = n->as_NeverBranch()->proj_out(0)->unique_ctrl_out();
2315     if (_succs[i]->head() == succ) {
2316       return 1.0f;
2317     }
2318     return 0.0f;
2319   }
2320 
2321   case Op_TailCall:
2322   case Op_TailJump:
2323   case Op_ForwardException:
2324   case Op_Return:
2325   case Op_Halt:
2326   case Op_Rethrow:
2327     // Do not push out freq to root block
2328     return 0.0f;
2329 
2330   default:
2331     ShouldNotReachHere();
2332   }
2333 
2334   return 0.0f;
2335 }
2336 
2337 //------------------------------num_fall_throughs-----------------------------
2338 // Return the number of fall-through candidates for a block
2339 int Block::num_fall_throughs() {
2340   int eidx = end_idx();
2341   Node *n = get_node(eidx);  // Get ending Node
2342 
2343   int op = n->Opcode();
2344   if (n->is_Mach()) {
2345     if (n->is_MachNullCheck()) {
      // In theory, either side can fall through; for simplicity's sake,
      // say that only the false branch can for now.
2348       return 1;
2349     }
2350     op = n->as_Mach()->ideal_Opcode();
2351   }
2352 
2353   // Switch on branch type
2354   switch( op ) {
2355   case Op_CountedLoopEnd:
2356   case Op_If:
2357     return 2;
2358 
2359   case Op_Root:
2360   case Op_Goto:
2361     return 1;
2362 
2363   case Op_Catch: {
2364     for (uint i = 0; i < _num_succs; i++) {
2365       const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
2366       if (ci->_con == CatchProjNode::fall_through_index) {
2367         return 1;
2368       }
2369     }
2370     return 0;
2371   }
2372 
2373   case Op_Jump:
2374   case Op_NeverBranch:
2375   case Op_TailCall:
2376   case Op_TailJump:
2377   case Op_ForwardException:
2378   case Op_Return:
2379   case Op_Halt:
2380   case Op_Rethrow:
2381     return 0;
2382 
2383   default:
2384     ShouldNotReachHere();
2385   }
2386 
2387   return 0;
2388 }
2389 
2390 //------------------------------succ_fall_through-----------------------------
2391 // Return true if a specific successor could be fall-through target.
2392 bool Block::succ_fall_through(uint i) {
2393   int eidx = end_idx();
2394   Node *n = get_node(eidx);  // Get ending Node
2395 
2396   int op = n->Opcode();
2397   if (n->is_Mach()) {
2398     if (n->is_MachNullCheck()) {
      // In theory, either side can fall through; for simplicity's sake,
      // say that only the false branch can for now.
2401       return get_node(i + eidx + 1)->Opcode() == Op_IfFalse;
2402     }
2403     op = n->as_Mach()->ideal_Opcode();
2404   }
2405 
2406   // Switch on branch type
2407   switch( op ) {
2408   case Op_CountedLoopEnd:
2409   case Op_If:
2410   case Op_Root:
2411   case Op_Goto:
2412     return true;
2413 
2414   case Op_Catch: {
2415     const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
2416     return ci->_con == CatchProjNode::fall_through_index;
2417   }
2418 
2419   case Op_Jump:
2420   case Op_NeverBranch:
2421   case Op_TailCall:
2422   case Op_TailJump:
2423   case Op_ForwardException:
2424   case Op_Return:
2425   case Op_Halt:
2426   case Op_Rethrow:
2427     return false;
2428 
2429   default:
2430     ShouldNotReachHere();
2431   }
2432 
2433   return false;
2434 }
2435 
2436 //------------------------------update_uncommon_branch------------------------
2437 // Update the probability of a two-branch to be uncommon
2438 void Block::update_uncommon_branch(Block* ub) {
2439   int eidx = end_idx();
2440   Node *n = get_node(eidx);  // Get ending Node
2441 
2442   int op = n->as_Mach()->ideal_Opcode();
2443 
  assert(op == Op_CountedLoopEnd || op == Op_If, "must be an If");
2445   assert(num_fall_throughs() == 2, "must be a two way branch block");
2446 
2447   // Which successor is ub?
2448   uint s;
2449   for (s = 0; s <_num_succs; s++) {
2450     if (_succs[s] == ub) break;
2451   }
2452   assert(s < 2, "uncommon successor must be found");
2453 
  // If ub is on the true path, make the probability small; if ub is on
  // the false path, make the probability large.
2456   bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse);
2457 
2458   // Get existing probability
2459   float p = n->as_MachIf()->_prob;
2460 
2461   if (invert) p = 1.0 - p;
2462   if (p > PROB_MIN) {
2463     p = PROB_MIN;
2464   }
2465   if (invert) p = 1.0 - p;
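  // E.g. (illustrative numbers): if ub is on the false path and the taken
  // probability is 0.5, we invert to 0.5, clamp down to PROB_MIN, and
  // invert back, so the false (uncommon) path keeps only PROB_MIN.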
2466 
2467   n->as_MachIf()->_prob = p;
2468 }
2469 
2470 //------------------------------update_succ_freq-------------------------------
2471 // Update the appropriate frequency associated with block 'b', a successor of
2472 // a block in this loop.
2473 void CFGLoop::update_succ_freq(Block* b, double freq) {
2474   if (b->_loop == this) {
2475     if (b == head()) {
2476       // back branch within the loop
      // Do nothing now; the loop-carried frequency will be
      // adjusted later in scale_freq().
2479     } else {
2480       // simple branch within the loop
2481       b->_freq += freq;
2482     }
2483   } else if (!in_loop_nest(b)) {
2484     // branch is exit from this loop
2485     BlockProbPair bpp(b, freq);
2486     _exits.append(bpp);
2487   } else {
2488     // branch into nested loop
2489     CFGLoop* ch = b->_loop;
2490     ch->_freq += freq;
2491   }
2492 }
2493 
2494 //------------------------------in_loop_nest-----------------------------------
2495 // Determine if block b is in the receiver's loop nest.
2496 bool CFGLoop::in_loop_nest(Block* b) {
2497   int depth = _depth;
2498   CFGLoop* b_loop = b->_loop;
2499   int b_depth = b_loop->_depth;
2500   if (depth == b_depth) {
2501     return true;
2502   }
2503   while (b_depth > depth) {
2504     b_loop = b_loop->_parent;
2505     b_depth = b_loop->_depth;
2506   }
2507   return b_loop == this;
2508 }
2509 
2510 //------------------------------scale_freq-------------------------------------
2511 // Scale frequency of loops and blocks by trip counts from outer loops
2512 // Do a top down traversal of loop tree (visit outer loops first.)
2513 void CFGLoop::scale_freq() {
2514   double loop_freq = _freq * trip_count();
2515   _freq = loop_freq;
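  // E.g. (illustrative numbers): a loop entered with _freq == 2.0 and an
  // estimated trip count of 10 gets loop_freq == 20.0; a member block with
  // within-loop frequency 0.5 then ends up with frequency 10.0.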
2516   for (int i = 0; i < _members.length(); i++) {
2517     CFGElement* s = _members.at(i);
2518     double block_freq = s->_freq * loop_freq;
2519     if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY)
2520       block_freq = MIN_BLOCK_FREQUENCY;
2521     s->_freq = block_freq;
2522   }
2523   CFGLoop* ch = _child;
2524   while (ch != nullptr) {
2525     ch->scale_freq();
2526     ch = ch->_sibling;
2527   }
2528 }
2529 
2530 // Frequency of outer loop
2531 double CFGLoop::outer_loop_freq() const {
2532   if (_child != nullptr) {
2533     return _child->_freq;
2534   }
2535   return _freq;
2536 }
2537 
2538 #ifndef PRODUCT
2539 //------------------------------dump_tree--------------------------------------
2540 void CFGLoop::dump_tree() const {
2541   dump();
2542   if (_child != nullptr)   _child->dump_tree();
2543   if (_sibling != nullptr) _sibling->dump_tree();
2544 }
2545 
2546 //------------------------------dump-------------------------------------------
2547 void CFGLoop::dump() const {
2548   for (int i = 0; i < _depth; i++) tty->print("   ");
2549   tty->print("%s: %d  trip_count: %6.0f freq: %6.0f\n",
2550              _depth == 0 ? "Method" : "Loop", _id, trip_count(), _freq);
2551   for (int i = 0; i < _depth; i++) tty->print("   ");
2552   tty->print("         members:");
2553   int k = 0;
2554   for (int i = 0; i < _members.length(); i++) {
2555     if (k++ >= 6) {
2556       tty->print("\n              ");
2557       for (int j = 0; j < _depth+1; j++) tty->print("   ");
2558       k = 0;
2559     }
2560     CFGElement *s = _members.at(i);
2561     if (s->is_block()) {
2562       Block *b = s->as_Block();
2563       tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq);
2564     } else {
2565       CFGLoop* lp = s->as_CFGLoop();
2566       tty->print(" L%d(%6.3f)", lp->_id, lp->_freq);
2567     }
2568   }
2569   tty->print("\n");
2570   for (int i = 0; i < _depth; i++) tty->print("   ");
2571   tty->print("         exits:  ");
2572   k = 0;
2573   for (int i = 0; i < _exits.length(); i++) {
2574     if (k++ >= 7) {
2575       tty->print("\n              ");
2576       for (int j = 0; j < _depth+1; j++) tty->print("   ");
2577       k = 0;
2578     }
2579     Block *blk = _exits.at(i).get_target();
2580     double prob = _exits.at(i).get_prob();
2581     tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100));
2582   }
2583   tty->print("\n");
2584 }
2585 #endif