/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/block.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/machnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "runtime/deoptimization.hpp"
#if defined AD_MD_HPP
# include AD_MD_HPP
#elif defined TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
#elif defined TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#elif defined TARGET_ARCH_MODEL_aarch64
# include "adfiles/ad_aarch64.hpp"
#elif defined TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#elif defined TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
#elif defined TARGET_ARCH_MODEL_ppc_64
# include "adfiles/ad_ppc_64.hpp"
#endif


// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

// To avoid float value underflow
#define MIN_BLOCK_FREQUENCY 1.e-35f

//----------------------------schedule_node_into_block-------------------------
// Insert node n into block b. Look for projections of n and make sure they
// are in b also.
void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
  // Set basic block of n, Add n to b,
  map_node_to_block(n, b);
  b->add_inst(n);

  // After Matching, nearly any old Node may have projections trailing it.
  // These are usually machine-dependent flags.  In any case, they might
  // float to another block below this one.  Move them up.
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node*  use  = n->fast_out(i);
    if (use->is_Proj()) {
      Block* buse = get_block_for_node(use);
      if (buse != b) {              // In wrong block?
        if (buse != NULL) {
          buse->find_remove(use);   // Remove from wrong block
        }
        map_node_to_block(use, b);
        b->add_inst(use);
      }
    }
  }
}

//----------------------------replace_block_proj_ctrl-------------------------
// Nodes that have is_block_proj() nodes as their control need to use
// the appropriate Region for their actual block as their control since
// the projection will be in a predecessor block.
void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
  const Node *in0 = n->in(0);
  assert(in0 != NULL, "Only control-dependent");
  const Node *p = in0->is_block_proj();
  if (p != NULL && p != n) {    // Control from a block projection?
    assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
    // Find trailing Region
    Block *pb = get_block_for_node(in0); // Block-projection already has basic block
    uint j = 0;
    if (pb->_num_succs != 1) {  // More than 1 successor?
      // Search for successor
      uint max = pb->number_of_nodes();
      assert( max > 1, "" );
      uint start = max - pb->_num_succs;
      // Find which output path belongs to projection
      for (j = start; j < max; j++) {
        if( pb->get_node(j) == in0 )
          break;
      }
      assert( j < max, "must find" );
      // Change control to match head of successor basic block
      j -= start;
    }
    n->set_req(0, pb->_succs[j]->head());
  }
}

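// Convenience dominance test: d dominates n exactly when walking n up the
// dominator tree reaches d, i.e. when the dominator-tree LCA of d and n is
// d itself.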
static bool is_dominator(Block* d, Block* n) {
  return d->dom_lca(n) == d;
}

//------------------------------schedule_pinned_nodes--------------------------
// Set the basic block for Nodes pinned into blocks
void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
  // Allocate node stack of size C->live_nodes()+8 to avoid frequent realloc
  GrowableArray <Node *> spstack(C->live_nodes() + 8);
  spstack.push(_root);
  while (spstack.is_nonempty()) {
    Node* node = spstack.pop();
    if (!visited.test_set(node->_idx)) { // Test node and flag it as visited
      if (node->pinned() && !has_block(node)) {  // Pinned?  Nail it down!
        assert(node->in(0), "pinned Node must have Control");
        // Before setting block replace block_proj control edge
        replace_block_proj_ctrl(node);
        Node* input = node->in(0);
        while (!input->is_block_start()) {
          input = input->in(0);
        }
        Block* block = get_block_for_node(input); // Basic block of controlling input
        schedule_node_into_block(node, block);
      }

      // If the node has precedence edges (added when CastPP nodes are
      // removed in final_graph_reshaping), fix the control of the
      // node to cover the precedence edges and remove the
      // dependencies.
      Node* n = NULL;
      for (uint i = node->len()-1; i >= node->req(); i--) {
        Node* m = node->in(i);
        if (m == NULL) continue;
        // Skip the precedence edge if the test that guarded a CastPP:
        // - was optimized out during escape analysis
        // (OptimizePtrCompare): the CastPP's control isn't an end of
        // block.
        // - is moved in the branch of a dominating If: the control of
        // the CastPP is then a Region.
        if (m->is_block_proj() || m->is_block_start()) {
          node->rm_prec(i);
          if (n == NULL) {
            n = m;
          } else {
            Block* bn = get_block_for_node(n);
            Block* bm = get_block_for_node(m);
            assert(is_dominator(bn, bm) || is_dominator(bm, bn), "one must dominate the other");
            n = is_dominator(bn, bm) ? m : n;
          }
        }
      }
      if (n != NULL) {
        assert(node->in(0), "control should have been set");
        Block* bn = get_block_for_node(n);
        Block* bnode = get_block_for_node(node->in(0));
        assert(is_dominator(bn, bnode) || is_dominator(bnode, bn), "one must dominate the other");
        if (!is_dominator(bn, bnode)) {
          node->set_req(0, n);
        }
      }

      // Process all inputs that are non-NULL
      for (int i = node->req() - 1; i >= 0; --i) {
        if (node->in(i) != NULL) {
          spstack.push(node->in(i));
        }
      }
    }
  }
}

#ifdef ASSERT
// Assert that new input b2 is dominated by all previous inputs.
// Check this by seeing that it is dominated by b1, the deepest
// input observed before b2.
static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
  if (b1 == NULL)  return;
  assert(b1->_dom_depth < b2->_dom_depth, "sanity");
  Block* tmp = b2;
  while (tmp != b1 && tmp != NULL) {
    tmp = tmp->_idom;
  }
  if (tmp != b1) {
    // Detected an unschedulable graph.  Print some nice stuff and die.
    tty->print_cr("!!! Unschedulable graph !!!");
    for (uint j=0; j<n->len(); j++) { // For all inputs
      Node* inn = n->in(j); // Get input
      if (inn == NULL)  continue;  // Ignore NULL, missing inputs
      Block* inb = cfg->get_block_for_node(inn);
      tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
                 inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
      inn->dump();
    }
    tty->print("Failing node: ");
    n->dump();
    assert(false, "unschedulable graph");
  }
}
#endif

static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
  // Find the last input dominated by all other inputs.
  Block* deepb           = NULL;        // Deepest block so far
  int    deepb_dom_depth = 0;
  for (uint k = 0; k < n->len(); k++) { // For all inputs
    Node* inn = n->in(k);               // Get input
    if (inn == NULL)  continue;         // Ignore NULL, missing inputs
    Block* inb = cfg->get_block_for_node(inn);
    assert(inb != NULL, "must already have scheduled this input");
    if (deepb_dom_depth < (int) inb->_dom_depth) {
      // The new inb must be dominated by the previous deepb.
      // The various inputs must be linearly ordered in the dom
      // tree, or else there will not be a unique deepest block.
      DEBUG_ONLY(assert_dom(deepb, inb, n, cfg));
      deepb = inb;                      // Save deepest block
      deepb_dom_depth = deepb->_dom_depth;
    }
  }
  assert(deepb != NULL, "must be at least one input to n");
  return deepb;
}
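// Illustrative example (hypothetical block names, not from this file): if a
// node's inputs live in blocks B1 (dom_depth 1) and B3 (dom_depth 3), and B1
// dominates B3, then B3 is returned; it is the earliest block in which every
// input value is already available.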


//------------------------------schedule_early---------------------------------
// Find the earliest Block any instruction can be placed in.  Some instructions
// are pinned into Blocks.  Unpinned instructions can appear in the last block
// in which all their inputs occur.
bool PhaseCFG::schedule_early(VectorSet &visited, Node_List &roots) {
  // Allocate stack with enough space to avoid frequent realloc
  Node_Stack nstack(roots.Size() + 8);
  // _root will be processed among C->top() inputs
  roots.push(C->top());
  visited.set(C->top()->_idx);

  while (roots.size() != 0) {
    // Use local variables parent_node & input_index to cache values
    // on stack's top.
    Node* parent_node = roots.pop();
    uint  input_index = 0;

    while (true) {
      if (input_index == 0) {
        // Fixup some control.  Constants without control get attached
        // to root and nodes that use is_block_proj() nodes should be attached
        // to the region that starts their block.
        const Node* control_input = parent_node->in(0);
        if (control_input != NULL) {
          replace_block_proj_ctrl(parent_node);
        } else {
          // Is a constant with NO inputs?
          if (parent_node->req() == 1) {
            parent_node->set_req(0, _root);
          }
        }
      }

      // First, visit all inputs and force them to get a block.  If an
      // input is already in a block we quit following inputs (to avoid
      // cycles). Instead we put that Node on a worklist to be handled
      // later (since ITS inputs may not have a block yet).

      // Assume all n's inputs will be processed
      bool done = true;

      while (input_index < parent_node->len()) {
        Node* in = parent_node->in(input_index++);
        if (in == NULL) {
          continue;
        }

        int is_visited = visited.test_set(in->_idx);
        if (!has_block(in)) {
          if (is_visited) {
            assert(false, "graph should be schedulable");
            return false;
          }
          // Save parent node and next input's index.
          nstack.push(parent_node, input_index);
          // Process current input now.
          parent_node = in;
          input_index = 0;
          // Not all n's inputs processed.
          done = false;
          break;
        } else if (!is_visited) {
          // Visit this guy later, using worklist
          roots.push(in);
        }
      }

      if (done) {
        // All of n's inputs have been processed, complete post-processing.

        // Some instructions are pinned into a block.  These include Region,
        // Phi, Start, Return, and other control-dependent instructions and
        // any projections which depend on them.
        if (!parent_node->pinned()) {
          // Set earliest legal block.
          Block* earliest_block = find_deepest_input(parent_node, this);
          map_node_to_block(parent_node, earliest_block);
        } else {
          assert(get_block_for_node(parent_node) == get_block_for_node(parent_node->in(0)), "Pinned Node should be at the same block as its control edge");
        }

        if (nstack.is_empty()) {
          // Finished all nodes on stack.
          // Process next node on the worklist 'roots'.
          break;
        }
        // Get saved parent node and next input's index.
        parent_node = nstack.node();
        input_index = nstack.index();
        nstack.pop();
      }
    }
  }
  return true;
}

//------------------------------dom_lca----------------------------------------
// Find least common ancestor in dominator tree
// LCA is a current notion of LCA, to be raised above 'this'.
// As a convenient boundary condition, return 'this' if LCA is NULL.
// Find the LCA of those two nodes.
Block* Block::dom_lca(Block* LCA) {
  if (LCA == NULL || LCA == this)  return this;

  Block* anc = this;
  while (anc->_dom_depth > LCA->_dom_depth)
    anc = anc->_idom;           // Walk up till anc is as high as LCA

  while (LCA->_dom_depth > anc->_dom_depth)
    LCA = LCA->_idom;           // Walk up till LCA is as high as anc

  while (LCA != anc) {          // Walk both up till they are the same
    LCA = LCA->_idom;
    anc = anc->_idom;
  }

  return LCA;
}
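// Worked example (hypothetical depths): for 'this' at dom_depth 5 and LCA at
// dom_depth 3, 'this' first walks up twice so both blocks sit at depth 3,
// then both walk up in lock-step until they meet at the common dominator.
// The two-pointer walk is O(tree height) and needs no extra storage.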

//--------------------------raise_LCA_above_use--------------------------------
// We are placing a definition, and have been given a def->use edge.
// The definition must dominate the use, so move the LCA upward in the
// dominator tree to dominate the use.  If the use is a phi, adjust
// the LCA only with the phi input paths which actually use this def.
static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) {
  Block* buse = cfg->get_block_for_node(use);
  if (buse == NULL)    return LCA;   // Unused killing Projs have no use block
  if (!use->is_Phi())  return buse->dom_lca(LCA);
  uint pmax = use->req();       // Number of Phi inputs
  // Why doesn't this loop just break after finding the matching input to
  // the Phi?  Well...it's like this.  I do not have true def-use/use-def
  // chains.  Means I cannot distinguish, from the def-use direction, which
  // of many use-defs lead from the same use to the same def.  That is, this
  // Phi might have several uses of the same def.  Each use appears in a
  // different predecessor block.  But when I enter here, I cannot distinguish
  // which use-def edge I should find the predecessor block for.  So I find
  // them all.  Means I do a little extra work if a Phi uses the same value
  // more than once.
  for (uint j=1; j<pmax; j++) { // For all inputs
    if (use->in(j) == def) {    // Found matching input?
      Block* pred = cfg->get_block_for_node(buse->pred(j));
      LCA = pred->dom_lca(LCA);
    }
  }
  return LCA;
}
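// Example (illustrative): for a Phi merging def on inputs 1 and 3, the LCA is
// raised above the predecessor blocks of paths 1 and 3 only; a path 2 that
// carries a different value does not constrain where def may be placed.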

//----------------------------raise_LCA_above_marks----------------------------
// Return a new LCA that dominates LCA and any of its marked predecessors.
// Search all my parents up to 'early' (exclusive), looking for predecessors
// which are marked with the given index.  Return the LCA (in the dom tree)
// of all marked blocks.  If there are none marked, return the original
// LCA.
static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) {
  Block_List worklist;
  worklist.push(LCA);
  while (worklist.size() > 0) {
    Block* mid = worklist.pop();
    if (mid == early)  continue;  // stop searching here

    // Test and set the visited bit.
    if (mid->raise_LCA_visited() == mark)  continue;  // already visited

    // Don't process the current LCA, otherwise the search may terminate early
    if (mid != LCA && mid->raise_LCA_mark() == mark) {
      // Raise the LCA.
      LCA = mid->dom_lca(LCA);
      if (LCA == early)  break;   // stop searching everywhere
      assert(early->dominates(LCA), "early is high enough");
      // Resume searching at that point, skipping intermediate levels.
      worklist.push(LCA);
      if (LCA == mid)
        continue; // Don't mark as visited to avoid early termination.
    } else {
      // Keep searching through this block's predecessors.
      for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
        Block* mid_parent = cfg->get_block_for_node(mid->pred(j));
        worklist.push(mid_parent);
      }
    }
    mid->set_raise_LCA_visited(mark);
  }
  return LCA;
}
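// Design note (inferred from the code above): using the load's node index as
// both the mark and the visited stamp is a generation-number trick; each
// query supplies a fresh mark value, so the per-block mark and visited
// fields never need to be cleared between queries.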

//--------------------------memory_early_block--------------------------------
// This is a variation of find_deepest_input, the heart of schedule_early.
// Find the "early" block for a load, if we considered only memory and
// address inputs, that is, if other data inputs were ignored.
//
// Because a subset of edges are considered, the resulting block will
// be earlier (at a shallower dom_depth) than the true schedule_early
// point of the node. We compute this earlier block as a more permissive
// site for anti-dependency insertion, but only if subsume_loads is enabled.
static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) {
  Node* base;
  Node* index;
  Node* store = load->in(MemNode::Memory);
  load->as_Mach()->memory_inputs(base, index);

  assert(base != NodeSentinel && index != NodeSentinel,
         "unexpected base/index inputs");

  Node* mem_inputs[4];
  int mem_inputs_length = 0;
  if (base != NULL)  mem_inputs[mem_inputs_length++] = base;
  if (index != NULL) mem_inputs[mem_inputs_length++] = index;
  if (store != NULL) mem_inputs[mem_inputs_length++] = store;

  // In the comparison below, add one to account for the control input,
  // which may be null, but always takes up a spot in the in array.
  if (mem_inputs_length + 1 < (int) load->req()) {
    // This "load" has more inputs than just the memory, base and index inputs.
    // For purposes of checking anti-dependences, we need to start
    // from the early block of only the address portion of the instruction,
    // and ignore other blocks that may have factored into the wider
    // schedule_early calculation.
    if (load->in(0) != NULL) mem_inputs[mem_inputs_length++] = load->in(0);

    Block* deepb           = NULL;        // Deepest block so far
    int    deepb_dom_depth = 0;
    for (int i = 0; i < mem_inputs_length; i++) {
      Block* inb = cfg->get_block_for_node(mem_inputs[i]);
      if (deepb_dom_depth < (int) inb->_dom_depth) {
        // The new inb must be dominated by the previous deepb.
        // The various inputs must be linearly ordered in the dom
        // tree, or else there will not be a unique deepest block.
        DEBUG_ONLY(assert_dom(deepb, inb, load, cfg));
        deepb = inb;                      // Save deepest block
        deepb_dom_depth = deepb->_dom_depth;
      }
    }
    early = deepb;
  }

  return early;
}

//--------------------------insert_anti_dependences---------------------------
// A load may need to witness memory that nearby stores can overwrite.
// For each nearby store, either insert an "anti-dependence" edge
// from the load to the store, or else move LCA upward to force the
// load to (eventually) be scheduled in a block above the store.
//
// Do not add edges to stores on distinct control-flow paths;
// only add edges to stores which might interfere.
//
// Return the (updated) LCA.  There will not be any possibly interfering
// store between the load's "early block" and the updated LCA.
// Any stores in the updated LCA will have new precedence edges
// back to the load.  The caller is expected to schedule the load
// in the LCA, in which case the precedence edges will make LCM
// preserve anti-dependences.  The caller may also hoist the load
// above the LCA, if it is not the early block.
Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
  assert(load->needs_anti_dependence_check(), "must be a load of some sort");
  assert(LCA != NULL, "");
  DEBUG_ONLY(Block* LCA_orig = LCA);

  // Compute the alias index.  Loads and stores with different alias indices
  // do not need anti-dependence edges.
  uint load_alias_idx = C->get_alias_index(load->adr_type());
#ifdef ASSERT
  if (load_alias_idx == Compile::AliasIdxBot && C->AliasLevel() > 0 &&
      (PrintOpto || VerifyAliases ||
       (PrintMiscellaneous && (WizardMode || Verbose)))) {
    // Load nodes should not consume all of memory.
    // Reporting a bottom type indicates a bug in adlc.
    // If some particular type of node validly consumes all of memory,
    // sharpen the preceding "if" to exclude it, so we can catch bugs here.
    tty->print_cr("*** Possible Anti-Dependence Bug:  Load consumes all of memory.");
    load->dump(2);
    if (VerifyAliases)  assert(load_alias_idx != Compile::AliasIdxBot, "");
  }
#endif
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrComp),
         "String compare is the only known 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrEquals),
         "String equals is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrIndexOf),
         "String indexOf is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_AryEq),
         "Arrays equals is a 'load' that does not conflict with any stores");

  if (!C->alias_type(load_alias_idx)->is_rewritable()) {
    // It is impossible to spoil this load by putting stores before it,
    // because we know that the stores will never update the value
    // which 'load' must witness.
    return LCA;
  }

  node_idx_t load_index = load->_idx;

  // Note the earliest legal placement of 'load', as determined
  // by the unique point in the dom tree where all memory effects
  // and other inputs are first available.  (Computed by schedule_early.)
  // For normal loads, 'early' is the shallowest place (dom graph wise)
  // to look for anti-deps between this load and any store.
  Block* early = get_block_for_node(load);

  // If we are subsuming loads, compute an "early" block that only considers
  // memory or address inputs. This block may be different from the
  // schedule_early block in that it could be at an even shallower depth in the
  // dominator tree, and allow for a broader discovery of anti-dependences.
  if (C->subsume_loads()) {
    early = memory_early_block(load, early, this);
  }

  ResourceArea *area = Thread::current()->resource_area();
  Node_List worklist_mem(area);     // prior memory state to store
  Node_List worklist_store(area);   // possible-def to explore
  Node_List worklist_visited(area); // visited mergemem nodes
  Node_List non_early_stores(area); // all relevant stores outside of early
  bool must_raise_LCA = false;

#ifdef TRACK_PHI_INPUTS
  // %%% This extra checking fails because MergeMem nodes are not GVNed.
  // Provide "phi_inputs" to check if every input to a PhiNode is from the
  // original memory state.  This indicates a PhiNode which should not
  // prevent the load from sinking.  For such a block, set_raise_LCA_mark
  // may be overly conservative.
  // Mechanism: count inputs seen for each Phi encountered in worklist_store.
  DEBUG_ONLY(GrowableArray<uint> phi_inputs(area, C->unique(),0,0));
#endif

  // 'load' uses some memory state; look for users of the same state.
  // Recurse through MergeMem nodes to the stores that use them.

  // Each of these stores is a possible definition of memory
  // that 'load' needs to use.  We need to force 'load'
  // to occur before each such store.  When the store is in
  // the same block as 'load', we insert an anti-dependence
  // edge load->store.

  // The relevant stores "nearby" the load consist of a tree rooted
  // at initial_mem, with internal nodes of type MergeMem.
  // Therefore, the branches visited by the worklist are of this form:
  //    initial_mem -> (MergeMem ->)* store
  // The anti-dependence constraints apply only to the fringe of this tree.
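  // Illustrative shape of the search tree (hypothetical example):
  //
  //                initial_mem
  //               /     |      \
  //         MergeMem  store1  store2
  //          /    \
  //      store3  store4
  //
  // Only the leaves (store1..store4) can interfere with the load; the
  // internal MergeMem nodes merely fan the search out.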

  Node* initial_mem = load->in(MemNode::Memory);
  worklist_store.push(initial_mem);
  worklist_visited.push(initial_mem);
  worklist_mem.push(NULL);
  while (worklist_store.size() > 0) {
    // Examine a nearby store to see if it might interfere with our load.
    Node* mem   = worklist_mem.pop();
    Node* store = worklist_store.pop();
    uint op = store->Opcode();

    // MergeMems do not directly have anti-deps.
    // Treat them as internal nodes in a forward tree of memory states,
    // the leaves of which are each a 'possible-def'.
    if (store == initial_mem    // root (exclusive) of tree we are searching
        || op == Op_MergeMem    // internal node of tree we are searching
        ) {
      mem = store;   // It's not a possibly interfering store.
      if (store == initial_mem)
        initial_mem = NULL;  // only process initial memory once

      for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
        store = mem->fast_out(i);
        if (store->is_MergeMem()) {
          // Be sure we don't get into combinatorial problems.
          // (Allow phis to be repeated; they can merge two relevant states.)
          uint j = worklist_visited.size();
          for (; j > 0; j--) {
            if (worklist_visited.at(j-1) == store)  break;
          }
          if (j > 0)  continue; // already on work list; do not repeat
          worklist_visited.push(store);
        }
        worklist_mem.push(mem);
        worklist_store.push(store);
      }
      continue;
    }

    if (op == Op_MachProj || op == Op_Catch)   continue;
    if (store->needs_anti_dependence_check())  continue;  // not really a store

    // Compute the alias index.  Loads and stores with different alias
    // indices do not need anti-dependence edges.  Wide MemBars are
    // anti-dependent on everything (except immutable memories).
    const TypePtr* adr_type = store->adr_type();
    if (!C->can_alias(adr_type, load_alias_idx))  continue;

    // Most slow-path runtime calls do NOT modify Java memory, but
    // they can block and so write Raw memory.
    if (store->is_Mach()) {
      MachNode* mstore = store->as_Mach();
      if (load_alias_idx != Compile::AliasIdxRaw) {
        // Check for call into the runtime using the Java calling
        // convention (and from there into a wrapper); it has no
        // _method.  Can't do this optimization for Native calls because
        // they CAN write to Java memory.
        if (mstore->ideal_Opcode() == Op_CallStaticJava) {
          assert(mstore->is_MachSafePoint(), "");
          MachSafePointNode* ms = (MachSafePointNode*) mstore;
          assert(ms->is_MachCallJava(), "");
          MachCallJavaNode* mcj = (MachCallJavaNode*) ms;
          if (mcj->_method == NULL) {
            // These runtime calls do not write to Java visible memory
            // (other than Raw) and so do not require anti-dependence edges.
            continue;
          }
        }
        // Same for SafePoints: they read/write Raw but only read otherwise.
        // This is basically a workaround for SafePoints only defining control
        // instead of control + memory.
        if (mstore->ideal_Opcode() == Op_SafePoint)
          continue;
      } else {
        // Some raw memory, such as the load of "top" at an allocation,
        // can be control dependent on the previous safepoint. See
        // comments in GraphKit::allocate_heap() about control input.
        // Inserting an anti-dep between such a safepoint and a use
        // creates a cycle, and will cause a subsequent failure in
        // local scheduling.  (BugId 4919904)
        // (%%% How can a control input be a safepoint and not a projection??)
        if (mstore->ideal_Opcode() == Op_SafePoint && load->in(0) == mstore)
          continue;
      }
    }

    // Identify a block that the current load must be above,
    // or else observe that 'store' is all the way up in the
    // earliest legal block for 'load'.  In the latter case,
    // immediately insert an anti-dependence edge.
    Block* store_block = get_block_for_node(store);
    assert(store_block != NULL, "unused killing projections skipped above");

    if (store->is_Phi()) {
      // 'load' uses memory which is one (or more) of the Phi's inputs.
      // It must be scheduled not before the Phi, but rather before
      // each of the relevant Phi inputs.
      //
      // Instead of finding the LCA of all inputs to a Phi that match 'mem',
      // we mark each corresponding predecessor block and do a combined
      // hoisting operation later (raise_LCA_above_marks).
      //
      // Do not assert(store_block != early, "Phi merging memory after access")
      // PhiNode may be at start of block 'early' with backedge to 'early'
      DEBUG_ONLY(bool found_match = false);
      for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) {
        if (store->in(j) == mem) {   // Found matching input?
          DEBUG_ONLY(found_match = true);
          Block* pred_block = get_block_for_node(store_block->pred(j));
          if (pred_block != early) {
            // If any predecessor of the Phi matches the load's "early block",
            // we do not need a precedence edge between the Phi and 'load'
            // since the load will be forced into a block preceding the Phi.
            pred_block->set_raise_LCA_mark(load_index);
            assert(!LCA_orig->dominates(pred_block) ||
                   early->dominates(pred_block), "early is high enough");
            must_raise_LCA = true;
          } else {
            // anti-dependent upon PHI pinned below 'early', no edge needed
            LCA = early;             // but cannot schedule below 'early'
          }
        }
      }
      assert(found_match, "no worklist bug");
#ifdef TRACK_PHI_INPUTS
#ifdef ASSERT
      // This assert asks about correct handling of PhiNodes, which may not
      // have all input edges directly from 'mem'. See BugId 4621264
      int num_mem_inputs = phi_inputs.at_grow(store->_idx,0) + 1;
      // Increment by exactly one even if there are multiple copies of 'mem'
      // coming into the phi, because we will run this block several times
      // if there are several copies of 'mem'.  (That's how DU iterators work.)
      phi_inputs.at_put(store->_idx, num_mem_inputs);
      assert(PhiNode::Input + num_mem_inputs < store->req(),
             "Expect at least one phi input will not be from original memory state");
#endif //ASSERT
#endif //TRACK_PHI_INPUTS
    } else if (store_block != early) {
      // 'store' is between the current LCA and earliest possible block.
      // Label its block, and decide later on how to raise the LCA
      // to include the effect on LCA of this store.
      // If this store's block gets chosen as the raised LCA, we
      // will find him on the non_early_stores list and stick him
      // with a precedence edge.
      // (But, don't bother if LCA is already raised all the way.)
      if (LCA != early) {
        store_block->set_raise_LCA_mark(load_index);
        must_raise_LCA = true;
        non_early_stores.push(store);
      }
    } else {
      // Found a possibly-interfering store in the load's 'early' block.
      // This means 'load' cannot sink at all in the dominator tree.
      // Add an anti-dep edge, and squeeze 'load' into the highest block.
      assert(store != load->in(0), "dependence cycle found");
      if (verify) {
        assert(store->find_edge(load) != -1, "missing precedence edge");
      } else {
        store->add_prec(load);
      }
      LCA = early;
      // This turns off the process of gathering non_early_stores.
    }
  }
  // (Worklist is now empty; all nearby stores have been visited.)

  // Finished if 'load' must be scheduled in its 'early' block.
  // If we found any stores there, they have already been given
  // precedence edges.
  if (LCA == early)  return LCA;

  // We get here only if there are no possibly-interfering stores
  // in the load's 'early' block.  Move LCA up above all predecessors
  // which contain stores we have noted.
  //
  // The raised LCA block can be a home to such interfering stores,
  // but its predecessors must not contain any such stores.
  //
  // The raised LCA will be a lower bound for placing the load,
  // preventing the load from sinking past any block containing
  // a store that may invalidate the memory state required by 'load'.
  if (must_raise_LCA)
    LCA = raise_LCA_above_marks(LCA, load->_idx, early, this);
  if (LCA == early)  return LCA;

  // Insert anti-dependence edges from 'load' to each store
  // in the non-early LCA block.
  // Mine the non_early_stores list for such stores.
  if (LCA->raise_LCA_mark() == load_index) {
    while (non_early_stores.size() > 0) {
      Node* store = non_early_stores.pop();
      Block* store_block = get_block_for_node(store);
      if (store_block == LCA) {
        // add anti_dependence from store to load in its own block
        assert(store != load->in(0), "dependence cycle found");
        if (verify) {
          assert(store->find_edge(load) != -1, "missing precedence edge");
        } else {
          store->add_prec(load);
        }
      } else {
        assert(store_block->raise_LCA_mark() == load_index, "block was marked");
        // Any other stores we found must be either inside the new LCA
        // or else outside the original LCA.  In the latter case, they
        // did not interfere with any use of 'load'.
        assert(LCA->dominates(store_block)
               || !LCA_orig->dominates(store_block), "no stray stores");
      }
    }
  }

  // Return the highest block containing stores; any stores
  // within that block have been given anti-dependence edges.
  return LCA;
}

// This class is used to iterate backwards over the nodes in the graph.

class Node_Backward_Iterator {

private:
  Node_Backward_Iterator();

public:
  // Constructor for the iterator
  Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg);

  // Advance the iteration and return the next node; returns NULL when done
  Node *next();

private:
  VectorSet   &_visited;
  Node_List   &_stack;
  PhaseCFG &_cfg;
};

// Constructor for the Node_Backward_Iterator
Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg)
  : _visited(visited), _stack(stack), _cfg(cfg) {
  // The stack should contain exactly the root
  stack.clear();
  stack.push(root);

  // Clear the visited bits
  visited.Clear();
}

// Iterator for the Node_Backward_Iterator
Node *Node_Backward_Iterator::next() {

  // If the _stack is empty, then just return NULL: finished.
  if ( !_stack.size() )
    return NULL;

  // '_stack' is emulating a real _stack.  The 'visit-all-users' loop has been
  // made stateless, so I do not need to record the index 'i' on my _stack.
  // Instead I visit all users each time, scanning for unvisited users.
  // I visit unvisited not-anti-dependence users first, then anti-dependent
  // children next.
  Node *self = _stack.pop();

  // I cycle here when I am entering a deeper level of recursion.
  // The key variable 'self' was set prior to jumping here.
  while( 1 ) {

    _visited.set(self->_idx);

    // Now schedule all uses as late as possible.
    const Node* src = self->is_Proj() ? self->in(0) : self;
    uint src_rpo = _cfg.get_block_for_node(src)->_rpo;

    // Schedule all nodes in a post-order visit
    Node *unvisited = NULL;  // Unvisited anti-dependent Node, if any

    // Scan for unvisited nodes
    for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
      // For all uses, schedule late
      Node* n = self->fast_out(i); // Use

      // Skip already visited children
      if ( _visited.test(n->_idx) )
        continue;

      // do not traverse backward control edges
      Node *use = n->is_Proj() ? n->in(0) : n;
      uint use_rpo = _cfg.get_block_for_node(use)->_rpo;

      if ( use_rpo < src_rpo )
        continue;

      // Phi nodes always precede uses in a basic block
      if ( use_rpo == src_rpo && use->is_Phi() )
        continue;

      unvisited = n;      // Found unvisited

      // Check for possible-anti-dependent
      if( !n->needs_anti_dependence_check() )
        break;            // Not visited, not anti-dep; schedule it NOW
    }

    // Did I find an unvisited not-anti-dependent Node?
    if ( !unvisited )
      break;                  // All done with children; post-visit 'self'

    // Visit the unvisited Node.  Contains the obvious push to
    // indicate I'm entering a deeper level of recursion.  I push the
    // old state onto the _stack and set a new state and loop (recurse).
    _stack.push(self);
    self = unvisited;
  } // End recursion loop

  return self;
}

//------------------------------ComputeLatenciesBackwards----------------------
// Compute the latency of all the instructions.
void PhaseCFG::compute_latencies_backwards(VectorSet &visited, Node_List &stack) {
#ifndef PRODUCT
  if (trace_opto_pipelining())
    tty->print("\n#---- ComputeLatenciesBackwards ----\n");
#endif

  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
  Node *n;

  // Walk over all the nodes from last to first
  while ((n = iter.next()) != NULL) {
    // Set the latency for the definitions of this instruction
    partial_latency_of_defs(n);
  }
} // end ComputeLatenciesBackwards

//------------------------------partial_latency_of_defs------------------------
// Compute the latency impact of this node on all defs.  This computes
// a number that increases as we approach the beginning of the routine.
void PhaseCFG::partial_latency_of_defs(Node *n) {
  // Set the latency for this instruction
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# latency_to_inputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
    dump();
  }
#endif

  if (n->is_Proj()) {
    n = n->in(0);
  }

  if (n->is_Root()) {
    return;
  }

  uint nlen = n->len();
  uint use_latency = get_latency_for_node(n);
  uint use_pre_order = get_block_for_node(n)->_pre_order;

  for (uint j = 0; j < nlen; j++) {
    Node *def = n->in(j);

    if (!def || def == n) {
      continue;
    }

    // Walk backwards thru projections
    if (def->is_Proj()) {
      def = def->in(0);
    }

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("#    in(%2d): ", j);
      def->dump();
    }
#endif

    // If the defining block is not known, assume it is ok
    Block *def_block = get_block_for_node(def);
    uint def_pre_order = def_block ? def_block->_pre_order : 0;

    if ((use_pre_order <  def_pre_order) || (use_pre_order == def_pre_order && n->is_Phi())) {
      continue;
    }

    uint delta_latency = n->latency(j);
    uint current_latency = delta_latency + use_latency;

    if (get_latency_for_node(def) < current_latency) {
      set_latency_for_node(def, current_latency);
    }

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d", use_latency, j, delta_latency, current_latency, def->_idx, get_latency_for_node(def));
    }
#endif
  }
}
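// Numeric sketch (hypothetical values): if node n carries latency 5 and the
// edge latency n->latency(j) is 2, then def's latency is raised to at least
// 7.  Each def thus accumulates the longest latency path from itself to the
// end of the routine, which is why the numbers grow toward the entry.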

//------------------------------latency_from_use-------------------------------
// Compute the latency of a specific use
int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
  // If self-reference, return no latency
  if (use == n || use->is_Root()) {
    return 0;
  }

  uint def_pre_order = get_block_for_node(def)->_pre_order;
  uint latency = 0;

  // If the use is not a projection, then it is simple...
  if (!use->is_Proj()) {
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("#    out(): ");
      use->dump();
    }
#endif

    uint use_pre_order = get_block_for_node(use)->_pre_order;

    if (use_pre_order < def_pre_order)
      return 0;

    if (use_pre_order == def_pre_order && use->is_Phi())
      return 0;

    uint nlen = use->len();
    uint nl = get_latency_for_node(use);

    for ( uint j=0; j<nlen; j++ ) {
      if (use->in(j) == n) {
        // Change this if we want local latencies
        uint ul = use->latency(j);
        uint  l = ul + nl;
        if (latency < l) latency = l;
#ifndef PRODUCT
        if (trace_opto_pipelining()) {
          tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, latency = %d",
                        nl, j, ul, l, latency);
        }
#endif
      }
    }
  } else {
    // This is a projection, just grab the latency of the use(s)
    for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
      uint l = latency_from_use(use, def, use->fast_out(j));
      if (latency < l) latency = l;
    }
  }

  return latency;
}

//------------------------------latency_from_uses------------------------------
// Compute the latency of this instruction relative to all of its uses.
// This computes a number that increases as we approach the beginning of the
// routine.
void PhaseCFG::latency_from_uses(Node *n) {
  // Set the latency for this instruction
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# latency_from_outputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
    dump();
  }
#endif
  uint latency=0;
  const Node *def = n->is_Proj() ? n->in(0): n;

  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    uint l = latency_from_use(n, def, n->fast_out(i));

    if (latency < l) latency = l;
  }

  set_latency_for_node(n, latency);
}

//------------------------------hoist_to_cheaper_block-------------------------
// Pick a block for node self, between early and LCA, that is a cheaper
// alternative to LCA.
Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
  const double delta = 1+PROB_UNLIKELY_MAG(4);
  Block* least       = LCA;
  double least_freq  = least->_freq;
  uint target        = get_latency_for_node(self);
  uint start_latency = get_latency_for_node(LCA->head());
  uint end_latency   = get_latency_for_node(LCA->get_node(LCA->end_idx()));
  bool in_latency    = (target <= start_latency);
  const Block* root_block = get_block_for_node(_root);

  // Turn off latency scheduling if scheduling is just plain off
  if (!C->do_scheduling())
    in_latency = true;

  // Do not hoist (to cover latency) instructions which target a
  // single register.  Hoisting stretches the live range of the
  // single register and may force spilling.
  MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
  if (mach && mach->out_RegMask().is_bound1() && mach->out_RegMask().is_NotEmpty())
    in_latency = true;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# Find cheaper block for latency %d: ", get_latency_for_node(self));
    self->dump();
    tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
      LCA->_pre_order,
      LCA->head()->_idx,
      start_latency,
      LCA->get_node(LCA->end_idx())->_idx,
      end_latency,
      least_freq);
  }
#endif

  int cand_cnt = 0;  // number of candidates tried

  // Walk up the dominator tree from LCA (Lowest common ancestor) to
  // the earliest legal location.  Capture the least execution frequency.
  while (LCA != early) {
    LCA = LCA->_idom;         // Follow up the dominator tree

    if (LCA == NULL) {
      // Bailout without retry
      assert(false, "graph should be schedulable");
      C->record_method_not_compilable("late schedule failed: LCA == NULL");
      return least;
    }

    // Don't hoist machine instructions to the root basic block
    if (mach && LCA == root_block)
      break;

    uint start_lat = get_latency_for_node(LCA->head());
    uint end_idx   = LCA->end_idx();
    uint end_lat   = get_latency_for_node(LCA->get_node(end_idx));
    double LCA_freq = LCA->_freq;
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
        LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq);
    }
#endif
    cand_cnt++;
    if (LCA_freq < least_freq              || // Better Frequency
        (StressGCM && Compile::randomized_select(cand_cnt)) || // Should be randomly accepted in stress mode
         (!StressGCM                    &&    // Otherwise, choose with latency
          !in_latency                   &&    // No block containing latency
          LCA_freq < least_freq * delta &&    // No worse frequency
          target >= end_lat             &&    // within latency range
          !self->is_iteratively_computed() )  // But don't hoist IV increments
             // because they may end up above other uses of their phi forcing
             // their result register to be different from their input.
       ) {
      least = LCA;            // Found cheaper block
      least_freq = LCA_freq;
      start_latency = start_lat;
      end_latency = end_lat;
      if (target <= start_lat)
        in_latency = true;
    }
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print_cr("#  Choose block B%d with start latency=%d and freq=%g",
      least->_pre_order, start_latency, least_freq);
  }
#endif

  // See if the latency needs to be updated
  if (target < end_latency) {
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("#  Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
    }
#endif
    set_latency_for_node(self, end_latency);
    partial_latency_of_defs(self);
  }

  return least;
}


//------------------------------schedule_late-----------------------------------
// Now schedule all codes as LATE as possible.  This is the LCA in the
// dominator tree of all USES of a value.  Pick the block with the least
// loop nesting depth that is lowest in the dominator tree.
extern const char must_clone[];
void PhaseCFG::schedule_late(VectorSet &visited, Node_List &stack) {
#ifndef PRODUCT
  if (trace_opto_pipelining())
    tty->print("\n#---- schedule_late ----\n");
#endif

  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
  Node *self;

  // Walk over all the nodes from last to first
  while ((self = iter.next()) != NULL) {
    Block* early = get_block_for_node(self); // Earliest legal placement

    if (self->is_top()) {
      // Top node goes in bb #2 with other constants.
      // It must be special-cased, because it has no out edges.
      early->add_inst(self);
      continue;
    }

    // No uses, just terminate
    if (self->outcnt() == 0) {
      assert(self->is_MachProj(), "sanity");
      continue;                   // Must be a dead machine projection
    }

    // If node is pinned in the block, then no scheduling can be done.
    if( self->pinned() )          // Pinned in block?
      continue;

    MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
    if (mach) {
      switch (mach->ideal_Opcode()) {
      case Op_CreateEx:
        // Don't move exception creation
        early->add_inst(self);
        continue;
        break;
      case Op_CheckCastPP:
        // Don't move CheckCastPP nodes away from their input, if the input
        // is a rawptr (5071820).
        Node *def = self->in(1);
        if (def != NULL && def->bottom_type()->base() == Type::RawPtr) {
          early->add_inst(self);
#ifdef ASSERT
          _raw_oops.push(def);
#endif
          continue;
        }
        break;
      }
    }

    // Gather LCA of all uses
    Block *LCA = NULL;
    {
      for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
        // For all uses, find LCA
        Node* use = self->fast_out(i);
        LCA = raise_LCA_above_use(LCA, use, self, this);
      }
    }  // (Hide defs of imax, i from rest of block.)

    // Place temps in the block of their use.  This isn't a
    // requirement for correctness but it reduces useless
    // interference between temps and other nodes.
    if (mach != NULL && mach->is_MachTemp()) {
      map_node_to_block(self, LCA);
      LCA->add_inst(self);
      continue;
    }

    // Check if 'self' could be anti-dependent on memory
    if (self->needs_anti_dependence_check()) {
      // Hoist LCA above possible-defs and insert anti-dependences to
      // defs in new LCA block.
      LCA = insert_anti_dependences(LCA, self);
    }

    if (early->_dom_depth > LCA->_dom_depth) {
      // Somehow the LCA has moved above the earliest legal point.
      // (One way this can happen is via memory_early_block.)
      if (C->subsume_loads() == true && !C->failing()) {
        // Retry with subsume_loads == false
        // If this is the first failure, the sentinel string will "stick"
        // to the Compile object, and the C2Compiler will see it and retry.
        C->record_failure(C2Compiler::retry_no_subsuming_loads());
      } else {
        // Bailout without retry when (early->_dom_depth > LCA->_dom_depth)
        assert(false, "graph should be schedulable");
        C->record_method_not_compilable("late schedule failed: incorrect graph");
      }
      return;
    }

    // If there is no opportunity to hoist, then we're done.
    // In stress mode, try to hoist even the single operations.
    bool try_to_hoist = StressGCM || (LCA != early);

    // Must clone guys stay next to use; no hoisting allowed.
    // Also cannot hoist guys that alter memory or are otherwise not
    // allocatable (hoisting can make a value live longer, leading to
    // anti and output dependency problems which are normally resolved
    // by the register allocator giving everyone a different register).
    if (mach != NULL && must_clone[mach->ideal_Opcode()])
      try_to_hoist = false;

    Block* late = NULL;
    if (try_to_hoist) {
      // Now find the block with the least execution frequency.
      // Start at the latest schedule and work up to the earliest schedule
      // in the dominator tree.  Thus the Node will dominate all its uses.
      late = hoist_to_cheaper_block(LCA, early, self);
    } else {
      // Just use the LCA of the uses.
      late = LCA;
    }

    // Put the node into target block
    schedule_node_into_block(self, late);

#ifdef ASSERT
    if (self->needs_anti_dependence_check()) {
      // since precedence edges are only inserted when we're sure they
      // are needed make sure that after placement in a block we don't
      // need any new precedence edges.
      verify_anti_dependences(late, self);
    }
#endif
  } // Loop until all nodes have been visited

} // end ScheduleLate
1305 
1306 //------------------------------GlobalCodeMotion-------------------------------
1307 void PhaseCFG::global_code_motion() {
1308   ResourceMark rm;
1309 
1310 #ifndef PRODUCT
1311   if (trace_opto_pipelining()) {
1312     tty->print("\n---- Start GlobalCodeMotion ----\n");
1313   }
1314 #endif
1315 
1316   // Initialize the node to block mapping for things on the proj_list
1317   for (uint i = 0; i < _matcher.number_of_projections(); i++) {
1318     unmap_node_from_block(_matcher.get_projection(i));
1319   }
1320 
1321   // Set the basic block for Nodes pinned into blocks
1322   Arena* arena = Thread::current()->resource_area();
1323   VectorSet visited(arena);
1324   schedule_pinned_nodes(visited);
1325 
1326   // Find the earliest Block any instruction can be placed in.  Some
1327   // instructions are pinned into Blocks.  Unpinned instructions can
  // appear in the last block in which all their inputs occur.
1329   visited.Clear();
1330   Node_List stack(arena);
  // Pre-grow the list to roughly half the live node count, avoiding
  // repeated reallocation during the traversal.
1332   stack.map((C->live_nodes() >> 1) + 16, NULL);
1333   if (!schedule_early(visited, stack)) {
1334     // Bailout without retry
1335     C->record_method_not_compilable("early schedule failed");
1336     return;
1337   }
1338 
1339   // Build Def-Use edges.
1340   // Compute the latency information (via backwards walk) for all the
1341   // instructions in the graph
1342   _node_latency = new GrowableArray<uint>(); // resource_area allocation
1343 
1344   if (C->do_scheduling()) {
1345     compute_latencies_backwards(visited, stack);
1346   }
1347 
1348   // Now schedule all codes as LATE as possible.  This is the LCA in the
1349   // dominator tree of all USES of a value.  Pick the block with the least
1350   // loop nesting depth that is lowest in the dominator tree.
1351   // ( visited.Clear() called in schedule_late()->Node_Backward_Iterator() )
1352   schedule_late(visited, stack);
1353   if (C->failing()) {
    // schedule_late fails only when the graph is incorrect.
1355     assert(!VerifyGraphEdges, "verification should have failed");
1356     return;
1357   }
1358 
1359 #ifndef PRODUCT
1360   if (trace_opto_pipelining()) {
1361     tty->print("\n---- Detect implicit null checks ----\n");
1362   }
1363 #endif
1364 
1365   // Detect implicit-null-check opportunities.  Basically, find NULL checks
1366   // with suitable memory ops nearby.  Use the memory op to do the NULL check.
1367   // I can generate a memory op if there is not one nearby.
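  // For example (illustrative, Java-level view):
  //   if (p == null) deoptimize();  // explicit test
  //   x = p.fld;                    // memory op
  // becomes a bare load; a SIGSEGV on a null 'p' is caught by the signal
  // handler and routed to the uncommon trap, so the hot path pays no
  // explicit test.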
1368   if (C->is_method_compilation()) {
1369     // By reversing the loop direction we get a very minor gain on mpegaudio.
1370     // Feel free to revert to a forward loop for clarity.
1371     // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
1372     for (int i = _matcher._null_check_tests.size() - 2; i >= 0; i -= 2) {
1373       Node* proj = _matcher._null_check_tests[i];
1374       Node* val  = _matcher._null_check_tests[i + 1];
1375       Block* block = get_block_for_node(proj);
1376       implicit_null_check(block, proj, val, C->allowed_deopt_reasons());
1377       // The implicit_null_check will only perform the transformation
1378       // if the null branch is truly uncommon, *and* it leads to an
1379       // uncommon trap.  Combined with the too_many_traps guards
1380       // above, this prevents SEGV storms reported in 6366351,
1381       // by recompiling offending methods without this optimization.
1382     }
1383   }
1384 
1385 #ifndef PRODUCT
1386   if (trace_opto_pipelining()) {
1387     tty->print("\n---- Start Local Scheduling ----\n");
1388   }
1389 #endif
1390 
1391   // Schedule locally.  Right now a simple topological sort.
1392   // Later, do a real latency aware scheduler.
1393   GrowableArray<int> ready_cnt(C->unique(), C->unique(), -1);
1394   visited.Clear();
1395   for (uint i = 0; i < number_of_blocks(); i++) {
1396     Block* block = get_block(i);
1397     if (!schedule_local(block, ready_cnt, visited)) {
1398       if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
1399         C->record_method_not_compilable("local schedule failed");
1400       }
1401       return;
1402     }
1403   }
1404 
  // If we inserted any instructions between a Call and its CatchNode,
1406   // clone the instructions on all paths below the Catch.
1407   for (uint i = 0; i < number_of_blocks(); i++) {
1408     Block* block = get_block(i);
1409     call_catch_cleanup(block);
1410   }
1411 
1412 #ifndef PRODUCT
1413   if (trace_opto_pipelining()) {
1414     tty->print("\n---- After GlobalCodeMotion ----\n");
1415     for (uint i = 0; i < number_of_blocks(); i++) {
1416       Block* block = get_block(i);
1417       block->dump();
1418     }
1419   }
1420 #endif
  // _node_latency is dead past this point; poison the pointer so any
  // stray use fails fast.
  _node_latency = (GrowableArray<uint> *)0xdeadbeef;
1423 }
1424 
1425 bool PhaseCFG::do_global_code_motion() {
1426 
1427   build_dominator_tree();
1428   if (C->failing()) {
1429     return false;
1430   }
1431 
1432   NOT_PRODUCT( C->verify_graph_edges(); )
1433 
1434   estimate_block_frequency();
1435 
1436   global_code_motion();
1437 
1438   if (C->failing()) {
1439     return false;
1440   }
1441 
1442   return true;
1443 }
1444 
1445 //------------------------------Estimate_Block_Frequency-----------------------
1446 // Estimate block frequencies based on IfNode probabilities.
1447 void PhaseCFG::estimate_block_frequency() {
1448 
1449   // Force conditional branches leading to uncommon traps to be unlikely,
1450   // not because we get to the uncommon_trap with less relative frequency,
1451   // but because an uncommon_trap typically causes a deopt, so we only get
1452   // there once.
1453   if (C->do_freq_based_layout()) {
1454     Block_List worklist;
1455     Block* root_blk = get_block(0);
1456     for (uint i = 1; i < root_blk->num_preds(); i++) {
1457       Block *pb = get_block_for_node(root_blk->pred(i));
1458       if (pb->has_uncommon_code()) {
1459         worklist.push(pb);
1460       }
1461     }
1462     while (worklist.size() > 0) {
1463       Block* uct = worklist.pop();
1464       if (uct == get_root_block()) {
1465         continue;
1466       }
1467       for (uint i = 1; i < uct->num_preds(); i++) {
1468         Block *pb = get_block_for_node(uct->pred(i));
1469         if (pb->_num_succs == 1) {
1470           worklist.push(pb);
1471         } else if (pb->num_fall_throughs() == 2) {
1472           pb->update_uncommon_branch(uct);
1473         }
1474       }
1475     }
1476   }
1477 
1478   // Create the loop tree and calculate loop depth.
1479   _root_loop = create_loop_tree();
1480   _root_loop->compute_loop_depth(0);
1481 
1482   // Compute block frequency of each block, relative to a single loop entry.
1483   _root_loop->compute_freq();
1484 
1485   // Adjust all frequencies to be relative to a single method entry
1486   _root_loop->_freq = 1.0;
1487   _root_loop->scale_freq();
1488 
  // Save outermost loop frequency for the LRG frequency threshold
1490   _outer_loop_frequency = _root_loop->outer_loop_freq();
1491 
  // Force paths ending at uncommon traps to be infrequent.
1493   if (!C->do_freq_based_layout()) {
1494     Block_List worklist;
1495     Block* root_blk = get_block(0);
1496     for (uint i = 1; i < root_blk->num_preds(); i++) {
1497       Block *pb = get_block_for_node(root_blk->pred(i));
1498       if (pb->has_uncommon_code()) {
1499         worklist.push(pb);
1500       }
1501     }
1502     while (worklist.size() > 0) {
1503       Block* uct = worklist.pop();
1504       uct->_freq = PROB_MIN;
1505       for (uint i = 1; i < uct->num_preds(); i++) {
1506         Block *pb = get_block_for_node(uct->pred(i));
1507         if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
1508           worklist.push(pb);
1509         }
1510       }
1511     }
1512   }
1513 
1514 #ifdef ASSERT
1515   for (uint i = 0; i < number_of_blocks(); i++) {
1516     Block* b = get_block(i);
1517     assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency");
1518   }
1519 #endif
1520 
1521 #ifndef PRODUCT
1522   if (PrintCFGBlockFreq) {
1523     tty->print_cr("CFG Block Frequencies");
1524     _root_loop->dump_tree();
1525     if (Verbose) {
1526       tty->print_cr("PhaseCFG dump");
1527       dump();
1528       tty->print_cr("Node dump");
1529       _root->dump(99999);
1530     }
1531   }
1532 #endif
1533 }
1534 
1535 //----------------------------create_loop_tree--------------------------------
1536 // Create a loop tree from the CFG
1537 CFGLoop* PhaseCFG::create_loop_tree() {
1538 
1539 #ifdef ASSERT
1540   assert(get_block(0) == get_root_block(), "first block should be root block");
1541   for (uint i = 0; i < number_of_blocks(); i++) {
1542     Block* block = get_block(i);
    // Check that the _loop fields are clear...we could clear them if not.
1544     assert(block->_loop == NULL, "clear _loop expected");
1545     // Sanity check that the RPO numbering is reflected in the _blocks array.
    // It doesn't have to be for the loop tree to be built, but if it is not,
    // then the blocks have been reordered since dominator graph building,
    // which calls the RPO numbering into question.
1549     assert(block->_rpo == i, "unexpected reverse post order number");
1550   }
1551 #endif
1552 
1553   int idct = 0;
1554   CFGLoop* root_loop = new CFGLoop(idct++);
1555 
1556   Block_List worklist;
1557 
1558   // Assign blocks to loops
  for (uint i = number_of_blocks() - 1; i > 0; i--) { // skip Root block
1560     Block* block = get_block(i);
1561 
1562     if (block->head()->is_Loop()) {
1563       Block* loop_head = block;
1564       assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
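      // (A loop head has exactly two control inputs, LoopNode::EntryControl
      // and LoopNode::LoopBackControl, hence the assert above.)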
1565       Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
1566       Block* tail = get_block_for_node(tail_n);
1567 
1568       // Defensively filter out Loop nodes for non-single-entry loops.
1569       // For all reasonable loops, the head occurs before the tail in RPO.
1570       if (i <= tail->_rpo) {
1571 
1572         // The tail and (recursive) predecessors of the tail
1573         // are made members of a new loop.
1574 
1575         assert(worklist.size() == 0, "nonempty worklist");
1576         CFGLoop* nloop = new CFGLoop(idct++);
1577         assert(loop_head->_loop == NULL, "just checking");
1578         loop_head->_loop = nloop;
1579         // Add to nloop so push_pred() will skip over inner loops
1580         nloop->add_member(loop_head);
1581         nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this);
1582 
1583         while (worklist.size() > 0) {
1584           Block* member = worklist.pop();
1585           if (member != loop_head) {
1586             for (uint j = 1; j < member->num_preds(); j++) {
1587               nloop->push_pred(member, j, worklist, this);
1588             }
1589           }
1590         }
1591       }
1592     }
1593   }
1594 
1595   // Create a member list for each loop consisting
1596   // of both blocks and (immediate child) loops.
1597   for (uint i = 0; i < number_of_blocks(); i++) {
1598     Block* block = get_block(i);
1599     CFGLoop* lp = block->_loop;
1600     if (lp == NULL) {
1601       // Not assigned to a loop. Add it to the method's pseudo loop.
1602       block->_loop = root_loop;
1603       lp = root_loop;
1604     }
1605     if (lp == root_loop || block != lp->head()) { // loop heads are already members
1606       lp->add_member(block);
1607     }
1608     if (lp != root_loop) {
1609       if (lp->parent() == NULL) {
1610         // Not a nested loop. Make it a child of the method's pseudo loop.
1611         root_loop->add_nested_loop(lp);
1612       }
1613       if (block == lp->head()) {
1614         // Add nested loop to member list of parent loop.
1615         lp->parent()->add_member(lp);
1616       }
1617     }
1618   }
1619 
1620   return root_loop;
1621 }
1622 
1623 //------------------------------push_pred--------------------------------------
1624 void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) {
1625   Node* pred_n = blk->pred(i);
1626   Block* pred = cfg->get_block_for_node(pred_n);
1627   CFGLoop *pred_loop = pred->_loop;
1628   if (pred_loop == NULL) {
1629     // Filter out blocks for non-single-entry loops.
1630     // For all reasonable loops, the head occurs before the tail in RPO.
1631     if (pred->_rpo > head()->_rpo) {
1632       pred->_loop = this;
1633       worklist.push(pred);
1634     }
1635   } else if (pred_loop != this) {
1636     // Nested loop.
1637     while (pred_loop->_parent != NULL && pred_loop->_parent != this) {
1638       pred_loop = pred_loop->_parent;
1639     }
1640     // Make pred's loop be a child
1641     if (pred_loop->_parent == NULL) {
1642       add_nested_loop(pred_loop);
1643       // Continue with loop entry predecessor.
1644       Block* pred_head = pred_loop->head();
1645       assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
1646       assert(pred_head != head(), "loop head in only one loop");
1647       push_pred(pred_head, LoopNode::EntryControl, worklist, cfg);
1648     } else {
1649       assert(pred_loop->_parent == this && _parent == NULL, "just checking");
1650     }
1651   }
1652 }
1653 
1654 //------------------------------add_nested_loop--------------------------------
1655 // Make cl a child of the current loop in the loop tree.
1656 void CFGLoop::add_nested_loop(CFGLoop* cl) {
1657   assert(_parent == NULL, "no parent yet");
1658   assert(cl != this, "not my own parent");
1659   cl->_parent = this;
1660   CFGLoop* ch = _child;
1661   if (ch == NULL) {
1662     _child = cl;
1663   } else {
1664     while (ch->_sibling != NULL) { ch = ch->_sibling; }
1665     ch->_sibling = cl;
1666   }
1667 }
1668 
1669 //------------------------------compute_loop_depth-----------------------------
1670 // Store the loop depth in each CFGLoop object.
1671 // Recursively walk the children to do the same for them.
1672 void CFGLoop::compute_loop_depth(int depth) {
1673   _depth = depth;
1674   CFGLoop* ch = _child;
1675   while (ch != NULL) {
1676     ch->compute_loop_depth(depth + 1);
1677     ch = ch->_sibling;
1678   }
1679 }
1680 
1681 //------------------------------compute_freq-----------------------------------
1682 // Compute the frequency of each block and loop, relative to a single entry
1683 // into the dominating loop head.
1684 void CFGLoop::compute_freq() {
1685   // Bottom up traversal of loop tree (visit inner loops first.)
1686   // Set loop head frequency to 1.0, then transitively
1687   // compute frequency for all successors in the loop,
1688   // as well as for each exit edge.  Inner loops are
1689   // treated as single blocks with loop exit targets
1690   // as the successor blocks.
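  // For example (illustrative): an if-then-else diamond whose head has
  // frequency 1.0 and branch probability 0.7 propagates 0.7 and 0.3 to
  // the two arms; the merge block accumulates 0.7 + 0.3 = 1.0 again.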
1691 
1692   // Nested loops first
1693   CFGLoop* ch = _child;
1694   while (ch != NULL) {
1695     ch->compute_freq();
1696     ch = ch->_sibling;
1697   }
1698   assert (_members.length() > 0, "no empty loops");
1699   Block* hd = head();
1700   hd->_freq = 1.0f;
1701   for (int i = 0; i < _members.length(); i++) {
1702     CFGElement* s = _members.at(i);
1703     float freq = s->_freq;
1704     if (s->is_block()) {
1705       Block* b = s->as_Block();
1706       for (uint j = 0; j < b->_num_succs; j++) {
1707         Block* sb = b->_succs[j];
1708         update_succ_freq(sb, freq * b->succ_prob(j));
1709       }
1710     } else {
1711       CFGLoop* lp = s->as_CFGLoop();
1712       assert(lp->_parent == this, "immediate child");
1713       for (int k = 0; k < lp->_exits.length(); k++) {
1714         Block* eb = lp->_exits.at(k).get_target();
1715         float prob = lp->_exits.at(k).get_prob();
1716         update_succ_freq(eb, freq * prob);
1717       }
1718     }
1719   }
1720 
1721   // For all loops other than the outer, "method" loop,
1722   // sum and normalize the exit probability. The "method" loop
1723   // should keep the initial exit probability of 1, so that
1724   // inner blocks do not get erroneously scaled.
1725   if (_depth != 0) {
1726     // Total the exit probabilities for this loop.
1727     float exits_sum = 0.0f;
1728     for (int i = 0; i < _exits.length(); i++) {
1729       exits_sum += _exits.at(i).get_prob();
1730     }
1731 
1732     // Normalize the exit probabilities. Until now, the
1733     // probabilities estimate the possibility of exit per
1734     // a single loop iteration; afterward, they estimate
1735     // the probability of exit per loop entry.
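    // For example, per-iteration exit probabilities of 0.01 and 0.03
    // normalize to 0.25 and 0.75, and the saved sum (0.04) implies an
    // expected trip count of roughly 1/0.04 = 25 iterations.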
1736     for (int i = 0; i < _exits.length(); i++) {
1737       Block* et = _exits.at(i).get_target();
1738       float new_prob = 0.0f;
1739       if (_exits.at(i).get_prob() > 0.0f) {
1740         new_prob = _exits.at(i).get_prob() / exits_sum;
1741       }
1742       BlockProbPair bpp(et, new_prob);
1743       _exits.at_put(i, bpp);
1744     }
1745 
1746     // Save the total, but guard against unreasonable probability,
1747     // as the value is used to estimate the loop trip count.
1748     // An infinite trip count would blur relative block
1749     // frequencies.
    if (exits_sum > 1.0f) exits_sum = 1.0f;
1751     if (exits_sum < PROB_MIN) exits_sum = PROB_MIN;
1752     _exit_prob = exits_sum;
1753   }
1754 }
1755 
1756 //------------------------------succ_prob-------------------------------------
1757 // Determine the probability of reaching successor 'i' from the receiver block.
1758 float Block::succ_prob(uint i) {
1759   int eidx = end_idx();
1760   Node *n = get_node(eidx);  // Get ending Node
1761 
1762   int op = n->Opcode();
1763   if (n->is_Mach()) {
1764     if (n->is_MachNullCheck()) {
1765       // Can only reach here if called after lcm. The original Op_If is gone,
1766       // so we attempt to infer the probability from one or both of the
1767       // successor blocks.
1768       assert(_num_succs == 2, "expecting 2 successors of a null check");
1769       // If either successor has only one predecessor, then the
1770       // probability estimate can be derived using the
1771       // relative frequency of the successor and this block.
1772       if (_succs[i]->num_preds() == 2) {
1773         return _succs[i]->_freq / _freq;
1774       } else if (_succs[1-i]->num_preds() == 2) {
1775         return 1 - (_succs[1-i]->_freq / _freq);
1776       } else {
1777         // Estimate using both successor frequencies
1778         float freq = _succs[i]->_freq;
1779         return freq / (freq + _succs[1-i]->_freq);
1780       }
1781     }
1782     op = n->as_Mach()->ideal_Opcode();
1783   }
1784 
1785 
1786   // Switch on branch type
1787   switch( op ) {
1788   case Op_CountedLoopEnd:
1789   case Op_If: {
1790     assert (i < 2, "just checking");
1791     // Conditionals pass on only part of their frequency
1792     float prob  = n->as_MachIf()->_prob;
1793     assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
1794     // If succ[i] is the FALSE branch, invert path info
1795     if( get_node(i + eidx + 1)->Opcode() == Op_IfFalse ) {
1796       return 1.0f - prob; // not taken
1797     } else {
1798       return prob; // taken
1799     }
1800   }
1801 
1802   case Op_Jump:
    // Divide the frequency evenly among all successors
1804     return 1.0f/_num_succs;
1805 
1806   case Op_Catch: {
1807     const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
1808     if (ci->_con == CatchProjNode::fall_through_index) {
1809       // Fall-thru path gets the lion's share.
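      // E.g. with three successors the fall-thru keeps 1.0f - 3e-5 of the
      // frequency, assuming PROB_UNLIKELY_MAG(5) == 1e-5f.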
1810       return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
1811     } else {
1812       // Presume exceptional paths are equally unlikely
1813       return PROB_UNLIKELY_MAG(5);
1814     }
1815   }
1816 
1817   case Op_Root:
1818   case Op_Goto:
1819     // Pass frequency straight thru to target
1820     return 1.0f;
1821 
1822   case Op_NeverBranch:
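    // NeverBranch ends an infinite loop; its fake exit path is never taken.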
1823     return 0.0f;
1824 
1825   case Op_TailCall:
1826   case Op_TailJump:
1827   case Op_Return:
1828   case Op_Halt:
1829   case Op_Rethrow:
1830     // Do not push out freq to root block
1831     return 0.0f;
1832 
1833   default:
1834     ShouldNotReachHere();
1835   }
1836 
1837   return 0.0f;
1838 }
1839 
1840 //------------------------------num_fall_throughs-----------------------------
1841 // Return the number of fall-through candidates for a block
1842 int Block::num_fall_throughs() {
1843   int eidx = end_idx();
1844   Node *n = get_node(eidx);  // Get ending Node
1845 
1846   int op = n->Opcode();
1847   if (n->is_Mach()) {
1848     if (n->is_MachNullCheck()) {
      // In theory, either side can fall-thru; for simplicity's sake,
      // let's say only the false branch can for now.
1851       return 1;
1852     }
1853     op = n->as_Mach()->ideal_Opcode();
1854   }
1855 
1856   // Switch on branch type
1857   switch( op ) {
1858   case Op_CountedLoopEnd:
1859   case Op_If:
1860     return 2;
1861 
1862   case Op_Root:
1863   case Op_Goto:
1864     return 1;
1865 
1866   case Op_Catch: {
1867     for (uint i = 0; i < _num_succs; i++) {
1868       const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
1869       if (ci->_con == CatchProjNode::fall_through_index) {
1870         return 1;
1871       }
1872     }
1873     return 0;
1874   }
1875 
1876   case Op_Jump:
1877   case Op_NeverBranch:
1878   case Op_TailCall:
1879   case Op_TailJump:
1880   case Op_Return:
1881   case Op_Halt:
1882   case Op_Rethrow:
1883     return 0;
1884 
1885   default:
1886     ShouldNotReachHere();
1887   }
1888 
1889   return 0;
1890 }
1891 
1892 //------------------------------succ_fall_through-----------------------------
1893 // Return true if a specific successor could be fall-through target.
1894 bool Block::succ_fall_through(uint i) {
1895   int eidx = end_idx();
1896   Node *n = get_node(eidx);  // Get ending Node
1897 
1898   int op = n->Opcode();
1899   if (n->is_Mach()) {
1900     if (n->is_MachNullCheck()) {
      // In theory, either side can fall-thru; for simplicity's sake,
      // let's say only the false branch can for now.
1903       return get_node(i + eidx + 1)->Opcode() == Op_IfFalse;
1904     }
1905     op = n->as_Mach()->ideal_Opcode();
1906   }
1907 
1908   // Switch on branch type
1909   switch( op ) {
1910   case Op_CountedLoopEnd:
1911   case Op_If:
1912   case Op_Root:
1913   case Op_Goto:
1914     return true;
1915 
1916   case Op_Catch: {
1917     const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
1918     return ci->_con == CatchProjNode::fall_through_index;
1919   }
1920 
1921   case Op_Jump:
1922   case Op_NeverBranch:
1923   case Op_TailCall:
1924   case Op_TailJump:
1925   case Op_Return:
1926   case Op_Halt:
1927   case Op_Rethrow:
1928     return false;
1929 
1930   default:
1931     ShouldNotReachHere();
1932   }
1933 
1934   return false;
1935 }
1936 
1937 //------------------------------update_uncommon_branch------------------------
1938 // Update the probability of a two-branch to be uncommon
1939 void Block::update_uncommon_branch(Block* ub) {
1940   int eidx = end_idx();
1941   Node *n = get_node(eidx);  // Get ending Node
1942 
1943   int op = n->as_Mach()->ideal_Opcode();
1944 
  assert(op == Op_CountedLoopEnd || op == Op_If, "must be an If");
1946   assert(num_fall_throughs() == 2, "must be a two way branch block");
1947 
1948   // Which successor is ub?
1949   uint s;
1950   for (s = 0; s <_num_succs; s++) {
1951     if (_succs[s] == ub) break;
1952   }
1953   assert(s < 2, "uncommon successor must be found");
1954 
  // If ub is on the true path, make the probability small; if ub is on
  // the false path, make the probability large.
1957   bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse);
1958 
1959   // Get existing probability
1960   float p = n->as_MachIf()->_prob;
1961 
  if (invert) p = 1.0f - p;
  if (p > PROB_MIN) {
    p = PROB_MIN;
  }
  if (invert) p = 1.0f - p;
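  // Net effect: the probability of reaching ub is clamped to at most
  // PROB_MIN, regardless of which projection it sits on.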
1967 
1968   n->as_MachIf()->_prob = p;
1969 }
1970 
1971 //------------------------------update_succ_freq-------------------------------
1972 // Update the appropriate frequency associated with block 'b', a successor of
1973 // a block in this loop.
1974 void CFGLoop::update_succ_freq(Block* b, float freq) {
1975   if (b->_loop == this) {
1976     if (b == head()) {
1977       // back branch within the loop
      // Do nothing now; the loop-carried frequency will be
      // adjusted later in scale_freq().
1980     } else {
1981       // simple branch within the loop
1982       b->_freq += freq;
1983     }
1984   } else if (!in_loop_nest(b)) {
1985     // branch is exit from this loop
1986     BlockProbPair bpp(b, freq);
1987     _exits.append(bpp);
1988   } else {
1989     // branch into nested loop
1990     CFGLoop* ch = b->_loop;
1991     ch->_freq += freq;
1992   }
1993 }
1994 
1995 //------------------------------in_loop_nest-----------------------------------
1996 // Determine if block b is in the receiver's loop nest.
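// Walk b's loop chain upward until its depth matches the receiver's;
// b is in the receiver's nest iff the walk ends at the receiver.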
1997 bool CFGLoop::in_loop_nest(Block* b) {
1998   int depth = _depth;
1999   CFGLoop* b_loop = b->_loop;
2000   int b_depth = b_loop->_depth;
2001   if (depth == b_depth) {
2002     return true;
2003   }
2004   while (b_depth > depth) {
2005     b_loop = b_loop->_parent;
2006     b_depth = b_loop->_depth;
2007   }
2008   return b_loop == this;
2009 }
2010 
2011 //------------------------------scale_freq-------------------------------------
2012 // Scale frequency of loops and blocks by trip counts from outer loops
2013 // Do a top down traversal of loop tree (visit outer loops first.)
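// For example (illustrative): a loop entered with frequency 0.5 and an
// estimated trip count of 10 gets loop_freq = 5.0; a member block with
// relative frequency 0.8 then scales to an absolute frequency of 4.0.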
2014 void CFGLoop::scale_freq() {
2015   float loop_freq = _freq * trip_count();
2016   _freq = loop_freq;
2017   for (int i = 0; i < _members.length(); i++) {
2018     CFGElement* s = _members.at(i);
2019     float block_freq = s->_freq * loop_freq;
2020     if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY)
2021       block_freq = MIN_BLOCK_FREQUENCY;
2022     s->_freq = block_freq;
2023   }
2024   CFGLoop* ch = _child;
2025   while (ch != NULL) {
2026     ch->scale_freq();
2027     ch = ch->_sibling;
2028   }
2029 }
2030 
2031 // Frequency of outer loop
2032 float CFGLoop::outer_loop_freq() const {
2033   if (_child != NULL) {
2034     return _child->_freq;
2035   }
2036   return _freq;
2037 }
2038 
2039 #ifndef PRODUCT
2040 //------------------------------dump_tree--------------------------------------
2041 void CFGLoop::dump_tree() const {
2042   dump();
2043   if (_child != NULL)   _child->dump_tree();
2044   if (_sibling != NULL) _sibling->dump_tree();
2045 }
2046 
2047 //------------------------------dump-------------------------------------------
2048 void CFGLoop::dump() const {
2049   for (int i = 0; i < _depth; i++) tty->print("   ");
2050   tty->print("%s: %d  trip_count: %6.0f freq: %6.0f\n",
2051              _depth == 0 ? "Method" : "Loop", _id, trip_count(), _freq);
2052   for (int i = 0; i < _depth; i++) tty->print("   ");
2053   tty->print("         members:");
2054   int k = 0;
2055   for (int i = 0; i < _members.length(); i++) {
2056     if (k++ >= 6) {
2057       tty->print("\n              ");
2058       for (int j = 0; j < _depth+1; j++) tty->print("   ");
2059       k = 0;
2060     }
2061     CFGElement *s = _members.at(i);
2062     if (s->is_block()) {
2063       Block *b = s->as_Block();
2064       tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq);
2065     } else {
2066       CFGLoop* lp = s->as_CFGLoop();
2067       tty->print(" L%d(%6.3f)", lp->_id, lp->_freq);
2068     }
2069   }
2070   tty->print("\n");
2071   for (int i = 0; i < _depth; i++) tty->print("   ");
2072   tty->print("         exits:  ");
2073   k = 0;
2074   for (int i = 0; i < _exits.length(); i++) {
2075     if (k++ >= 7) {
2076       tty->print("\n              ");
2077       for (int j = 0; j < _depth+1; j++) tty->print("   ");
2078       k = 0;
2079     }
2080     Block *blk = _exits.at(i).get_target();
2081     float prob = _exits.at(i).get_prob();
2082     tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100));
2083   }
2084   tty->print("\n");
2085 }
2086 #endif