/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/block.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/machnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "runtime/deoptimization.hpp"
#if defined AD_MD_HPP
# include AD_MD_HPP
#elif defined TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
#elif defined TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#elif defined TARGET_ARCH_MODEL_aarch64
# include "adfiles/ad_aarch64.hpp"
#elif defined TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#elif defined TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
#elif defined TARGET_ARCH_MODEL_ppc_64
# include "adfiles/ad_ppc_64.hpp"
#endif


// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

// To avoid float value underflow
#define MIN_BLOCK_FREQUENCY 1.e-35f

//----------------------------schedule_node_into_block-------------------------
// Insert node n into block b. Look for projections of n and make sure they
// are in b also.
void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
  // Set basic block of n, Add n to b,
  map_node_to_block(n, b);
  b->add_inst(n);

  // After Matching, nearly any old Node may have projections trailing it.
  // These are usually machine-dependent flags. In any case, they might
  // float to another block below this one. Move them up.
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* use = n->fast_out(i);
    if (use->is_Proj()) {
      Block* buse = get_block_for_node(use);
      if (buse != b) {              // In wrong block?
        if (buse != NULL) {
          buse->find_remove(use);   // Remove from wrong block
        }
        map_node_to_block(use, b);
        b->add_inst(use);
      }
    }
  }
}

//----------------------------replace_block_proj_ctrl-------------------------
// Nodes that have is_block_proj() nodes as their control need to use
// the appropriate Region for their actual block as their control since
// the projection will be in a predecessor block.
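// The control edge is rewired to the head node of the successor block
// selected by the projection.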
void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
  const Node *in0 = n->in(0);
  assert(in0 != NULL, "Only control-dependent");
  const Node *p = in0->is_block_proj();
  if (p != NULL && p != n) {    // Control from a block projection?
    assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
    // Find trailing Region
    Block *pb = get_block_for_node(in0); // Block-projection already has basic block
    uint j = 0;
    if (pb->_num_succs != 1) {  // More than 1 successor?
      // Search for successor
      uint max = pb->number_of_nodes();
      assert( max > 1, "" );
      uint start = max - pb->_num_succs;
      // Find which output path belongs to projection
      for (j = start; j < max; j++) {
        if( pb->get_node(j) == in0 )
          break;
      }
      assert( j < max, "must find" );
      // Change control to match head of successor basic block
      j -= start;
    }
    n->set_req(0, pb->_succs[j]->head());
  }
}

static bool is_dominator(Block* d, Block* n) {
  return d->dom_lca(n) == d;
}

//------------------------------schedule_pinned_nodes--------------------------
// Set the basic block for Nodes pinned into blocks
void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
  // Allocate node stack of size C->live_nodes()+8 to avoid frequent realloc
  GrowableArray <Node *> spstack(C->live_nodes() + 8);
  spstack.push(_root);
  while (spstack.is_nonempty()) {
    Node* node = spstack.pop();
    if (!visited.test_set(node->_idx)) { // Test node and flag it as visited
      if (node->pinned() && !has_block(node)) {  // Pinned? Nail it down!
        assert(node->in(0), "pinned Node must have Control");
        // Before setting block replace block_proj control edge
        replace_block_proj_ctrl(node);
        Node* input = node->in(0);
        while (!input->is_block_start()) {
          input = input->in(0);
        }
        Block* block = get_block_for_node(input); // Basic block of controlling input
        schedule_node_into_block(node, block);
      }

      // If the node has precedence edges (added when CastPP nodes are
      // removed in final_graph_reshaping), fix the control of the
      // node to cover the precedence edges and remove the
      // dependencies.
      Node* n = NULL;
      for (uint i = node->len()-1; i >= node->req(); i--) {
        Node* m = node->in(i);
        if (m == NULL) continue;
        // Skip the precedence edge if the test that guarded a CastPP:
        // - was optimized out during escape analysis
        // (OptimizePtrCompare): the CastPP's control isn't an end of
        // block.
        // - is moved in the branch of a dominating If: the control of
        // the CastPP is then a Region.
        if (m->is_block_proj() || m->is_block_start()) {
          node->rm_prec(i);
          if (n == NULL) {
            n = m;
          } else {
            Block* bn = get_block_for_node(n);
            Block* bm = get_block_for_node(m);
            assert(is_dominator(bn, bm) || is_dominator(bm, bn), "one must dominate the other");
            n = is_dominator(bn, bm) ? m : n;
          }
        }
      }
      if (n != NULL) {
        assert(node->in(0), "control should have been set");
        Block* bn = get_block_for_node(n);
        Block* bnode = get_block_for_node(node->in(0));
        assert(is_dominator(bn, bnode) || is_dominator(bnode, bn), "one must dominate the other");
        if (!is_dominator(bn, bnode)) {
          node->set_req(0, n);
        }
      }

      // process all inputs that are non NULL
      for (int i = node->req() - 1; i >= 0; --i) {
        if (node->in(i) != NULL) {
          spstack.push(node->in(i));
        }
      }
    }
  }
}

#ifdef ASSERT
// Assert that new input b2 is dominated by all previous inputs.
// Check this by seeing that it is dominated by b1, the deepest
// input observed until b2.
static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
  if (b1 == NULL)  return;
  assert(b1->_dom_depth < b2->_dom_depth, "sanity");
  Block* tmp = b2;
  while (tmp != b1 && tmp != NULL) {
    tmp = tmp->_idom;
  }
  if (tmp != b1) {
    // Detected an unschedulable graph. Print some nice stuff and die.
    tty->print_cr("!!! Unschedulable graph !!!");
    for (uint j=0; j<n->len(); j++) { // For all inputs
      Node* inn = n->in(j);           // Get input
      if (inn == NULL)  continue;     // Ignore NULL, missing inputs
      Block* inb = cfg->get_block_for_node(inn);
      tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
                 inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
      inn->dump();
    }
    tty->print("Failing node: ");
    n->dump();
    assert(false, "unschedulable graph");
  }
}
#endif

static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
  // Find the last input dominated by all other inputs.
  Block* deepb           = NULL;        // Deepest block so far
  int    deepb_dom_depth = 0;
  for (uint k = 0; k < n->len(); k++) { // For all inputs
    Node* inn = n->in(k);               // Get input
    if (inn == NULL)  continue;         // Ignore NULL, missing inputs
    Block* inb = cfg->get_block_for_node(inn);
    assert(inb != NULL, "must already have scheduled this input");
    if (deepb_dom_depth < (int) inb->_dom_depth) {
      // The new inb must be dominated by the previous deepb.
      // The various inputs must be linearly ordered in the dom
      // tree, or else there will not be a unique deepest block.
      DEBUG_ONLY(assert_dom(deepb, inb, n, cfg));
      deepb = inb;                      // Save deepest block
      deepb_dom_depth = deepb->_dom_depth;
    }
  }
  assert(deepb != NULL, "must be at least one input to n");
  return deepb;
}


//------------------------------schedule_early---------------------------------
// Find the earliest Block any instruction can be placed in. Some instructions
// are pinned into Blocks. Unpinned instructions can appear in last block in
// which all their inputs occur.
bool PhaseCFG::schedule_early(VectorSet &visited, Node_List &roots) {
  // Allocate stack with enough space to avoid frequent realloc
  Node_Stack nstack(roots.Size() + 8);
  // _root will be processed among C->top() inputs
  roots.push(C->top());
  visited.set(C->top()->_idx);

  while (roots.size() != 0) {
    // Use local variables parent_node & input_index to cache values
    // on stack's top.
    Node* parent_node = roots.pop();
    uint  input_index = 0;

    while (true) {
      if (input_index == 0) {
        // Fixup some control. Constants without control get attached
        // to root and nodes that use is_block_proj() nodes should be attached
        // to the region that starts their block.
        const Node* control_input = parent_node->in(0);
        if (control_input != NULL) {
          replace_block_proj_ctrl(parent_node);
        } else {
          // Is a constant with NO inputs?
          if (parent_node->req() == 1) {
            parent_node->set_req(0, _root);
          }
        }
      }

      // First, visit all inputs and force them to get a block. If an
      // input is already in a block we quit following inputs (to avoid
      // cycles). Instead we put that Node on a worklist to be handled
      // later (since ITS inputs may not have a block yet).

      // Assume all n's inputs will be processed
      bool done = true;

      while (input_index < parent_node->len()) {
        Node* in = parent_node->in(input_index++);
        if (in == NULL) {
          continue;
        }

        int is_visited = visited.test_set(in->_idx);
        if (!has_block(in)) {
          if (is_visited) {
            assert(false, "graph should be schedulable");
            return false;
          }
          // Save parent node and next input's index.
          nstack.push(parent_node, input_index);
          // Process current input now.
          parent_node = in;
          input_index = 0;
          // Not all n's inputs processed.
          done = false;
          break;
        } else if (!is_visited) {
          // Visit this guy later, using worklist
          roots.push(in);
        }
      }

      if (done) {
        // All of n's inputs have been processed, complete post-processing.

        // Some instructions are pinned into a block. These include Region,
        // Phi, Start, Return, and other control-dependent instructions and
        // any projections which depend on them.
        if (!parent_node->pinned()) {
          // Set earliest legal block.
          Block* earliest_block = find_deepest_input(parent_node, this);
          map_node_to_block(parent_node, earliest_block);
        } else {
          assert(get_block_for_node(parent_node) == get_block_for_node(parent_node->in(0)), "Pinned Node should be at the same block as its control edge");
        }

        if (nstack.is_empty()) {
          // Finished all nodes on stack.
          // Process next node on the worklist 'roots'.
          break;
        }
        // Get saved parent node and next input's index.
        parent_node = nstack.node();
        input_index = nstack.index();
        nstack.pop();
      }
    }
  }
  return true;
}

//------------------------------dom_lca----------------------------------------
// Find least common ancestor in dominator tree
// LCA is a current notion of LCA, to be raised above 'this'.
// As a convenient boundary condition, return 'this' if LCA is NULL.
// Find the LCA of those two nodes.
Block* Block::dom_lca(Block* LCA) {
  if (LCA == NULL || LCA == this)  return this;

  Block* anc = this;
  while (anc->_dom_depth > LCA->_dom_depth)
    anc = anc->_idom;           // Walk up till anc is as high as LCA

  while (LCA->_dom_depth > anc->_dom_depth)
    LCA = LCA->_idom;           // Walk up till LCA is as high as anc

  while (LCA != anc) {          // Walk both up till they are the same
    LCA = LCA->_idom;
    anc = anc->_idom;
  }

  return LCA;
}

//--------------------------raise_LCA_above_use--------------------------------
// We are placing a definition, and have been given a def->use edge.
// The definition must dominate the use, so move the LCA upward in the
// dominator tree to dominate the use. If the use is a phi, adjust
// the LCA only with the phi input paths which actually use this def.
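// For a Phi use, the def is really needed on the predecessor edge(s) that
// supply it, so the LCA is raised to those predecessor blocks rather than
// to the Phi's own block.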
static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) {
  Block* buse = cfg->get_block_for_node(use);
  if (buse == NULL) return LCA;   // Unused killing Projs have no use block
  if (!use->is_Phi())  return buse->dom_lca(LCA);
  uint pmax = use->req();         // Number of Phi inputs
  // Why does not this loop just break after finding the matching input to
  // the Phi? Well...it's like this. I do not have true def-use/use-def
  // chains. Means I cannot distinguish, from the def-use direction, which
  // of many use-defs lead from the same use to the same def. That is, this
  // Phi might have several uses of the same def. Each use appears in a
  // different predecessor block. But when I enter here, I cannot distinguish
  // which use-def edge I should find the predecessor block for. So I find
  // them all. Means I do a little extra work if a Phi uses the same value
  // more than once.
  for (uint j=1; j<pmax; j++) {   // For all inputs
    if (use->in(j) == def) {      // Found matching input?
      Block* pred = cfg->get_block_for_node(buse->pred(j));
      LCA = pred->dom_lca(LCA);
    }
  }
  return LCA;
}

//----------------------------raise_LCA_above_marks----------------------------
// Return a new LCA that dominates LCA and any of its marked predecessors.
// Search all my parents up to 'early' (exclusive), looking for predecessors
// which are marked with the given index. Return the LCA (in the dom tree)
// of all marked blocks. If there are none marked, return the original
// LCA.
static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) {
  Block_List worklist;
  worklist.push(LCA);
  while (worklist.size() > 0) {
    Block* mid = worklist.pop();
    if (mid == early)  continue;  // stop searching here

    // Test and set the visited bit.
    if (mid->raise_LCA_visited() == mark)  continue;  // already visited

    // Don't process the current LCA, otherwise the search may terminate early
    if (mid != LCA && mid->raise_LCA_mark() == mark) {
      // Raise the LCA.
      LCA = mid->dom_lca(LCA);
      if (LCA == early)  break;   // stop searching everywhere
      assert(early->dominates(LCA), "early is high enough");
      // Resume searching at that point, skipping intermediate levels.
      worklist.push(LCA);
      if (LCA == mid)
        continue; // Don't mark as visited to avoid early termination.
    } else {
      // Keep searching through this block's predecessors.
      for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
        Block* mid_parent = cfg->get_block_for_node(mid->pred(j));
        worklist.push(mid_parent);
      }
    }
    mid->set_raise_LCA_visited(mark);
  }
  return LCA;
}

//--------------------------memory_early_block--------------------------------
// This is a variation of find_deepest_input, the heart of schedule_early.
// Find the "early" block for a load, if we considered only memory and
// address inputs, that is, if other data inputs were ignored.
//
// Because a subset of edges are considered, the resulting block will
// be earlier (at a shallower dom_depth) than the true schedule_early
// point of the node. We compute this earlier block as a more permissive
// site for anti-dependency insertion, but only if subsume_loads is enabled.
static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) {
  Node* base;
  Node* index;
  Node* store = load->in(MemNode::Memory);
  load->as_Mach()->memory_inputs(base, index);

  assert(base != NodeSentinel && index != NodeSentinel,
         "unexpected base/index inputs");

  Node* mem_inputs[4];
  int mem_inputs_length = 0;
  if (base != NULL)  mem_inputs[mem_inputs_length++] = base;
  if (index != NULL) mem_inputs[mem_inputs_length++] = index;
  if (store != NULL) mem_inputs[mem_inputs_length++] = store;

  // In the comparison below, add one to account for the control input,
  // which may be null, but always takes up a spot in the in array.
  if (mem_inputs_length + 1 < (int) load->req()) {
    // This "load" has more inputs than just the memory, base and index inputs.
    // For purposes of checking anti-dependences, we need to start
    // from the early block of only the address portion of the instruction,
    // and ignore other blocks that may have factored into the wider
    // schedule_early calculation.
    if (load->in(0) != NULL) mem_inputs[mem_inputs_length++] = load->in(0);

    Block* deepb           = NULL;        // Deepest block so far
    int    deepb_dom_depth = 0;
    for (int i = 0; i < mem_inputs_length; i++) {
      Block* inb = cfg->get_block_for_node(mem_inputs[i]);
      if (deepb_dom_depth < (int) inb->_dom_depth) {
        // The new inb must be dominated by the previous deepb.
        // The various inputs must be linearly ordered in the dom
        // tree, or else there will not be a unique deepest block.
        DEBUG_ONLY(assert_dom(deepb, inb, load, cfg));
        deepb = inb;                      // Save deepest block
        deepb_dom_depth = deepb->_dom_depth;
      }
    }
    early = deepb;
  }

  return early;
}

//--------------------------insert_anti_dependences---------------------------
// A load may need to witness memory that nearby stores can overwrite.
// For each nearby store, either insert an "anti-dependence" edge
// from the load to the store, or else move LCA upward to force the
// load to (eventually) be scheduled in a block above the store.
//
// Do not add edges to stores on distinct control-flow paths;
// only add edges to stores which might interfere.
//
// Return the (updated) LCA. There will not be any possibly interfering
// store between the load's "early block" and the updated LCA.
// Any stores in the updated LCA will have new precedence edges
// back to the load. The caller is expected to schedule the load
// in the LCA, in which case the precedence edges will make LCM
// preserve anti-dependences. The caller may also hoist the load
// above the LCA, if it is not the early block.
Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
  assert(load->needs_anti_dependence_check(), "must be a load of some sort");
  assert(LCA != NULL, "");
  DEBUG_ONLY(Block* LCA_orig = LCA);

  // Compute the alias index. Loads and stores with different alias indices
  // do not need anti-dependence edges.
  uint load_alias_idx = C->get_alias_index(load->adr_type());
#ifdef ASSERT
  if (load_alias_idx == Compile::AliasIdxBot && C->AliasLevel() > 0 &&
      (PrintOpto || VerifyAliases ||
       PrintMiscellaneous && (WizardMode || Verbose))) {
    // Load nodes should not consume all of memory.
    // Reporting a bottom type indicates a bug in adlc.
    // If some particular type of node validly consumes all of memory,
    // sharpen the preceding "if" to exclude it, so we can catch bugs here.
    tty->print_cr("*** Possible Anti-Dependence Bug: Load consumes all of memory.");
    load->dump(2);
    if (VerifyAliases)  assert(load_alias_idx != Compile::AliasIdxBot, "");
  }
#endif
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrComp),
         "String compare is only known 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrEquals),
         "String equals is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrIndexOf),
         "String indexOf is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_AryEq),
         "Arrays equals is a 'load' that does not conflict with any stores");

  if (!C->alias_type(load_alias_idx)->is_rewritable()) {
    // It is impossible to spoil this load by putting stores before it,
    // because we know that the stores will never update the value
    // which 'load' must witness.
    return LCA;
  }

  node_idx_t load_index = load->_idx;

  // Note the earliest legal placement of 'load', as determined by
  // the unique point in the dom tree where all memory effects
  // and other inputs are first available. (Computed by schedule_early.)
  // For normal loads, 'early' is the shallowest place (dom graph wise)
  // to look for anti-deps between this load and any store.
  Block* early = get_block_for_node(load);

  // If we are subsuming loads, compute an "early" block that only considers
  // memory or address inputs. This block may be different than the
  // schedule_early block in that it could be at an even shallower depth in the
  // dominator tree, and allow for a broader discovery of anti-dependences.
  if (C->subsume_loads()) {
    early = memory_early_block(load, early, this);
  }

  ResourceArea *area = Thread::current()->resource_area();
  Node_List worklist_mem(area);     // prior memory state to store
  Node_List worklist_store(area);   // possible-def to explore
  Node_List worklist_visited(area); // visited mergemem nodes
  Node_List non_early_stores(area); // all relevant stores outside of early
  bool must_raise_LCA = false;

#ifdef TRACK_PHI_INPUTS
  // %%% This extra checking fails because MergeMem nodes are not GVNed.
  // Provide "phi_inputs" to check if every input to a PhiNode is from the
  // original memory state. This indicates a PhiNode which should not
  // prevent the load from sinking. For such a block, set_raise_LCA_mark
  // may be overly conservative.
  // Mechanism: count inputs seen for each Phi encountered in worklist_store.
  DEBUG_ONLY(GrowableArray<uint> phi_inputs(area, C->unique(),0,0));
#endif

  // 'load' uses some memory state; look for users of the same state.
  // Recurse through MergeMem nodes to the stores that use them.

  // Each of these stores is a possible definition of memory
  // that 'load' needs to use. We need to force 'load'
  // to occur before each such store. When the store is in
  // the same block as 'load', we insert an anti-dependence
  // edge load->store.

  // The relevant stores "nearby" the load consist of a tree rooted
  // at initial_mem, with internal nodes of type MergeMem.
  // Therefore, the branches visited by the worklist are of this form:
  //    initial_mem -> (MergeMem ->)* store
  // The anti-dependence constraints apply only to the fringe of this tree.

  Node* initial_mem = load->in(MemNode::Memory);
  worklist_store.push(initial_mem);
  worklist_visited.push(initial_mem);
  worklist_mem.push(NULL);
  while (worklist_store.size() > 0) {
    // Examine a nearby store to see if it might interfere with our load.
    Node* mem   = worklist_mem.pop();
    Node* store = worklist_store.pop();
    uint op = store->Opcode();

    // MergeMems do not directly have anti-deps.
    // Treat them as internal nodes in a forward tree of memory states,
    // the leaves of which are each a 'possible-def'.
    if (store == initial_mem    // root (exclusive) of tree we are searching
        || op == Op_MergeMem    // internal node of tree we are searching
        ) {
      mem = store;   // It's not a possibly interfering store.
      if (store == initial_mem)
        initial_mem = NULL;  // only process initial memory once

      for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
        store = mem->fast_out(i);
        if (store->is_MergeMem()) {
          // Be sure we don't get into combinatorial problems.
          // (Allow phis to be repeated; they can merge two relevant states.)
          uint j = worklist_visited.size();
          for (; j > 0; j--) {
            if (worklist_visited.at(j-1) == store)  break;
          }
          if (j > 0)  continue; // already on work list; do not repeat
          worklist_visited.push(store);
        }
        worklist_mem.push(mem);
        worklist_store.push(store);
      }
      continue;
    }

    if (op == Op_MachProj || op == Op_Catch)   continue;
    if (store->needs_anti_dependence_check())  continue;  // not really a store

    // Compute the alias index. Loads and stores with different alias
    // indices do not need anti-dependence edges. Wide MemBar's are
    // anti-dependent on everything (except immutable memories).
    const TypePtr* adr_type = store->adr_type();
    if (!C->can_alias(adr_type, load_alias_idx))  continue;

    // Most slow-path runtime calls do NOT modify Java memory, but
    // they can block and so write Raw memory.
    if (store->is_Mach()) {
      MachNode* mstore = store->as_Mach();
      if (load_alias_idx != Compile::AliasIdxRaw) {
        // Check for call into the runtime using the Java calling
        // convention (and from there into a wrapper); it has no
        // _method. Can't do this optimization for Native calls because
        // they CAN write to Java memory.
        if (mstore->ideal_Opcode() == Op_CallStaticJava) {
          assert(mstore->is_MachSafePoint(), "");
          MachSafePointNode* ms = (MachSafePointNode*) mstore;
          assert(ms->is_MachCallJava(), "");
          MachCallJavaNode* mcj = (MachCallJavaNode*) ms;
          if (mcj->_method == NULL) {
            // These runtime calls do not write to Java visible memory
            // (other than Raw) and so do not require anti-dependence edges.
            continue;
          }
        }
        // Same for SafePoints: they read/write Raw but only read otherwise.
        // This is basically a workaround for SafePoints only defining control
        // instead of control + memory.
        if (mstore->ideal_Opcode() == Op_SafePoint)
          continue;
      } else {
        // Some raw memory, such as the load of "top" at an allocation,
        // can be control dependent on the previous safepoint. See
        // comments in GraphKit::allocate_heap() about control input.
        // Inserting an anti-dep between such a safepoint and a use
        // creates a cycle, and will cause a subsequent failure in
        // local scheduling. (BugId 4919904)
        // (%%% How can a control input be a safepoint and not a projection??)
        if (mstore->ideal_Opcode() == Op_SafePoint && load->in(0) == mstore)
          continue;
      }
    }

    // Identify a block that the current load must be above,
    // or else observe that 'store' is all the way up in the
    // earliest legal block for 'load'. In the latter case,
    // immediately insert an anti-dependence edge.
    Block* store_block = get_block_for_node(store);
    assert(store_block != NULL, "unused killing projections skipped above");

    if (store->is_Phi()) {
      // 'load' uses memory which is one (or more) of the Phi's inputs.
      // It must be scheduled not before the Phi, but rather before
      // each of the relevant Phi inputs.
      //
      // Instead of finding the LCA of all inputs to a Phi that match 'mem',
      // we mark each corresponding predecessor block and do a combined
      // hoisting operation later (raise_LCA_above_marks).
      //
      // Do not assert(store_block != early, "Phi merging memory after access")
      // PhiNode may be at start of block 'early' with backedge to 'early'
      DEBUG_ONLY(bool found_match = false);
      for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) {
        if (store->in(j) == mem) {   // Found matching input?
          DEBUG_ONLY(found_match = true);
          Block* pred_block = get_block_for_node(store_block->pred(j));
          if (pred_block != early) {
            // If any predecessor of the Phi matches the load's "early block",
            // we do not need a precedence edge between the Phi and 'load'
            // since the load will be forced into a block preceding the Phi.
            pred_block->set_raise_LCA_mark(load_index);
            assert(!LCA_orig->dominates(pred_block) ||
                   early->dominates(pred_block), "early is high enough");
            must_raise_LCA = true;
          } else {
            // anti-dependent upon PHI pinned below 'early', no edge needed
            LCA = early;             // but can not schedule below 'early'
          }
        }
      }
      assert(found_match, "no worklist bug");
#ifdef TRACK_PHI_INPUTS
#ifdef ASSERT
      // This assert asks about correct handling of PhiNodes, which may not
      // have all input edges directly from 'mem'. See BugId 4621264
      int num_mem_inputs = phi_inputs.at_grow(store->_idx,0) + 1;
      // Increment by exactly one even if there are multiple copies of 'mem'
      // coming into the phi, because we will run this block several times
      // if there are several copies of 'mem'. (That's how DU iterators work.)
      phi_inputs.at_put(store->_idx, num_mem_inputs);
      assert(PhiNode::Input + num_mem_inputs < store->req(),
             "Expect at least one phi input will not be from original memory state");
#endif //ASSERT
#endif //TRACK_PHI_INPUTS
    } else if (store_block != early) {
      // 'store' is between the current LCA and earliest possible block.
      // Label its block, and decide later on how to raise the LCA
      // to include the effect on LCA of this store.
      // If this store's block gets chosen as the raised LCA, we
      // will find him on the non_early_stores list and stick him
      // with a precedence edge.
      // (But, don't bother if LCA is already raised all the way.)
      if (LCA != early) {
        store_block->set_raise_LCA_mark(load_index);
        must_raise_LCA = true;
        non_early_stores.push(store);
      }
    } else {
      // Found a possibly-interfering store in the load's 'early' block.
      // This means 'load' cannot sink at all in the dominator tree.
      // Add an anti-dep edge, and squeeze 'load' into the highest block.
      assert(store != load->in(0), "dependence cycle found");
      if (verify) {
        assert(store->find_edge(load) != -1, "missing precedence edge");
      } else {
        store->add_prec(load);
      }
      LCA = early;
      // This turns off the process of gathering non_early_stores.
    }
  }
  // (Worklist is now empty; all nearby stores have been visited.)

  // Finished if 'load' must be scheduled in its 'early' block.
  // If we found any stores there, they have already been given
  // precedence edges.
  if (LCA == early)  return LCA;

  // We get here only if there are no possibly-interfering stores
  // in the load's 'early' block. Move LCA up above all predecessors
  // which contain stores we have noted.
  //
  // The raised LCA block can be a home to such interfering stores,
  // but its predecessors must not contain any such stores.
  //
  // The raised LCA will be a lower bound for placing the load,
  // preventing the load from sinking past any block containing
  // a store that may invalidate the memory state required by 'load'.
  if (must_raise_LCA)
    LCA = raise_LCA_above_marks(LCA, load->_idx, early, this);
  if (LCA == early)  return LCA;

  // Insert anti-dependence edges from 'load' to each store
  // in the non-early LCA block.
  // Mine the non_early_stores list for such stores.
  if (LCA->raise_LCA_mark() == load_index) {
    while (non_early_stores.size() > 0) {
      Node* store = non_early_stores.pop();
      Block* store_block = get_block_for_node(store);
      if (store_block == LCA) {
        // add anti_dependence from store to load in its own block
        assert(store != load->in(0), "dependence cycle found");
        if (verify) {
          assert(store->find_edge(load) != -1, "missing precedence edge");
        } else {
          store->add_prec(load);
        }
      } else {
        assert(store_block->raise_LCA_mark() == load_index, "block was marked");
        // Any other stores we found must be either inside the new LCA
        // or else outside the original LCA. In the latter case, they
        // did not interfere with any use of 'load'.
        assert(LCA->dominates(store_block)
               || !LCA_orig->dominates(store_block), "no stray stores");
      }
    }
  }

  // Return the highest block containing stores; any stores
  // within that block have been given anti-dependence edges.
  return LCA;
}

// This class is used to iterate backwards over the nodes in the graph.
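// Nodes come out in post-order: the (non-backward) uses of a node are
// returned before the node itself, so defs are visited after their uses.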

class Node_Backward_Iterator {

private:
  Node_Backward_Iterator();

public:
  // Constructor for the iterator
  Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg);

  // Postincrement operator to iterate over the nodes
  Node *next();

private:
  VectorSet &_visited;
  Node_List &_stack;
  PhaseCFG  &_cfg;
};

// Constructor for the Node_Backward_Iterator
Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg)
  : _visited(visited), _stack(stack), _cfg(cfg) {
  // The stack should contain exactly the root
  stack.clear();
  stack.push(root);

  // Clear the visited bits
  visited.Clear();
}

// Iterator for the Node_Backward_Iterator
Node *Node_Backward_Iterator::next() {

  // If the _stack is empty, then just return NULL: finished.
  if ( !_stack.size() )
    return NULL;

  // '_stack' is emulating a real _stack. The 'visit-all-users' loop has been
  // made stateless, so I do not need to record the index 'i' on my _stack.
  // Instead I visit all users each time, scanning for unvisited users.
  // I visit unvisited not-anti-dependence users first, then anti-dependent
  // children next.
  Node *self = _stack.pop();

  // I cycle here when I am entering a deeper level of recursion.
  // The key variable 'self' was set prior to jumping here.
  while( 1 ) {

    _visited.set(self->_idx);

    // Now schedule all uses as late as possible.
    const Node* src = self->is_Proj() ? self->in(0) : self;
    uint src_rpo = _cfg.get_block_for_node(src)->_rpo;

    // Schedule all nodes in a post-order visit
    Node *unvisited = NULL;  // Unvisited anti-dependent Node, if any

    // Scan for unvisited nodes
    for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
      // For all uses, schedule late
      Node* n = self->fast_out(i); // Use

      // Skip already visited children
      if ( _visited.test(n->_idx) )
        continue;

      // do not traverse backward control edges
      Node *use = n->is_Proj() ? n->in(0) : n;
      uint use_rpo = _cfg.get_block_for_node(use)->_rpo;

      if ( use_rpo < src_rpo )
        continue;

      // Phi nodes always precede uses in a basic block
      if ( use_rpo == src_rpo && use->is_Phi() )
        continue;

      unvisited = n;      // Found unvisited

      // Check for possible-anti-dependent
      if( !n->needs_anti_dependence_check() )
        break;            // Not visited, not anti-dep; schedule it NOW
    }

    // Did I find an unvisited not-anti-dependent Node?
    if ( !unvisited )
      break;                  // All done with children; post-visit 'self'

    // Visit the unvisited Node. Contains the obvious push to
    // indicate I'm entering a deeper level of recursion. I push the
    // old state onto the _stack and set a new state and loop (recurse).
    _stack.push(self);
    self = unvisited;
  } // End recursion loop

  return self;
}

//------------------------------ComputeLatenciesBackwards----------------------
// Compute the latency of all the instructions.
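// A node's latency is the maximum over its uses of (use latency + edge
// latency), so values computed further from the end of the routine
// receive larger numbers.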
void PhaseCFG::compute_latencies_backwards(VectorSet &visited, Node_List &stack) {
#ifndef PRODUCT
  if (trace_opto_pipelining())
    tty->print("\n#---- ComputeLatenciesBackwards ----\n");
#endif

  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
  Node *n;

  // Walk over all the nodes from last to first
  while (n = iter.next()) {
    // Set the latency for the definitions of this instruction
    partial_latency_of_defs(n);
  }
} // end ComputeLatenciesBackwards

//------------------------------partial_latency_of_defs------------------------
// Compute the latency impact of this node on all defs. This computes
// a number that increases as we approach the beginning of the routine.
void PhaseCFG::partial_latency_of_defs(Node *n) {
  // Set the latency for this instruction
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# latency_to_inputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
    dump();
  }
#endif

  if (n->is_Proj()) {
    n = n->in(0);
  }

  if (n->is_Root()) {
    return;
  }

  uint nlen = n->len();
  uint use_latency = get_latency_for_node(n);
  uint use_pre_order = get_block_for_node(n)->_pre_order;

  for (uint j = 0; j < nlen; j++) {
    Node *def = n->in(j);

    if (!def || def == n) {
      continue;
    }

    // Walk backwards thru projections
    if (def->is_Proj()) {
      def = def->in(0);
    }

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("# in(%2d): ", j);
      def->dump();
    }
#endif

    // If the defining block is not known, assume it is ok
    Block *def_block = get_block_for_node(def);
    uint def_pre_order = def_block ? def_block->_pre_order : 0;

    if ((use_pre_order < def_pre_order) || (use_pre_order == def_pre_order && n->is_Phi())) {
      continue;
    }

    uint delta_latency = n->latency(j);
    uint current_latency = delta_latency + use_latency;

    if (get_latency_for_node(def) < current_latency) {
      set_latency_for_node(def, current_latency);
    }

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("# %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d", use_latency, j, delta_latency, current_latency, def->_idx, get_latency_for_node(def));
    }
#endif
  }
}

//------------------------------latency_from_use-------------------------------
// Compute the latency of a specific use
int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
  // If self-reference, return no latency
  if (use == n || use->is_Root()) {
    return 0;
  }

  uint def_pre_order = get_block_for_node(def)->_pre_order;
  uint latency = 0;

  // If the use is not a projection, then it is simple...
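  // For a projection use, the latency is taken from the projection's own
  // uses instead (see the else branch below).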
  if (!use->is_Proj()) {
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("# out(): ");
      use->dump();
    }
#endif

    uint use_pre_order = get_block_for_node(use)->_pre_order;

    if (use_pre_order < def_pre_order)
      return 0;

    if (use_pre_order == def_pre_order && use->is_Phi())
      return 0;

    uint nlen = use->len();
    uint nl = get_latency_for_node(use);

    for ( uint j=0; j<nlen; j++ ) {
      if (use->in(j) == n) {
        // Change this if we want local latencies
        uint ul = use->latency(j);
        uint  l = ul + nl;
        if (latency < l) latency = l;
#ifndef PRODUCT
        if (trace_opto_pipelining()) {
          tty->print_cr("# %d + edge_latency(%d) == %d -> %d, latency = %d",
                        nl, j, ul, l, latency);
        }
#endif
      }
    }
  } else {
    // This is a projection, just grab the latency of the use(s)
    for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
      uint l = latency_from_use(use, def, use->fast_out(j));
      if (latency < l) latency = l;
    }
  }

  return latency;
}

//------------------------------latency_from_uses------------------------------
// Compute the latency of this instruction relative to all of its uses.
// This computes a number that increases as we approach the beginning of the
// routine.
void PhaseCFG::latency_from_uses(Node *n) {
  // Set the latency for this instruction
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# latency_from_outputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
    dump();
  }
#endif
  uint latency=0;
  const Node *def = n->is_Proj() ? n->in(0): n;

  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    uint l = latency_from_use(n, def, n->fast_out(i));

    if (latency < l) latency = l;
  }

  set_latency_for_node(n, latency);
}

//------------------------------hoist_to_cheaper_block-------------------------
// Pick a block for node self, between early and LCA, that is a cheaper
// alternative to LCA.
Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
  const double delta = 1+PROB_UNLIKELY_MAG(4);
  Block* least       = LCA;
  double least_freq  = least->_freq;
  uint target        = get_latency_for_node(self);
  uint start_latency = get_latency_for_node(LCA->head());
  uint end_latency   = get_latency_for_node(LCA->get_node(LCA->end_idx()));
  bool in_latency    = (target <= start_latency);
  const Block* root_block = get_block_for_node(_root);

  // Turn off latency scheduling if scheduling is just plain off
  if (!C->do_scheduling())
    in_latency = true;

  // Do not hoist (to cover latency) instructions which target a
  // single register. Hoisting stretches the live range of the
  // single register and may force spilling.
  MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
  if (mach && mach->out_RegMask().is_bound1() && mach->out_RegMask().is_NotEmpty())
    in_latency = true;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# Find cheaper block for latency %d: ", get_latency_for_node(self));
    self->dump();
    tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
      LCA->_pre_order,
      LCA->head()->_idx,
      start_latency,
      LCA->get_node(LCA->end_idx())->_idx,
      end_latency,
      least_freq);
  }
#endif

  int cand_cnt = 0;  // number of candidates tried

  // Walk up the dominator tree from LCA (Lowest common ancestor) to
  // the earliest legal location. Capture the least execution frequency.
  while (LCA != early) {
    LCA = LCA->_idom;         // Follow up the dominator tree

    if (LCA == NULL) {
      // Bailout without retry
      assert(false, "graph should be schedulable");
      C->record_method_not_compilable("late schedule failed: LCA == NULL");
      return least;
    }

    // Don't hoist machine instructions to the root basic block
    if (mach && LCA == root_block)
      break;

    uint start_lat = get_latency_for_node(LCA->head());
    uint end_idx   = LCA->end_idx();
    uint end_lat   = get_latency_for_node(LCA->get_node(end_idx));
    double LCA_freq = LCA->_freq;
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
        LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq);
    }
#endif
    cand_cnt++;
    if (LCA_freq < least_freq              || // Better Frequency
        (StressGCM && Compile::randomized_select(cand_cnt)) || // Should be randomly accepted in stress mode
        (!StressGCM                        &&    // Otherwise, choose with latency
         !in_latency                       &&    // No block containing latency
         LCA_freq < least_freq * delta     &&    // No worse frequency
         target >= end_lat                 &&    // within latency range
         !self->is_iteratively_computed() )      // But don't hoist IV increments
             // because they may end up above other uses of their phi forcing
             // their result register to be different from their input.
       ) {
      least = LCA;            // Found cheaper block
      least_freq = LCA_freq;
      start_latency = start_lat;
      end_latency = end_lat;
      if (target <= start_lat)
        in_latency = true;
    }
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print_cr("# Choose block B%d with start latency=%d and freq=%g",
      least->_pre_order, start_latency, least_freq);
  }
#endif

  // See if the latency needs to be updated
  if (target < end_latency) {
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("# Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
    }
#endif
    set_latency_for_node(self, end_latency);
    partial_latency_of_defs(self);
  }

  return least;
}


//------------------------------schedule_late-----------------------------------
// Now schedule all codes as LATE as possible. This is the LCA in the
// dominator tree of all USES of a value. Pick the block with the least
// loop nesting depth that is lowest in the dominator tree.
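// The final block is chosen between 'early' and this LCA by
// hoist_to_cheaper_block(), trading latency coverage against execution
// frequency.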
extern const char must_clone[];
void PhaseCFG::schedule_late(VectorSet &visited, Node_List &stack) {
#ifndef PRODUCT
  if (trace_opto_pipelining())
    tty->print("\n#---- schedule_late ----\n");
#endif

  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
  Node *self;

  // Walk over all the nodes from last to first
  while (self = iter.next()) {
    Block* early = get_block_for_node(self); // Earliest legal placement

    if (self->is_top()) {
      // Top node goes in bb #2 with other constants.
      // It must be special-cased, because it has no out edges.
      early->add_inst(self);
      continue;
    }

    // No uses, just terminate
    if (self->outcnt() == 0) {
      assert(self->is_MachProj(), "sanity");
      continue;                   // Must be a dead machine projection
    }

    // If node is pinned in the block, then no scheduling can be done.
    if( self->pinned() )          // Pinned in block?
      continue;

    MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
    if (mach) {
      switch (mach->ideal_Opcode()) {
      case Op_CreateEx:
        // Don't move exception creation
        early->add_inst(self);
        continue;
        break;
      case Op_CheckCastPP:
        // Don't move CheckCastPP nodes away from their input, if the input
        // is a rawptr (5071820).
        Node *def = self->in(1);
        if (def != NULL && def->bottom_type()->base() == Type::RawPtr) {
          early->add_inst(self);
#ifdef ASSERT
          _raw_oops.push(def);
#endif
          continue;
        }
        break;
      }
    }

    // Gather LCA of all uses
    Block *LCA = NULL;
    {
      for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
        // For all uses, find LCA
        Node* use = self->fast_out(i);
        LCA = raise_LCA_above_use(LCA, use, self, this);
      }
    }  // (Hide defs of imax, i from rest of block.)

    // Place temps in the block of their use. This isn't a
    // requirement for correctness but it reduces useless
    // interference between temps and other nodes.
    if (mach != NULL && mach->is_MachTemp()) {
      map_node_to_block(self, LCA);
      LCA->add_inst(self);
      continue;
    }

    // Check if 'self' could be anti-dependent on memory
    if (self->needs_anti_dependence_check()) {
      // Hoist LCA above possible-defs and insert anti-dependences to
      // defs in new LCA block.
      LCA = insert_anti_dependences(LCA, self);
    }

    if (early->_dom_depth > LCA->_dom_depth) {
      // Somehow the LCA has moved above the earliest legal point.
      // (One way this can happen is via memory_early_block.)
      if (C->subsume_loads() == true && !C->failing()) {
        // Retry with subsume_loads == false
        // If this is the first failure, the sentinel string will "stick"
        // to the Compile object, and the C2Compiler will see it and retry.
        C->record_failure(C2Compiler::retry_no_subsuming_loads());
      } else {
        // Bailout without retry when (early->_dom_depth > LCA->_dom_depth)
        assert(false, "graph should be schedulable");
        C->record_method_not_compilable("late schedule failed: incorrect graph");
      }
      return;
    }

    // If there is no opportunity to hoist, then we're done.
    // In stress mode, try to hoist even the single operations.
    bool try_to_hoist = StressGCM || (LCA != early);

    // Must clone guys stay next to use; no hoisting allowed.
    // Also cannot hoist guys that alter memory or are otherwise not
    // allocatable (hoisting can make a value live longer, leading to
    // anti and output dependency problems which are normally resolved
    // by the register allocator giving everyone a different register).
    if (mach != NULL && must_clone[mach->ideal_Opcode()])
      try_to_hoist = false;

    Block* late = NULL;
    if (try_to_hoist) {
      // Now find the block with the least execution frequency.
      // Start at the latest schedule and work up to the earliest schedule
      // in the dominator tree. Thus the Node will dominate all its uses.
      late = hoist_to_cheaper_block(LCA, early, self);
    } else {
      // Just use the LCA of the uses.
      late = LCA;
    }

    // Put the node into target block
    schedule_node_into_block(self, late);

#ifdef ASSERT
    if (self->needs_anti_dependence_check()) {
      // since precedence edges are only inserted when we're sure they
      // are needed make sure that after placement in a block we don't
      // need any new precedence edges.
      verify_anti_dependences(late, self);
    }
#endif
  } // Loop until all nodes have been visited

} // end ScheduleLate

//------------------------------GlobalCodeMotion-------------------------------
void PhaseCFG::global_code_motion() {
  ResourceMark rm;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Start GlobalCodeMotion ----\n");
  }
#endif

  // Initialize the node to block mapping for things on the proj_list
  for (uint i = 0; i < _matcher.number_of_projections(); i++) {
    unmap_node_from_block(_matcher.get_projection(i));
  }

  // Set the basic block for Nodes pinned into blocks
  Arena* arena = Thread::current()->resource_area();
  VectorSet visited(arena);
  schedule_pinned_nodes(visited);

  // Find the earliest Block any instruction can be placed in. Some
  // instructions are pinned into Blocks. Unpinned instructions can
  // appear in last block in which all their inputs occur.
  visited.Clear();
  Node_List stack(arena);
  // Pre-grow the list
  stack.map((C->live_nodes() >> 1) + 16, NULL);
  if (!schedule_early(visited, stack)) {
    // Bailout without retry
    C->record_method_not_compilable("early schedule failed");
    return;
  }

  // Build Def-Use edges.
  // Compute the latency information (via backwards walk) for all the
  // instructions in the graph
  _node_latency = new GrowableArray<uint>(); // resource_area allocation

  if (C->do_scheduling()) {
    compute_latencies_backwards(visited, stack);
  }

  // Now schedule all codes as LATE as possible. This is the LCA in the
  // dominator tree of all USES of a value. Pick the block with the least
  // loop nesting depth that is lowest in the dominator tree.
  // ( visited.Clear() called in schedule_late()->Node_Backward_Iterator() )
  schedule_late(visited, stack);
  if (C->failing()) {
    return;
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Detect implicit null checks ----\n");
  }
#endif

  // Detect implicit-null-check opportunities. Basically, find NULL checks
  // with suitable memory ops nearby. Use the memory op to do the NULL check.
  // I can generate a memory op if there is not one nearby.
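  // Only attempted for whole-method compilations (C->is_method_compilation()).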
  if (C->is_method_compilation()) {
    // By reversing the loop direction we get a very minor gain on mpegaudio.
    // Feel free to revert to a forward loop for clarity.
    // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
    for (int i = _matcher._null_check_tests.size() - 2; i >= 0; i -= 2) {
      Node* proj = _matcher._null_check_tests[i];
      Node* val  = _matcher._null_check_tests[i + 1];
      Block* block = get_block_for_node(proj);
      implicit_null_check(block, proj, val, C->allowed_deopt_reasons());
      // The implicit_null_check will only perform the transformation
      // if the null branch is truly uncommon, *and* it leads to an
      // uncommon trap. Combined with the too_many_traps guards
      // above, this prevents SEGV storms reported in 6366351,
      // by recompiling offending methods without this optimization.
    }
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Start Local Scheduling ----\n");
  }
#endif

  // Schedule locally. Right now a simple topological sort.
  // Later, do a real latency aware scheduler.
  GrowableArray<int> ready_cnt(C->unique(), C->unique(), -1);
  visited.Clear();
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    if (!schedule_local(block, ready_cnt, visited)) {
      if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
        C->record_method_not_compilable("local schedule failed");
      }
      return;
    }
  }

  // If we inserted any instructions between a Call and its CatchNode,
  // clone the instructions on all paths below the Catch.
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    call_catch_cleanup(block);
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- After GlobalCodeMotion ----\n");
    for (uint i = 0; i < number_of_blocks(); i++) {
      Block* block = get_block(i);
      block->dump();
    }
  }
#endif
  // Dead.
  _node_latency = (GrowableArray<uint> *)((intptr_t)0xdeadbeef);
}

bool PhaseCFG::do_global_code_motion() {

  build_dominator_tree();
  if (C->failing()) {
    return false;
  }

  NOT_PRODUCT( C->verify_graph_edges(); )

  estimate_block_frequency();

  global_code_motion();

  if (C->failing()) {
    return false;
  }

  return true;
}

//------------------------------Estimate_Block_Frequency-----------------------
// Estimate block frequencies based on IfNode probabilities.
void PhaseCFG::estimate_block_frequency() {

  // Force conditional branches leading to uncommon traps to be unlikely,
  // not because we get to the uncommon_trap with less relative frequency,
  // but because an uncommon_trap typically causes a deopt, so we only get
  // there once.
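  // With frequency-based layout, uncommon branches feeding such traps are
  // adjusted up front; otherwise the frequencies of those paths are forced
  // down to PROB_MIN after they are computed (second loop below).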
  if (C->do_freq_based_layout()) {
    Block_List worklist;
    Block* root_blk = get_block(0);
    for (uint i = 1; i < root_blk->num_preds(); i++) {
      Block *pb = get_block_for_node(root_blk->pred(i));
      if (pb->has_uncommon_code()) {
        worklist.push(pb);
      }
    }
    while (worklist.size() > 0) {
      Block* uct = worklist.pop();
      if (uct == get_root_block()) {
        continue;
      }
      for (uint i = 1; i < uct->num_preds(); i++) {
        Block *pb = get_block_for_node(uct->pred(i));
        if (pb->_num_succs == 1) {
          worklist.push(pb);
        } else if (pb->num_fall_throughs() == 2) {
          pb->update_uncommon_branch(uct);
        }
      }
    }
  }

  // Create the loop tree and calculate loop depth.
  _root_loop = create_loop_tree();
  _root_loop->compute_loop_depth(0);

  // Compute block frequency of each block, relative to a single loop entry.
  _root_loop->compute_freq();

  // Adjust all frequencies to be relative to a single method entry
  _root_loop->_freq = 1.0;
  _root_loop->scale_freq();

  // Save outermost loop frequency for LRG frequency threshold
  _outer_loop_frequency = _root_loop->outer_loop_freq();

  // force paths ending at uncommon traps to be infrequent
  if (!C->do_freq_based_layout()) {
    Block_List worklist;
    Block* root_blk = get_block(0);
    for (uint i = 1; i < root_blk->num_preds(); i++) {
      Block *pb = get_block_for_node(root_blk->pred(i));
      if (pb->has_uncommon_code()) {
        worklist.push(pb);
      }
    }
    while (worklist.size() > 0) {
      Block* uct = worklist.pop();
      uct->_freq = PROB_MIN;
      for (uint i = 1; i < uct->num_preds(); i++) {
        Block *pb = get_block_for_node(uct->pred(i));
        if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
          worklist.push(pb);
        }
      }
    }
  }

#ifdef ASSERT
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* b = get_block(i);
    assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency");
  }
#endif

#ifndef PRODUCT
  if (PrintCFGBlockFreq) {
    tty->print_cr("CFG Block Frequencies");
    _root_loop->dump_tree();
    if (Verbose) {
      tty->print_cr("PhaseCFG dump");
      dump();
      tty->print_cr("Node dump");
      _root->dump(99999);
    }
  }
#endif
}

//----------------------------create_loop_tree--------------------------------
// Create a loop tree from the CFG
CFGLoop* PhaseCFG::create_loop_tree() {

#ifdef ASSERT
  assert(get_block(0) == get_root_block(), "first block should be root block");
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    // Check that _loop fields are clear...we could clear them if not.
    assert(block->_loop == NULL, "clear _loop expected");
    // Sanity check that the RPO numbering is reflected in the _blocks array.
1544 // It doesn't have to be for the loop tree to be built, but if it is not, 1545 // then the blocks have been reordered since dom graph building...which 1546 // may question the RPO numbering 1547 assert(block->_rpo == i, "unexpected reverse post order number"); 1548 } 1549 #endif 1550 1551 int idct = 0; 1552 CFGLoop* root_loop = new CFGLoop(idct++); 1553 1554 Block_List worklist; 1555 1556 // Assign blocks to loops 1557 for(uint i = number_of_blocks() - 1; i > 0; i-- ) { // skip Root block 1558 Block* block = get_block(i); 1559 1560 if (block->head()->is_Loop()) { 1561 Block* loop_head = block; 1562 assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors"); 1563 Node* tail_n = loop_head->pred(LoopNode::LoopBackControl); 1564 Block* tail = get_block_for_node(tail_n); 1565 1566 // Defensively filter out Loop nodes for non-single-entry loops. 1567 // For all reasonable loops, the head occurs before the tail in RPO. 1568 if (i <= tail->_rpo) { 1569 1570 // The tail and (recursive) predecessors of the tail 1571 // are made members of a new loop. 1572 1573 assert(worklist.size() == 0, "nonempty worklist"); 1574 CFGLoop* nloop = new CFGLoop(idct++); 1575 assert(loop_head->_loop == NULL, "just checking"); 1576 loop_head->_loop = nloop; 1577 // Add to nloop so push_pred() will skip over inner loops 1578 nloop->add_member(loop_head); 1579 nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this); 1580 1581 while (worklist.size() > 0) { 1582 Block* member = worklist.pop(); 1583 if (member != loop_head) { 1584 for (uint j = 1; j < member->num_preds(); j++) { 1585 nloop->push_pred(member, j, worklist, this); 1586 } 1587 } 1588 } 1589 } 1590 } 1591 } 1592 1593 // Create a member list for each loop consisting 1594 // of both blocks and (immediate child) loops. 1595 for (uint i = 0; i < number_of_blocks(); i++) { 1596 Block* block = get_block(i); 1597 CFGLoop* lp = block->_loop; 1598 if (lp == NULL) { 1599 // Not assigned to a loop. Add it to the method's pseudo loop. 1600 block->_loop = root_loop; 1601 lp = root_loop; 1602 } 1603 if (lp == root_loop || block != lp->head()) { // loop heads are already members 1604 lp->add_member(block); 1605 } 1606 if (lp != root_loop) { 1607 if (lp->parent() == NULL) { 1608 // Not a nested loop. Make it a child of the method's pseudo loop. 1609 root_loop->add_nested_loop(lp); 1610 } 1611 if (block == lp->head()) { 1612 // Add nested loop to member list of parent loop. 1613 lp->parent()->add_member(lp); 1614 } 1615 } 1616 } 1617 1618 return root_loop; 1619 } 1620 1621 //------------------------------push_pred-------------------------------------- 1622 void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) { 1623 Node* pred_n = blk->pred(i); 1624 Block* pred = cfg->get_block_for_node(pred_n); 1625 CFGLoop *pred_loop = pred->_loop; 1626 if (pred_loop == NULL) { 1627 // Filter out blocks for non-single-entry loops. 1628 // For all reasonable loops, the head occurs before the tail in RPO. 1629 if (pred->_rpo > head()->_rpo) { 1630 pred->_loop = this; 1631 worklist.push(pred); 1632 } 1633 } else if (pred_loop != this) { 1634 // Nested loop. 1635 while (pred_loop->_parent != NULL && pred_loop->_parent != this) { 1636 pred_loop = pred_loop->_parent; 1637 } 1638 // Make pred's loop be a child 1639 if (pred_loop->_parent == NULL) { 1640 add_nested_loop(pred_loop); 1641 // Continue with loop entry predecessor. 
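// Rather than descending into the body of the just-adopted nested loop,
// the backward walk resumes from that loop's single entry edge, so the
// inner loop is treated as one opaque member of this loop.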
1642 Block* pred_head = pred_loop->head(); 1643 assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors"); 1644 assert(pred_head != head(), "loop head in only one loop"); 1645 push_pred(pred_head, LoopNode::EntryControl, worklist, cfg); 1646 } else { 1647 assert(pred_loop->_parent == this && _parent == NULL, "just checking"); 1648 } 1649 } 1650 } 1651 1652 //------------------------------add_nested_loop-------------------------------- 1653 // Make cl a child of the current loop in the loop tree. 1654 void CFGLoop::add_nested_loop(CFGLoop* cl) { 1655 assert(_parent == NULL, "no parent yet"); 1656 assert(cl != this, "not my own parent"); 1657 cl->_parent = this; 1658 CFGLoop* ch = _child; 1659 if (ch == NULL) { 1660 _child = cl; 1661 } else { 1662 while (ch->_sibling != NULL) { ch = ch->_sibling; } 1663 ch->_sibling = cl; 1664 } 1665 } 1666 1667 //------------------------------compute_loop_depth----------------------------- 1668 // Store the loop depth in each CFGLoop object. 1669 // Recursively walk the children to do the same for them. 1670 void CFGLoop::compute_loop_depth(int depth) { 1671 _depth = depth; 1672 CFGLoop* ch = _child; 1673 while (ch != NULL) { 1674 ch->compute_loop_depth(depth + 1); 1675 ch = ch->_sibling; 1676 } 1677 } 1678 1679 //------------------------------compute_freq----------------------------------- 1680 // Compute the frequency of each block and loop, relative to a single entry 1681 // into the dominating loop head. 1682 void CFGLoop::compute_freq() { 1683 // Bottom up traversal of loop tree (visit inner loops first.) 1684 // Set loop head frequency to 1.0, then transitively 1685 // compute frequency for all successors in the loop, 1686 // as well as for each exit edge. Inner loops are 1687 // treated as single blocks with loop exit targets 1688 // as the successor blocks. 1689 1690 // Nested loops first 1691 CFGLoop* ch = _child; 1692 while (ch != NULL) { 1693 ch->compute_freq(); 1694 ch = ch->_sibling; 1695 } 1696 assert (_members.length() > 0, "no empty loops"); 1697 Block* hd = head(); 1698 hd->_freq = 1.0f; 1699 for (int i = 0; i < _members.length(); i++) { 1700 CFGElement* s = _members.at(i); 1701 float freq = s->_freq; 1702 if (s->is_block()) { 1703 Block* b = s->as_Block(); 1704 for (uint j = 0; j < b->_num_succs; j++) { 1705 Block* sb = b->_succs[j]; 1706 update_succ_freq(sb, freq * b->succ_prob(j)); 1707 } 1708 } else { 1709 CFGLoop* lp = s->as_CFGLoop(); 1710 assert(lp->_parent == this, "immediate child"); 1711 for (int k = 0; k < lp->_exits.length(); k++) { 1712 Block* eb = lp->_exits.at(k).get_target(); 1713 float prob = lp->_exits.at(k).get_prob(); 1714 update_succ_freq(eb, freq * prob); 1715 } 1716 } 1717 } 1718 1719 // For all loops other than the outer, "method" loop, 1720 // sum and normalize the exit probability. The "method" loop 1721 // should keep the initial exit probability of 1, so that 1722 // inner blocks do not get erroneously scaled. 1723 if (_depth != 0) { 1724 // Total the exit probabilities for this loop. 1725 float exits_sum = 0.0f; 1726 for (int i = 0; i < _exits.length(); i++) { 1727 exits_sum += _exits.at(i).get_prob(); 1728 } 1729 1730 // Normalize the exit probabilities. Until now, the 1731 // probabilities estimate the possibility of exit per 1732 // a single loop iteration; afterward, they estimate 1733 // the probability of exit per loop entry. 
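// For example, a loop with two exits whose per-iteration probabilities are
// 0.01 and 0.03 has exits_sum == 0.04; after normalization the exits carry
// 0.25 and 0.75 of the loop's outgoing frequency, and the saved _exit_prob
// of 0.04 corresponds to an estimated trip count of roughly 25 (see the
// guard and comment below).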
1734 for (int i = 0; i < _exits.length(); i++) { 1735 Block* et = _exits.at(i).get_target(); 1736 float new_prob = 0.0f; 1737 if (_exits.at(i).get_prob() > 0.0f) { 1738 new_prob = _exits.at(i).get_prob() / exits_sum; 1739 } 1740 BlockProbPair bpp(et, new_prob); 1741 _exits.at_put(i, bpp); 1742 } 1743 1744 // Save the total, but guard against unreasonable probability, 1745 // as the value is used to estimate the loop trip count. 1746 // An infinite trip count would blur relative block 1747 // frequencies. 1748 if (exits_sum > 1.0f) exits_sum = 1.0; 1749 if (exits_sum < PROB_MIN) exits_sum = PROB_MIN; 1750 _exit_prob = exits_sum; 1751 } 1752 } 1753 1754 //------------------------------succ_prob------------------------------------- 1755 // Determine the probability of reaching successor 'i' from the receiver block. 1756 float Block::succ_prob(uint i) { 1757 int eidx = end_idx(); 1758 Node *n = get_node(eidx); // Get ending Node 1759 1760 int op = n->Opcode(); 1761 if (n->is_Mach()) { 1762 if (n->is_MachNullCheck()) { 1763 // Can only reach here if called after lcm. The original Op_If is gone, 1764 // so we attempt to infer the probability from one or both of the 1765 // successor blocks. 1766 assert(_num_succs == 2, "expecting 2 successors of a null check"); 1767 // If either successor has only one predecessor, then the 1768 // probability estimate can be derived using the 1769 // relative frequency of the successor and this block. 1770 if (_succs[i]->num_preds() == 2) { 1771 return _succs[i]->_freq / _freq; 1772 } else if (_succs[1-i]->num_preds() == 2) { 1773 return 1 - (_succs[1-i]->_freq / _freq); 1774 } else { 1775 // Estimate using both successor frequencies 1776 float freq = _succs[i]->_freq; 1777 return freq / (freq + _succs[1-i]->_freq); 1778 } 1779 } 1780 op = n->as_Mach()->ideal_Opcode(); 1781 } 1782 1783 1784 // Switch on branch type 1785 switch( op ) { 1786 case Op_CountedLoopEnd: 1787 case Op_If: { 1788 assert (i < 2, "just checking"); 1789 // Conditionals pass on only part of their frequency 1790 float prob = n->as_MachIf()->_prob; 1791 assert(prob >= 0.0 && prob <= 1.0, "out of range probability"); 1792 // If succ[i] is the FALSE branch, invert path info 1793 if( get_node(i + eidx + 1)->Opcode() == Op_IfFalse ) { 1794 return 1.0f - prob; // not taken 1795 } else { 1796 return prob; // taken 1797 } 1798 } 1799 1800 case Op_Jump: 1801 // Divide the frequency between all successors evenly 1802 return 1.0f/_num_succs; 1803 1804 case Op_Catch: { 1805 const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj(); 1806 if (ci->_con == CatchProjNode::fall_through_index) { 1807 // Fall-thru path gets the lion's share. 
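// For example, with PROB_UNLIKELY_MAG(5) on the order of 1e-5 and three
// successors, the fall-through keeps about 1 - 3e-5 of the block's
// frequency, while each exceptional CatchProj gets the fixed
// PROB_UNLIKELY_MAG(5) share below.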
1808 return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
1809 } else {
1810 // Presume exceptional paths are equally unlikely
1811 return PROB_UNLIKELY_MAG(5);
1812 }
1813 }
1814
1815 case Op_Root:
1816 case Op_Goto:
1817 // Pass frequency straight thru to target
1818 return 1.0f;
1819
1820 case Op_NeverBranch:
1821 return 0.0f;
1822
1823 case Op_TailCall:
1824 case Op_TailJump:
1825 case Op_Return:
1826 case Op_Halt:
1827 case Op_Rethrow:
1828 // Do not push out freq to root block
1829 return 0.0f;
1830
1831 default:
1832 ShouldNotReachHere();
1833 }
1834
1835 return 0.0f;
1836 }
1837
1838 //------------------------------num_fall_throughs-----------------------------
1839 // Return the number of fall-through candidates for a block
1840 int Block::num_fall_throughs() {
1841 int eidx = end_idx();
1842 Node *n = get_node(eidx); // Get ending Node
1843
1844 int op = n->Opcode();
1845 if (n->is_Mach()) {
1846 if (n->is_MachNullCheck()) {
1847 // In theory, either side can fall-thru; for simplicity's sake,
1848 // let's say only the false branch can now.
1849 return 1;
1850 }
1851 op = n->as_Mach()->ideal_Opcode();
1852 }
1853
1854 // Switch on branch type
1855 switch( op ) {
1856 case Op_CountedLoopEnd:
1857 case Op_If:
1858 return 2;
1859
1860 case Op_Root:
1861 case Op_Goto:
1862 return 1;
1863
1864 case Op_Catch: {
1865 for (uint i = 0; i < _num_succs; i++) {
1866 const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
1867 if (ci->_con == CatchProjNode::fall_through_index) {
1868 return 1;
1869 }
1870 }
1871 return 0;
1872 }
1873
1874 case Op_Jump:
1875 case Op_NeverBranch:
1876 case Op_TailCall:
1877 case Op_TailJump:
1878 case Op_Return:
1879 case Op_Halt:
1880 case Op_Rethrow:
1881 return 0;
1882
1883 default:
1884 ShouldNotReachHere();
1885 }
1886
1887 return 0;
1888 }
1889
1890 //------------------------------succ_fall_through-----------------------------
1891 // Return true if a specific successor could be a fall-through target.
1892 bool Block::succ_fall_through(uint i) {
1893 int eidx = end_idx();
1894 Node *n = get_node(eidx); // Get ending Node
1895
1896 int op = n->Opcode();
1897 if (n->is_Mach()) {
1898 if (n->is_MachNullCheck()) {
1899 // In theory, either side can fall-thru; for simplicity's sake,
1900 // let's say only the false branch can now.
1901 return get_node(i + eidx + 1)->Opcode() == Op_IfFalse;
1902 }
1903 op = n->as_Mach()->ideal_Opcode();
1904 }
1905
1906 // Switch on branch type
1907 switch( op ) {
1908 case Op_CountedLoopEnd:
1909 case Op_If:
1910 case Op_Root:
1911 case Op_Goto:
1912 return true;
1913
1914 case Op_Catch: {
1915 const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
1916 return ci->_con == CatchProjNode::fall_through_index;
1917 }
1918
1919 case Op_Jump:
1920 case Op_NeverBranch:
1921 case Op_TailCall:
1922 case Op_TailJump:
1923 case Op_Return:
1924 case Op_Halt:
1925 case Op_Rethrow:
1926 return false;
1927
1928 default:
1929 ShouldNotReachHere();
1930 }
1931
1932 return false;
1933 }
1934
1935 //------------------------------update_uncommon_branch------------------------
1936 // Update the probability of a two-way branch so that the successor 'ub' becomes uncommon.
1937 void Block::update_uncommon_branch(Block* ub) {
1938 int eidx = end_idx();
1939 Node *n = get_node(eidx); // Get ending Node
1940
1941 int op = n->as_Mach()->ideal_Opcode();
1942
1943 assert(op == Op_CountedLoopEnd || op == Op_If, "must be an If");
1944 assert(num_fall_throughs() == 2, "must be a two-way branch block");
1945
1946 // Which successor is ub?
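// Find ub among the successors, then clamp the probability of reaching it
// to PROB_MIN. The stored _prob refers to the taken (IfTrue) path, so if ub
// hangs off the IfFalse projection the probability is inverted before the
// clamp and inverted back afterwards.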
1947 uint s;
1948 for (s = 0; s < _num_succs; s++) {
1949 if (_succs[s] == ub) break;
1950 }
1951 assert(s < 2, "uncommon successor must be found");
1952
1953 // If ub is the true path, make the probability small; else
1954 // ub is the false path, so make the probability large.
1955 bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse);
1956
1957 // Get existing probability
1958 float p = n->as_MachIf()->_prob;
1959
1960 if (invert) p = 1.0 - p;
1961 if (p > PROB_MIN) {
1962 p = PROB_MIN;
1963 }
1964 if (invert) p = 1.0 - p;
1965
1966 n->as_MachIf()->_prob = p;
1967 }
1968
1969 //------------------------------update_succ_freq-------------------------------
1970 // Update the appropriate frequency associated with block 'b', a successor of
1971 // a block in this loop.
1972 void CFGLoop::update_succ_freq(Block* b, float freq) {
1973 if (b->_loop == this) {
1974 if (b == head()) {
1975 // back branch within the loop
1976 // Do nothing now; the loop-carried frequency will be
1977 // adjusted later in scale_freq().
1978 } else {
1979 // simple branch within the loop
1980 b->_freq += freq;
1981 }
1982 } else if (!in_loop_nest(b)) {
1983 // branch is exit from this loop
1984 BlockProbPair bpp(b, freq);
1985 _exits.append(bpp);
1986 } else {
1987 // branch into nested loop
1988 CFGLoop* ch = b->_loop;
1989 ch->_freq += freq;
1990 }
1991 }
1992
1993 //------------------------------in_loop_nest-----------------------------------
1994 // Determine if block b is in the receiver's loop nest.
1995 bool CFGLoop::in_loop_nest(Block* b) {
1996 int depth = _depth;
1997 CFGLoop* b_loop = b->_loop;
1998 int b_depth = b_loop->_depth;
1999 if (depth == b_depth) {
2000 return true;
2001 }
2002 while (b_depth > depth) {
2003 b_loop = b_loop->_parent;
2004 b_depth = b_loop->_depth;
2005 }
2006 return b_loop == this;
2007 }
2008
2009 //------------------------------scale_freq-------------------------------------
2010 // Scale frequency of loops and blocks by trip counts from outer loops.
2011 // Do a top-down traversal of the loop tree (visit outer loops first).
2012 void CFGLoop::scale_freq() {
2013 float loop_freq = _freq * trip_count();
2014 _freq = loop_freq;
2015 for (int i = 0; i < _members.length(); i++) {
2016 CFGElement* s = _members.at(i);
2017 float block_freq = s->_freq * loop_freq;
2018 if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY)
2019 block_freq = MIN_BLOCK_FREQUENCY;
2020 s->_freq = block_freq;
2021 }
2022 CFGLoop* ch = _child;
2023 while (ch != NULL) {
2024 ch->scale_freq();
2025 ch = ch->_sibling;
2026 }
2027 }
2028
2029 // Frequency of outer loop
2030 float CFGLoop::outer_loop_freq() const {
2031 if (_child != NULL) {
2032 return _child->_freq;
2033 }
2034 return _freq;
2035 }
2036
2037 #ifndef PRODUCT
2038 //------------------------------dump_tree--------------------------------------
2039 void CFGLoop::dump_tree() const {
2040 dump();
2041 if (_child != NULL) _child->dump_tree();
2042 if (_sibling != NULL) _sibling->dump_tree();
2043 }
2044
2045 //------------------------------dump-------------------------------------------
2046 void CFGLoop::dump() const {
2047 for (int i = 0; i < _depth; i++) tty->print(" ");
2048 tty->print("%s: %d trip_count: %6.0f freq: %6.0f\n",
2049 _depth == 0 ?
"Method" : "Loop", _id, trip_count(), _freq); 2050 for (int i = 0; i < _depth; i++) tty->print(" "); 2051 tty->print(" members:"); 2052 int k = 0; 2053 for (int i = 0; i < _members.length(); i++) { 2054 if (k++ >= 6) { 2055 tty->print("\n "); 2056 for (int j = 0; j < _depth+1; j++) tty->print(" "); 2057 k = 0; 2058 } 2059 CFGElement *s = _members.at(i); 2060 if (s->is_block()) { 2061 Block *b = s->as_Block(); 2062 tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq); 2063 } else { 2064 CFGLoop* lp = s->as_CFGLoop(); 2065 tty->print(" L%d(%6.3f)", lp->_id, lp->_freq); 2066 } 2067 } 2068 tty->print("\n"); 2069 for (int i = 0; i < _depth; i++) tty->print(" "); 2070 tty->print(" exits: "); 2071 k = 0; 2072 for (int i = 0; i < _exits.length(); i++) { 2073 if (k++ >= 7) { 2074 tty->print("\n "); 2075 for (int j = 0; j < _depth+1; j++) tty->print(" "); 2076 k = 0; 2077 } 2078 Block *blk = _exits.at(i).get_target(); 2079 float prob = _exits.at(i).get_prob(); 2080 tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100)); 2081 } 2082 tty->print("\n"); 2083 } 2084 #endif