1 /* 2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "libadt/vectset.hpp" 26 #include "memory/allocation.inline.hpp" 27 #include "memory/resourceArea.hpp" 28 #include "compiler/compilerDirectives.hpp" 29 #include "opto/block.hpp" 30 #include "opto/cfgnode.hpp" 31 #include "opto/chaitin.hpp" 32 #include "opto/loopnode.hpp" 33 #include "opto/machnode.hpp" 34 #include "opto/matcher.hpp" 35 #include "opto/opcodes.hpp" 36 #include "opto/rootnode.hpp" 37 #include "utilities/copy.hpp" 38 #include "utilities/powerOfTwo.hpp" 39 40 void Block_Array::grow( uint i ) { 41 _nesting.check(_arena); // Check if a potential reallocation in the arena is safe 42 if (i < Max()) { 43 return; // No need to grow 44 } 45 DEBUG_ONLY(_limit = i+1); 46 if( i < _size ) return; 47 if( !_size ) { 48 _size = 1; 49 _blocks = (Block**)_arena->Amalloc( _size * sizeof(Block*) ); 50 _blocks[0] = nullptr; 51 } 52 uint old = _size; 53 _size = next_power_of_2(i); 54 _blocks = (Block**)_arena->Arealloc( _blocks, old*sizeof(Block*),_size*sizeof(Block*)); 55 Copy::zero_to_bytes( &_blocks[old], (_size-old)*sizeof(Block*) ); 56 } 57 58 void Block_List::remove(uint i) { 59 assert(i < _cnt, "index out of bounds"); 60 Copy::conjoint_words_to_lower((HeapWord*)&_blocks[i+1], (HeapWord*)&_blocks[i], ((_cnt-i-1)*sizeof(Block*))); 61 pop(); // shrink list by one block 62 } 63 64 void Block_List::insert(uint i, Block *b) { 65 push(b); // grow list by one block 66 Copy::conjoint_words_to_higher((HeapWord*)&_blocks[i], (HeapWord*)&_blocks[i+1], ((_cnt-i-1)*sizeof(Block*))); 67 _blocks[i] = b; 68 } 69 70 #ifndef PRODUCT 71 void Block_List::print() { 72 for (uint i=0; i < size(); i++) { 73 tty->print("B%d ", _blocks[i]->_pre_order); 74 } 75 tty->print("size = %d\n", size()); 76 } 77 #endif 78 79 uint Block::code_alignment() const { 80 // Check for Root block 81 if (_pre_order == 0) return CodeEntryAlignment; 82 // Check for Start block 83 if (_pre_order == 1) return InteriorEntryAlignment; 84 // Check for loop alignment 85 if (has_loop_alignment()) return loop_alignment(); 86 87 return relocInfo::addr_unit(); // no particular alignment 88 } 89 90 uint Block::compute_loop_alignment() { 91 Node *h = head(); 92 int unit_sz = relocInfo::addr_unit(); 93 if (h->is_Loop() && h->as_Loop()->is_inner_loop()) { 94 // Pre- and post-loops have low trip count so do not bother with 95 // NOPs for align loop head. 
The constants are hidden from tuning 96 // but only because my "divide by 4" heuristic surely gets nearly 97 // all possible gain (a "do not align at all" heuristic has a 98 // chance of getting a really tiny gain). 99 if (h->is_CountedLoop() && (h->as_CountedLoop()->is_pre_loop() || 100 h->as_CountedLoop()->is_post_loop())) { 101 return (OptoLoopAlignment > 4*unit_sz) ? (OptoLoopAlignment>>2) : unit_sz; 102 } 103 // Loops with low backedge frequency should not be aligned. 104 Node *n = h->in(LoopNode::LoopBackControl)->in(0); 105 if (n->is_MachIf() && n->as_MachIf()->_prob < 0.01) { 106 return unit_sz; // Loop does not loop, more often than not! 107 } 108 return OptoLoopAlignment; // Otherwise align loop head 109 } 110 111 return unit_sz; // no particular alignment 112 } 113 114 // Compute the size of first 'inst_cnt' instructions in this block. 115 // Return the number of instructions left to compute if the block has 116 // less then 'inst_cnt' instructions. Stop, and return 0 if sum_size 117 // exceeds OptoLoopAlignment. 118 uint Block::compute_first_inst_size(uint& sum_size, uint inst_cnt, 119 PhaseRegAlloc* ra) { 120 uint last_inst = number_of_nodes(); 121 for( uint j = 0; j < last_inst && inst_cnt > 0; j++ ) { 122 uint inst_size = get_node(j)->size(ra); 123 if( inst_size > 0 ) { 124 inst_cnt--; 125 uint sz = sum_size + inst_size; 126 if( sz <= (uint)OptoLoopAlignment ) { 127 // Compute size of instructions which fit into fetch buffer only 128 // since all inst_cnt instructions will not fit even if we align them. 129 sum_size = sz; 130 } else { 131 return 0; 132 } 133 } 134 } 135 return inst_cnt; 136 } 137 138 uint Block::find_node( const Node *n ) const { 139 for( uint i = 0; i < number_of_nodes(); i++ ) { 140 if( get_node(i) == n ) 141 return i; 142 } 143 ShouldNotReachHere(); 144 return 0; 145 } 146 147 // Find and remove n from block list 148 void Block::find_remove( const Node *n ) { 149 remove_node(find_node(n)); 150 } 151 152 bool Block::contains(const Node *n) const { 153 return _nodes.contains(n); 154 } 155 156 bool Block::is_trivially_unreachable() const { 157 return num_preds() <= 1 && !head()->is_Root() && !head()->is_Start(); 158 } 159 160 // Return empty status of a block. Empty blocks contain only the head, other 161 // ideal nodes, and an optional trailing goto. 162 int Block::is_Empty() const { 163 164 // Root or start block is not considered empty 165 if (head()->is_Root() || head()->is_Start()) { 166 return not_empty; 167 } 168 169 int success_result = completely_empty; 170 int end_idx = number_of_nodes() - 1; 171 172 // Check for ending goto 173 if ((end_idx > 0) && (get_node(end_idx)->is_MachGoto())) { 174 success_result = empty_with_goto; 175 end_idx--; 176 } 177 178 // Unreachable blocks are considered empty 179 if (is_trivially_unreachable()) { 180 return success_result; 181 } 182 183 // Ideal nodes (except BoxLock) are allowable in empty blocks: skip them. Only 184 // Mach and BoxLock nodes turn directly into code via emit(). 185 while ((end_idx > 0) && 186 !get_node(end_idx)->is_Mach() && 187 !get_node(end_idx)->is_BoxLock()) { 188 end_idx--; 189 } 190 191 // No room for any interesting instructions? 192 if (end_idx == 0) { 193 return success_result; 194 } 195 196 return not_empty; 197 } 198 199 // Return true if the block's code implies that it is likely to be 200 // executed infrequently. Check to see if the block ends in a Halt or 201 // a low probability call. 
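// (A "low probability call" below means one whose profiled call count is at
// most PROB_UNLIKELY_MAG(4), i.e. on the order of 1 in 10,000 executions;
// see the list of magnitude-4 unlikely probabilities in cfgnode.hpp.)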
202 bool Block::has_uncommon_code() const { 203 Node* en = end(); 204 205 if (en->is_MachGoto()) 206 en = en->in(0); 207 if (en->is_Catch()) 208 en = en->in(0); 209 if (en->is_MachProj() && en->in(0)->is_MachCall()) { 210 MachCallNode* call = en->in(0)->as_MachCall(); 211 if (call->cnt() != COUNT_UNKNOWN && call->cnt() <= PROB_UNLIKELY_MAG(4)) { 212 // This is true for slow-path stubs like new_{instance,array}, 213 // slow_arraycopy, complete_monitor_locking, uncommon_trap. 214 // The magic number corresponds to the probability of an uncommon_trap, 215 // even though it is a count not a probability. 216 return true; 217 } 218 } 219 220 int op = en->is_Mach() ? en->as_Mach()->ideal_Opcode() : en->Opcode(); 221 return op == Op_Halt; 222 } 223 224 // True if block is low enough frequency or guarded by a test which 225 // mostly does not go here. 226 bool PhaseCFG::is_uncommon(const Block* block) { 227 // Initial blocks must never be moved, so are never uncommon. 228 if (block->head()->is_Root() || block->head()->is_Start()) return false; 229 230 // Check for way-low freq 231 if(block->_freq < BLOCK_FREQUENCY(0.00001f) ) return true; 232 233 // Look for code shape indicating uncommon_trap or slow path 234 if (block->has_uncommon_code()) return true; 235 236 const float epsilon = 0.05f; 237 const float guard_factor = PROB_UNLIKELY_MAG(4) / (1.f - epsilon); 238 uint uncommon_preds = 0; 239 uint freq_preds = 0; 240 uint uncommon_for_freq_preds = 0; 241 242 for( uint i=1; i< block->num_preds(); i++ ) { 243 Block* guard = get_block_for_node(block->pred(i)); 244 // Check to see if this block follows its guard 1 time out of 10000 245 // or less. 246 // 247 // See list of magnitude-4 unlikely probabilities in cfgnode.hpp which 248 // we intend to be "uncommon", such as slow-path TLE allocation, 249 // predicted call failure, and uncommon trap triggers. 250 // 251 // Use an epsilon value of 5% to allow for variability in frequency 252 // predictions and floating point calculations. The net effect is 253 // that guard_factor is set to 9500. 254 // 255 // Ignore low-frequency blocks. 256 // The next check is (guard->_freq < 1.e-5 * 9500.). 257 if(guard->_freq*BLOCK_FREQUENCY(guard_factor) < BLOCK_FREQUENCY(0.00001f)) { 258 uncommon_preds++; 259 } else { 260 freq_preds++; 261 if(block->_freq < guard->_freq * guard_factor ) { 262 uncommon_for_freq_preds++; 263 } 264 } 265 } 266 if( block->num_preds() > 1 && 267 // The block is uncommon if all preds are uncommon or 268 (uncommon_preds == (block->num_preds()-1) || 269 // it is uncommon for all frequent preds. 270 uncommon_for_freq_preds == freq_preds) ) { 271 return true; 272 } 273 return false; 274 } 275 276 #ifndef PRODUCT 277 void Block::dump_bidx(const Block* orig, outputStream* st) const { 278 if (_pre_order) st->print("B%d", _pre_order); 279 else st->print("N%d", head()->_idx); 280 281 if (Verbose && orig != this) { 282 // Dump the original block's idx 283 st->print(" ("); 284 orig->dump_bidx(orig, st); 285 st->print(")"); 286 } 287 } 288 289 void Block::dump_pred(const PhaseCFG* cfg, Block* orig, outputStream* st) const { 290 if (is_connector()) { 291 for (uint i=1; i<num_preds(); i++) { 292 Block *p = cfg->get_block_for_node(pred(i)); 293 p->dump_pred(cfg, orig, st); 294 } 295 } else { 296 dump_bidx(orig, st); 297 st->print(" "); 298 } 299 } 300 301 void Block::dump_head(const PhaseCFG* cfg, outputStream* st) const { 302 // Print the basic block. 303 dump_bidx(this, st); 304 st->print(": "); 305 306 // Print the outgoing CFG edges. 
307 st->print("#\tout( "); 308 for( uint i=0; i<_num_succs; i++ ) { 309 non_connector_successor(i)->dump_bidx(_succs[i], st); 310 st->print(" "); 311 } 312 313 // Print the incoming CFG edges. 314 st->print(") <- "); 315 if( head()->is_block_start() ) { 316 st->print("in( "); 317 for (uint i=1; i<num_preds(); i++) { 318 Node *s = pred(i); 319 if (cfg != nullptr) { 320 Block *p = cfg->get_block_for_node(s); 321 p->dump_pred(cfg, p, st); 322 } else { 323 while (!s->is_block_start()) { 324 s = s->in(0); 325 } 326 st->print("N%d ", s->_idx ); 327 } 328 } 329 st->print(") "); 330 } else { 331 st->print("BLOCK HEAD IS JUNK "); 332 } 333 334 // Print loop, if any 335 const Block *bhead = this; // Head of self-loop 336 Node *bh = bhead->head(); 337 338 if ((cfg != nullptr) && bh->is_Loop() && !head()->is_Root()) { 339 LoopNode *loop = bh->as_Loop(); 340 const Block *bx = cfg->get_block_for_node(loop->in(LoopNode::LoopBackControl)); 341 while (bx->is_connector()) { 342 bx = cfg->get_block_for_node(bx->pred(1)); 343 } 344 st->print("Loop( B%d-B%d ", bhead->_pre_order, bx->_pre_order); 345 // Dump any loop-specific bits, especially for CountedLoops. 346 loop->dump_spec(st); 347 st->print(")"); 348 } else if (has_loop_alignment()) { 349 st->print("top-of-loop"); 350 } 351 352 // Print frequency and other optimization-relevant information 353 st->print(" Freq: %g",_freq); 354 if( Verbose || WizardMode ) { 355 st->print(" IDom: %d/#%d", _idom ? _idom->_pre_order : 0, _dom_depth); 356 st->print(" RegPressure: %d",_reg_pressure); 357 st->print(" IHRP Index: %d",_ihrp_index); 358 st->print(" FRegPressure: %d",_freg_pressure); 359 st->print(" FHRP Index: %d",_fhrp_index); 360 } 361 st->cr(); 362 } 363 364 void Block::dump() const { 365 dump(nullptr); 366 } 367 368 void Block::dump(const PhaseCFG* cfg) const { 369 dump_head(cfg); 370 for (uint i=0; i< number_of_nodes(); i++) { 371 get_node(i)->dump(); 372 } 373 tty->print("\n"); 374 } 375 #endif 376 377 PhaseCFG::PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher) 378 : Phase(CFG) 379 , _root(root) 380 , _blocks(arena) 381 , _block_arena(arena) 382 , _regalloc(nullptr) 383 , _scheduling_for_pressure(false) 384 , _matcher(matcher) 385 , _node_to_block_mapping(arena) 386 , _node_latency(nullptr) 387 #ifndef PRODUCT 388 , _trace_opto_pipelining(C->directive()->TraceOptoPipeliningOption) 389 #endif 390 #ifdef ASSERT 391 , _raw_oops(arena) 392 #endif 393 { 394 ResourceMark rm; 395 // I'll need a few machine-specific GotoNodes. Make an Ideal GotoNode, 396 // then Match it into a machine-specific Node. Then clone the machine 397 // Node on demand. 398 Node *x = new GotoNode(nullptr); 399 x->init_req(0, x); 400 _goto = matcher.match_tree(x); 401 assert(_goto != nullptr || C->failure_is_artificial(), ""); 402 if (C->failing()) { 403 return; 404 } 405 _goto->set_req(0,_goto); 406 407 // Build the CFG in Reverse Post Order 408 _number_of_blocks = build_cfg(); 409 _root_block = get_block_for_node(_root); 410 } 411 412 // Build a proper looking CFG. Make every block begin with either a StartNode 413 // or a RegionNode. Make every block end with either a Goto, If or Return. 414 // The RootNode both starts and ends it's own block. Do this with a recursive 415 // backwards walk over the control edges. 
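// Roughly, the walk below proceeds as follows (illustrative summary): follow
// each control input backwards from Root; if a path does not end in a proper
// block-ending node, a clone of the shared GotoNode is inserted so that it
// does, and if a block does not begin with a Region or Start, a two-input
// RegionNode is inserted as its head. An edge that splits at its source (an
// If or Switch projection) and merges at a many-input Region is a critical
// edge and likewise gets a cloned Goto of its own, e.g.:
//
//   IfTrue ----------> Region(many)   becomes   IfTrue --> Goto --> Region(many)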
416 uint PhaseCFG::build_cfg() { 417 VectorSet visited; 418 419 // Allocate stack with enough space to avoid frequent realloc 420 Node_Stack nstack(C->live_nodes() >> 1); 421 nstack.push(_root, 0); 422 uint sum = 0; // Counter for blocks 423 424 while (nstack.is_nonempty()) { 425 // node and in's index from stack's top 426 // 'np' is _root (see above) or RegionNode, StartNode: we push on stack 427 // only nodes which point to the start of basic block (see below). 428 Node *np = nstack.node(); 429 // idx > 0, except for the first node (_root) pushed on stack 430 // at the beginning when idx == 0. 431 // We will use the condition (idx == 0) later to end the build. 432 uint idx = nstack.index(); 433 Node *proj = np->in(idx); 434 const Node *x = proj->is_block_proj(); 435 // Does the block end with a proper block-ending Node? One of Return, 436 // If or Goto? (This check should be done for visited nodes also). 437 if (x == nullptr) { // Does not end right... 438 Node *g = _goto->clone(); // Force it to end in a Goto 439 g->set_req(0, proj); 440 np->set_req(idx, g); 441 x = proj = g; 442 } 443 if (!visited.test_set(x->_idx)) { // Visit this block once 444 // Skip any control-pinned middle'in stuff 445 Node *p = proj; 446 do { 447 proj = p; // Update pointer to last Control 448 p = p->in(0); // Move control forward 449 } while( !p->is_block_proj() && 450 !p->is_block_start() ); 451 // Make the block begin with one of Region or StartNode. 452 if( !p->is_block_start() ) { 453 RegionNode *r = new RegionNode( 2 ); 454 r->init_req(1, p); // Insert RegionNode in the way 455 proj->set_req(0, r); // Insert RegionNode in the way 456 p = r; 457 } 458 // 'p' now points to the start of this basic block 459 460 // Put self in array of basic blocks 461 Block *bb = new (_block_arena) Block(_block_arena, p); 462 map_node_to_block(p, bb); 463 map_node_to_block(x, bb); 464 if( x != p ) { // Only for root is x == p 465 bb->push_node((Node*)x); 466 } 467 // Now handle predecessors 468 ++sum; // Count 1 for self block 469 uint cnt = bb->num_preds(); 470 for (int i = (cnt - 1); i > 0; i-- ) { // For all predecessors 471 Node *prevproj = p->in(i); // Get prior input 472 assert( !prevproj->is_Con(), "dead input not removed" ); 473 // Check to see if p->in(i) is a "control-dependent" CFG edge - 474 // i.e., it splits at the source (via an IF or SWITCH) and merges 475 // at the destination (via a many-input Region). 476 // This breaks critical edges. The RegionNode to start the block 477 // will be added when <p,i> is pulled off the node stack 478 if ( cnt > 2 ) { // Merging many things? 479 assert( prevproj== bb->pred(i),""); 480 if(prevproj->is_block_proj() != prevproj) { // Control-dependent edge? 481 // Force a block on the control-dependent edge 482 Node *g = _goto->clone(); // Force it to end in a Goto 483 g->set_req(0,prevproj); 484 p->set_req(i,g); 485 } 486 } 487 nstack.push(p, i); // 'p' is RegionNode or StartNode 488 } 489 } else { // Post-processing visited nodes 490 nstack.pop(); // remove node from stack 491 // Check if it the fist node pushed on stack at the beginning. 
492 if (idx == 0) break; // end of the build 493 // Find predecessor basic block 494 Block *pb = get_block_for_node(x); 495 // Insert into nodes array, if not already there 496 if (!has_block(proj)) { 497 assert( x != proj, "" ); 498 // Map basic block of projection 499 map_node_to_block(proj, pb); 500 pb->push_node(proj); 501 } 502 // Insert self as a child of my predecessor block 503 pb->_succs.map(pb->_num_succs++, get_block_for_node(np)); 504 assert( pb->get_node(pb->number_of_nodes() - pb->_num_succs)->is_block_proj(), 505 "too many control users, not a CFG?" ); 506 } 507 } 508 // Return number of basic blocks for all children and self 509 return sum; 510 } 511 512 // Inserts a goto & corresponding basic block between 513 // block[block_no] and its succ_no'th successor block 514 void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) { 515 // get block with block_no 516 assert(block_no < number_of_blocks(), "illegal block number"); 517 Block* in = get_block(block_no); 518 // get successor block succ_no 519 assert(succ_no < in->_num_succs, "illegal successor number"); 520 Block* out = in->_succs[succ_no]; 521 // Compute frequency of the new block. Do this before inserting 522 // new block in case succ_prob() needs to infer the probability from 523 // surrounding blocks. 524 float freq = in->_freq * in->succ_prob(succ_no); 525 // get ProjNode corresponding to the succ_no'th successor of the in block 526 ProjNode* proj = in->get_node(in->number_of_nodes() - in->_num_succs + succ_no)->as_Proj(); 527 // create region for basic block 528 RegionNode* region = new RegionNode(2); 529 region->init_req(1, proj); 530 // setup corresponding basic block 531 Block* block = new (_block_arena) Block(_block_arena, region); 532 map_node_to_block(region, block); 533 C->regalloc()->set_bad(region->_idx); 534 // add a goto node 535 Node* gto = _goto->clone(); // get a new goto node 536 gto->set_req(0, region); 537 // add it to the basic block 538 block->push_node(gto); 539 map_node_to_block(gto, block); 540 C->regalloc()->set_bad(gto->_idx); 541 // hook up successor block 542 block->_succs.map(block->_num_succs++, out); 543 // remap successor's predecessors if necessary 544 for (uint i = 1; i < out->num_preds(); i++) { 545 if (out->pred(i) == proj) out->head()->set_req(i, gto); 546 } 547 // remap predecessor's successor to new block 548 in->_succs.map(succ_no, block); 549 // Set the frequency of the new block 550 block->_freq = freq; 551 // add new basic block to basic block list 552 add_block_at(block_no + 1, block); 553 // Update dominator tree information of the new goto block. 554 block->_idom = in; 555 block->_dom_depth = in->_dom_depth + 1; 556 if (out->_idom != in) { 557 // The successor block was not immediately dominated by the predecessor 558 // block, so there is no dominator subtree to update. 559 return; 560 } 561 // Update immediate dominator of the successor block. 562 out->_idom = block; 563 // Increment the dominator tree depth of the goto block's descendants. These 564 // are found by a depth-first search starting from the successor block. Two 565 // domination properties guarantee that only descendant blocks are visited: 566 // 1) all dominators of a block b must appear in any path from the root to b; 567 // 2) if a block b does not dominate another block b', b cannot dominate any 568 // block reachable from b' either. 569 // The exploration uses header indices as block identifiers, since 570 // Block::_pre_order might not be unique in the context of this function. 
571 ResourceMark rm; 572 VectorSet descendants; 573 descendants.set(block->head()->_idx); // The goto block is a descendant of itself. 574 Block_List worklist; 575 worklist.push(out); // Start exploring from the successor block. 576 while (worklist.size() > 0) { 577 Block* b = worklist.pop(); 578 // The immediate dominator of b is a descendant, hence b is also a 579 // descendant. Even though all predecessors of b might not have been visited 580 // yet, we know that all dominators of b have been already visited (since 581 // they must appear in any path from the goto block to b). 582 descendants.set(b->head()->_idx); 583 b->_dom_depth++; 584 for (uint i = 0; i < b->_num_succs; i++) { 585 Block* s = b->_succs[i]; 586 if (s != get_root_block() && 587 !descendants.test(s->head()->_idx) && 588 // Do not search below non-descendant successors, since any block 589 // reachable from them cannot be descendant either. 590 descendants.test(s->_idom->head()->_idx)) { 591 worklist.push(s); 592 } 593 } 594 } 595 } 596 597 // Does this block end in a multiway branch that cannot have the default case 598 // flipped for another case? 599 static bool no_flip_branch(Block *b) { 600 int branch_idx = b->number_of_nodes() - b->_num_succs-1; 601 if (branch_idx < 1) { 602 return false; 603 } 604 Node *branch = b->get_node(branch_idx); 605 if (branch->is_Catch()) { 606 return true; 607 } 608 if (branch->is_Mach()) { 609 if (branch->is_MachNullCheck()) { 610 return true; 611 } 612 int iop = branch->as_Mach()->ideal_Opcode(); 613 if (iop == Op_FastLock || iop == Op_FastUnlock) { 614 return true; 615 } 616 // Don't flip if branch has an implicit check. 617 if (branch->as_Mach()->is_TrapBasedCheckNode()) { 618 return true; 619 } 620 } 621 return false; 622 } 623 624 // Check for NeverBranch at block end. This needs to become a GOTO to the 625 // true target. NeverBranch are treated as a conditional branch that always 626 // goes the same direction for most of the optimizer and are used to give a 627 // fake exit path to infinite loops. At this late stage they need to turn 628 // into Goto's so that when you enter the infinite loop you indeed hang. 629 void PhaseCFG::convert_NeverBranch_to_Goto(Block *b) { 630 int end_idx = b->end_idx(); 631 NeverBranchNode* never_branch = b->get_node(end_idx)->as_NeverBranch(); 632 Block* succ = get_block_for_node(never_branch->proj_out(0)->unique_ctrl_out()); 633 Block* dead = get_block_for_node(never_branch->proj_out(1)->unique_ctrl_out()); 634 assert(succ == b->_succs[0] || succ == b->_succs[1], "succ is a successor"); 635 assert(dead == b->_succs[0] || dead == b->_succs[1], "dead is a successor"); 636 637 Node* gto = _goto->clone(); // get a new goto node 638 gto->set_req(0, b->head()); 639 Node *bp = b->get_node(end_idx); 640 b->map_node(gto, end_idx); // Slam over NeverBranch 641 map_node_to_block(gto, b); 642 C->regalloc()->set_bad(gto->_idx); 643 b->pop_node(); // Yank projections 644 b->pop_node(); // Yank projections 645 b->_succs.map(0,succ); // Map only successor 646 b->_num_succs = 1; 647 // remap successor's predecessors if necessary 648 uint j; 649 for (j = 1; j < succ->num_preds(); j++) { 650 if (succ->pred(j)->in(0) == bp) { 651 succ->head()->set_req(j, gto); 652 } 653 } 654 // Kill alternate exit path 655 for (j = 1; j < dead->num_preds(); j++) { 656 if (dead->pred(j)->in(0) == bp) { 657 break; 658 } 659 } 660 // Scan through block, yanking dead path from 661 // all regions and phis. 
662 dead->head()->del_req(j); 663 for (int k = 1; dead->get_node(k)->is_Phi(); k++) { 664 dead->get_node(k)->del_req(j); 665 } 666 } 667 668 // Helper function to move block bx to the slot following b_index. Return 669 // true if the move is successful, otherwise false 670 bool PhaseCFG::move_to_next(Block* bx, uint b_index) { 671 if (bx == nullptr) return false; 672 673 // Return false if bx is already scheduled. 674 uint bx_index = bx->_pre_order; 675 if ((bx_index <= b_index) && (get_block(bx_index) == bx)) { 676 return false; 677 } 678 679 // Find the current index of block bx on the block list 680 bx_index = b_index + 1; 681 while (bx_index < number_of_blocks() && get_block(bx_index) != bx) { 682 bx_index++; 683 } 684 assert(get_block(bx_index) == bx, "block not found"); 685 686 // If the previous block conditionally falls into bx, return false, 687 // because moving bx will create an extra jump. 688 for(uint k = 1; k < bx->num_preds(); k++ ) { 689 Block* pred = get_block_for_node(bx->pred(k)); 690 if (pred == get_block(bx_index - 1)) { 691 if (pred->_num_succs != 1) { 692 return false; 693 } 694 } 695 } 696 697 // Reinsert bx just past block 'b' 698 _blocks.remove(bx_index); 699 _blocks.insert(b_index + 1, bx); 700 return true; 701 } 702 703 // Move empty and uncommon blocks to the end. 704 void PhaseCFG::move_to_end(Block *b, uint i) { 705 int e = b->is_Empty(); 706 if (e != Block::not_empty) { 707 if (e == Block::empty_with_goto) { 708 // Remove the goto, but leave the block. 709 b->pop_node(); 710 } 711 // Mark this block as a connector block, which will cause it to be 712 // ignored in certain functions such as non_connector_successor(). 713 b->set_connector(); 714 } 715 // Move the empty block to the end, and don't recheck. 716 _blocks.remove(i); 717 _blocks.push(b); 718 } 719 720 // Set loop alignment for every block 721 void PhaseCFG::set_loop_alignment() { 722 uint last = number_of_blocks(); 723 assert(get_block(0) == get_root_block(), ""); 724 725 for (uint i = 1; i < last; i++) { 726 Block* block = get_block(i); 727 if (block->head()->is_Loop()) { 728 block->set_loop_alignment(block); 729 } 730 } 731 } 732 733 // Make empty basic blocks to be "connector" blocks, Move uncommon blocks 734 // to the end. 735 void PhaseCFG::remove_empty_blocks() { 736 // Move uncommon blocks to the end 737 uint last = number_of_blocks(); 738 assert(get_block(0) == get_root_block(), ""); 739 740 for (uint i = 1; i < last; i++) { 741 Block* block = get_block(i); 742 if (block->is_connector()) { 743 break; 744 } 745 746 // Check for NeverBranch at block end. This needs to become a GOTO to the 747 // true target. NeverBranch are treated as a conditional branch that 748 // always goes the same direction for most of the optimizer and are used 749 // to give a fake exit path to infinite loops. At this late stage they 750 // need to turn into Goto's so that when you enter the infinite loop you 751 // indeed hang. 752 if (block->get_node(block->end_idx())->is_NeverBranch()) { 753 convert_NeverBranch_to_Goto(block); 754 } 755 756 // Look for uncommon blocks and move to end. 757 if (!C->do_freq_based_layout()) { 758 if (is_uncommon(block)) { 759 move_to_end(block, i); 760 last--; // No longer check for being uncommon! 761 if (no_flip_branch(block)) { // Fall-thru case must follow? 
762 // Find the fall-thru block 763 block = get_block(i); 764 move_to_end(block, i); 765 last--; 766 } 767 // backup block counter post-increment 768 i--; 769 } 770 } 771 } 772 773 // Move empty blocks to the end 774 last = number_of_blocks(); 775 for (uint i = 1; i < last; i++) { 776 Block* block = get_block(i); 777 if (block->is_Empty() != Block::not_empty) { 778 move_to_end(block, i); 779 last--; 780 i--; 781 } 782 } // End of for all blocks 783 } 784 785 Block *PhaseCFG::fixup_trap_based_check(Node *branch, Block *block, int block_pos, Block *bnext) { 786 // Trap based checks must fall through to the successor with 787 // PROB_ALWAYS. 788 // They should be an If with 2 successors. 789 assert(branch->is_MachIf(), "must be If"); 790 assert(block->_num_succs == 2, "must have 2 successors"); 791 792 // Get the If node and the projection for the first successor. 793 MachIfNode *iff = block->get_node(block->number_of_nodes()-3)->as_MachIf(); 794 ProjNode *proj0 = block->get_node(block->number_of_nodes()-2)->as_Proj(); 795 ProjNode *proj1 = block->get_node(block->number_of_nodes()-1)->as_Proj(); 796 ProjNode *projt = (proj0->Opcode() == Op_IfTrue) ? proj0 : proj1; 797 ProjNode *projf = (proj0->Opcode() == Op_IfFalse) ? proj0 : proj1; 798 799 // Assert that proj0 and succs[0] match up. Similarly for proj1 and succs[1]. 800 assert(proj0->raw_out(0) == block->_succs[0]->head(), "Mismatch successor 0"); 801 assert(proj1->raw_out(0) == block->_succs[1]->head(), "Mismatch successor 1"); 802 803 ProjNode *proj_always; 804 ProjNode *proj_never; 805 // We must negate the branch if the implicit check doesn't follow 806 // the branch's TRUE path. Then, the new TRUE branch target will 807 // be the old FALSE branch target. 808 if (iff->_prob <= 2*PROB_NEVER) { // There are small rounding errors. 809 proj_never = projt; 810 proj_always = projf; 811 } else { 812 // We must negate the branch if the trap doesn't follow the 813 // branch's TRUE path. Then, the new TRUE branch target will 814 // be the old FALSE branch target. 815 proj_never = projf; 816 proj_always = projt; 817 iff->negate(); 818 } 819 assert(iff->_prob <= 2*PROB_NEVER, "Trap based checks are expected to trap never!"); 820 // Map the successors properly 821 block->_succs.map(0, get_block_for_node(proj_never ->raw_out(0))); // The target of the trap. 822 block->_succs.map(1, get_block_for_node(proj_always->raw_out(0))); // The fall through target. 823 824 if (block->get_node(block->number_of_nodes() - block->_num_succs + 1) != proj_always) { 825 block->map_node(proj_never, block->number_of_nodes() - block->_num_succs + 0); 826 block->map_node(proj_always, block->number_of_nodes() - block->_num_succs + 1); 827 } 828 829 // Place the fall through block after this block. 830 Block *bs1 = block->non_connector_successor(1); 831 if (bs1 != bnext && move_to_next(bs1, block_pos)) { 832 bnext = bs1; 833 } 834 // If the fall through block still is not the next block, insert a goto. 835 if (bs1 != bnext) { 836 insert_goto_at(block_pos, 1); 837 } 838 return bnext; 839 } 840 841 // Fix up the final control flow for basic blocks. 842 void PhaseCFG::fixup_flow() { 843 // Fixup final control flow for the blocks. Remove jump-to-next 844 // block. If neither arm of an IF follows the conditional branch, we 845 // have to add a second jump after the conditional. We place the 846 // TRUE branch target in succs[0] for both GOTOs and IFs. 
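// For example (illustrative): if a block ends in a conditional branch whose
// taken arm is laid out immediately after it, the test is negated below so
// that the following block becomes the fall-through and the taken branch
// targets the non-following block; if neither arm follows, one successor is
// moved up behind this block when possible, otherwise an extra goto block is
// inserted via insert_goto_at().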
847 for (uint i = 0; i < number_of_blocks(); i++) { 848 Block* block = get_block(i); 849 block->_pre_order = i; // turn pre-order into block-index 850 851 // Connector blocks need no further processing. 852 if (block->is_connector()) { 853 assert((i+1) == number_of_blocks() || get_block(i + 1)->is_connector(), "All connector blocks should sink to the end"); 854 continue; 855 } 856 assert(block->is_Empty() != Block::completely_empty, "Empty blocks should be connectors"); 857 858 Block* bnext = (i < number_of_blocks() - 1) ? get_block(i + 1) : nullptr; 859 Block* bs0 = block->non_connector_successor(0); 860 861 // Check for multi-way branches where I cannot negate the test to 862 // exchange the true and false targets. 863 if (no_flip_branch(block)) { 864 // Find fall through case - if must fall into its target. 865 // Get the index of the branch's first successor. 866 int branch_idx = block->number_of_nodes() - block->_num_succs; 867 868 // The branch is 1 before the branch's first successor. 869 Node *branch = block->get_node(branch_idx-1); 870 871 // Handle no-flip branches which have implicit checks and which require 872 // special block ordering and individual semantics of the 'fall through 873 // case'. 874 if ((TrapBasedNullChecks || TrapBasedRangeChecks) && 875 branch->is_Mach() && branch->as_Mach()->is_TrapBasedCheckNode()) { 876 bnext = fixup_trap_based_check(branch, block, i, bnext); 877 } else { 878 // Else, default handling for no-flip branches 879 for (uint j2 = 0; j2 < block->_num_succs; j2++) { 880 const ProjNode* p = block->get_node(branch_idx + j2)->as_Proj(); 881 if (p->_con == 0) { 882 // successor j2 is fall through case 883 if (block->non_connector_successor(j2) != bnext) { 884 // but it is not the next block => insert a goto 885 insert_goto_at(i, j2); 886 } 887 // Put taken branch in slot 0 888 if (j2 == 0 && block->_num_succs == 2) { 889 // Flip targets in succs map 890 Block *tbs0 = block->_succs[0]; 891 Block *tbs1 = block->_succs[1]; 892 block->_succs.map(0, tbs1); 893 block->_succs.map(1, tbs0); 894 } 895 break; 896 } 897 } 898 } 899 900 // Remove all CatchProjs 901 for (uint j = 0; j < block->_num_succs; j++) { 902 block->pop_node(); 903 } 904 905 } else if (block->_num_succs == 1) { 906 // Block ends in a Goto? 907 if (bnext == bs0) { 908 // We fall into next block; remove the Goto 909 block->pop_node(); 910 } 911 912 } else if(block->_num_succs == 2) { // Block ends in a If? 913 // Get opcode of 1st projection (matches _succs[0]) 914 // Note: Since this basic block has 2 exits, the last 2 nodes must 915 // be projections (in any order), the 3rd last node must be 916 // the IfNode (we have excluded other 2-way exits such as 917 // CatchNodes already). 918 MachNode* iff = block->get_node(block->number_of_nodes() - 3)->as_Mach(); 919 ProjNode* proj0 = block->get_node(block->number_of_nodes() - 2)->as_Proj(); 920 ProjNode* proj1 = block->get_node(block->number_of_nodes() - 1)->as_Proj(); 921 922 // Assert that proj0 and succs[0] match up. Similarly for proj1 and succs[1]. 923 assert(proj0->raw_out(0) == block->_succs[0]->head(), "Mismatch successor 0"); 924 assert(proj1->raw_out(0) == block->_succs[1]->head(), "Mismatch successor 1"); 925 926 Block* bs1 = block->non_connector_successor(1); 927 928 // Check for neither successor block following the current 929 // block ending in a conditional. 
If so, move one of the 930 // successors after the current one, provided that the 931 // successor was previously unscheduled, but moveable 932 // (i.e., all paths to it involve a branch). 933 if (!C->do_freq_based_layout() && bnext != bs0 && bnext != bs1) { 934 // Choose the more common successor based on the probability 935 // of the conditional branch. 936 Block* bx = bs0; 937 Block* by = bs1; 938 939 // _prob is the probability of taking the true path. Make 940 // p the probability of taking successor #1. 941 float p = iff->as_MachIf()->_prob; 942 if (proj0->Opcode() == Op_IfTrue) { 943 p = 1.0 - p; 944 } 945 946 // Prefer successor #1 if p > 0.5 947 if (p > PROB_FAIR) { 948 bx = bs1; 949 by = bs0; 950 } 951 952 // Attempt the more common successor first 953 if (move_to_next(bx, i)) { 954 bnext = bx; 955 } else if (move_to_next(by, i)) { 956 bnext = by; 957 } 958 } 959 960 // Check for conditional branching the wrong way. Negate 961 // conditional, if needed, so it falls into the following block 962 // and branches to the not-following block. 963 964 // Check for the next block being in succs[0]. We are going to branch 965 // to succs[0], so we want the fall-thru case as the next block in 966 // succs[1]. 967 if (bnext == bs0) { 968 // Fall-thru case in succs[0], should be in succs[1], so flip targets in _succs map 969 Block* tbs0 = block->_succs[0]; 970 Block* tbs1 = block->_succs[1]; 971 block->_succs.map(0, tbs1); 972 block->_succs.map(1, tbs0); 973 // Flip projection for each target 974 swap(proj0, proj1); 975 } else if(bnext != bs1) { 976 // Need a double-branch 977 // The existing conditional branch need not change. 978 // Add a unconditional branch to the false target. 979 // Alas, it must appear in its own block and adding a 980 // block this late in the game is complicated. Sigh. 981 insert_goto_at(i, 1); 982 } 983 984 // Make sure we TRUE branch to the target 985 if (proj0->Opcode() == Op_IfFalse) { 986 iff->as_MachIf()->negate(); 987 } 988 989 block->pop_node(); // Remove IfFalse & IfTrue projections 990 block->pop_node(); 991 992 } else { 993 // Multi-exit block, e.g. a switch statement 994 // But we don't need to do anything here 995 } 996 } // End of for all blocks 997 } 998 999 void PhaseCFG::remove_unreachable_blocks() { 1000 ResourceMark rm; 1001 Block_List unreachable; 1002 // Initialize worklist of unreachable blocks to be removed. 1003 for (uint i = 0; i < number_of_blocks(); i++) { 1004 Block* block = get_block(i); 1005 assert(block->_pre_order == i, "Block::pre_order does not match block index"); 1006 if (block->is_trivially_unreachable()) { 1007 unreachable.push(block); 1008 } 1009 } 1010 // Now remove all blocks that are transitively unreachable. 1011 while (unreachable.size() > 0) { 1012 Block* dead = unreachable.pop(); 1013 // When this code runs (after PhaseCFG::fixup_flow()), Block::_pre_order 1014 // does not contain pre-order but block-list indices. Ensure they stay 1015 // contiguous by decrementing _pre_order for all elements after 'dead'. 1016 // Block::_rpo does not contain valid reverse post-order indices anymore 1017 // (they are invalidated by block insertions in PhaseCFG::fixup_flow()), 1018 // so there is no need to update them. 1019 for (uint i = dead->_pre_order + 1; i < number_of_blocks(); i++) { 1020 get_block(i)->_pre_order--; 1021 } 1022 _blocks.remove(dead->_pre_order); 1023 _number_of_blocks--; 1024 // Update the successors' predecessor list and push new unreachable blocks. 
    for (uint i = 0; i < dead->_num_succs; i++) {
      Block* succ = dead->_succs[i];
      Node* head = succ->head();
      for (int j = head->req() - 1; j >= 1; j--) {
        if (get_block_for_node(head->in(j)) == dead) {
          head->del_req(j);
        }
      }
      if (succ->is_trivially_unreachable()) {
        unreachable.push(succ);
      }
    }
  }
}

// postalloc_expand: Expand nodes after register allocation.
//
// postalloc_expand has to be called after register allocation, just
// before output (i.e., scheduling). It only gets called if
// Matcher::require_postalloc_expand is true.
//
// Background:
//
// Nodes that are expanded (a compound node requiring several assembler
// instructions to implement is split into two or more non-compound
// nodes) after register allocation are not as nice as the ones expanded
// before register allocation - they do not participate in optimizations
// such as global code motion. But after register allocation we can
// expand nodes that use registers which are not spillable or registers
// that are not allocated, because the old compound node is simply
// replaced (in its location in the basic block) by a new subgraph which
// does not contain compound nodes any more. The scheduler called during
// output can later on process these non-compound nodes.
//
// Implementation:
//
// Nodes requiring postalloc expand are specified in the ad file by using
// a postalloc_expand statement instead of ins_encode. A postalloc_expand
// contains a single call to an encoding, as does an ins_encode
// statement. Instead of an emit() function, a postalloc_expand() function
// is generated that doesn't emit assembler but creates a new
// subgraph. The code below calls this postalloc_expand function for each
// node with the appropriate attribute. This function returns the new
// nodes generated in an array passed in the call. The old node, potential
// MachTemps before it and potential Projs after it then get disconnected
// and replaced by the new nodes. The instruction generating the result
// has to be the last one in the array. In general it is assumed that
// Projs after the expanded node are kills. These kills are not required
// any more after expanding, as there are now explicitly visible def-use
// chains, and the Projs are removed. This does not hold for calls: they
// have not only kill-Projs but also Projs defining values. Therefore
// Projs after the expanded node are removed for all nodes except calls.
// If a node is to be reused, it must be added to the list of nodes
// returned, and it will be added again.
//
// Implementing the postalloc_expand function for a node in an enc_class
// is rather tedious. It requires knowledge about many node details, as
// the nodes and the subgraph must be hand crafted. To simplify this,
// adlc generates some utility variables into the postalloc_expand
// function, e.g., holding the operands as specified by the
// postalloc_expand encoding specification:
// * unsigned idx_<par_name> holding the index of the node in the ins
// * Node *n_<par_name> holding the node loaded from the ins
// * MachOpnd *op_<par_name> holding the corresponding operand
//
// The ordering of operands cannot be determined by looking at a rule.
Especially if a match rule matches several different trees, 1094 // several nodes are generated from one instruct specification with 1095 // different operand orderings. In this case the adlc generated 1096 // variables are the only way to access the ins and operands 1097 // deterministically. 1098 // 1099 // If assigning a register to a node that contains an oop, don't 1100 // forget to call ra_->set_oop() for the node. 1101 void PhaseCFG::postalloc_expand(PhaseRegAlloc* _ra) { 1102 GrowableArray <Node *> new_nodes(32); // Array with new nodes filled by postalloc_expand function of node. 1103 GrowableArray <Node *> remove(32); 1104 GrowableArray <Node *> succs(32); 1105 unsigned int max_idx = C->unique(); // Remember to distinguish new from old nodes. 1106 DEBUG_ONLY(bool foundNode = false); 1107 1108 // for all blocks 1109 for (uint i = 0; i < number_of_blocks(); i++) { 1110 Block *b = _blocks[i]; 1111 // For all instructions in the current block. 1112 for (uint j = 0; j < b->number_of_nodes(); j++) { 1113 Node *n = b->get_node(j); 1114 if (n->is_Mach() && n->as_Mach()->requires_postalloc_expand()) { 1115 #ifdef ASSERT 1116 if (TracePostallocExpand) { 1117 if (!foundNode) { 1118 foundNode = true; 1119 tty->print("POSTALLOC EXPANDING %d %s\n", C->compile_id(), 1120 C->method() ? C->method()->name()->as_utf8() : C->stub_name()); 1121 } 1122 tty->print(" postalloc expanding "); n->dump(); 1123 if (Verbose) { 1124 tty->print(" with ins:\n"); 1125 for (uint k = 0; k < n->len(); ++k) { 1126 if (n->in(k)) { tty->print(" "); n->in(k)->dump(); } 1127 } 1128 } 1129 } 1130 #endif 1131 new_nodes.clear(); 1132 // Collect nodes that have to be removed from the block later on. 1133 uint req = n->req(); 1134 remove.clear(); 1135 for (uint k = 0; k < req; ++k) { 1136 if (n->in(k) && n->in(k)->is_MachTemp()) { 1137 remove.push(n->in(k)); // MachTemps which are inputs to the old node have to be removed. 1138 n->in(k)->del_req(0); 1139 j--; 1140 } 1141 } 1142 1143 // Check whether we can allocate enough nodes. We set a fix limit for 1144 // the size of postalloc expands with this. 1145 uint unique_limit = C->unique() + 40; 1146 if (unique_limit >= _ra->node_regs_max_index()) { 1147 Compile::current()->record_failure("out of nodes in postalloc expand"); 1148 return; 1149 } 1150 1151 // Emit (i.e. generate new nodes). 1152 n->as_Mach()->postalloc_expand(&new_nodes, _ra); 1153 1154 assert(C->unique() < unique_limit, "You allocated too many nodes in your postalloc expand."); 1155 1156 // Disconnect the inputs of the old node. 1157 // 1158 // We reuse MachSpillCopy nodes. If we need to expand them, there 1159 // are many, so reusing pays off. If reused, the node already 1160 // has the new ins. n must be the last node on new_nodes list. 1161 if (!n->is_MachSpillCopy()) { 1162 for (int k = req - 1; k >= 0; --k) { 1163 n->del_req(k); 1164 } 1165 } 1166 1167 #ifdef ASSERT 1168 // Check that all nodes have proper operands. 1169 for (int k = 0; k < new_nodes.length(); ++k) { 1170 if (new_nodes.at(k)->_idx < max_idx || !new_nodes.at(k)->is_Mach()) continue; // old node, Proj ... 
1171 MachNode *m = new_nodes.at(k)->as_Mach(); 1172 for (unsigned int l = 0; l < m->num_opnds(); ++l) { 1173 if (MachOper::notAnOper(m->_opnds[l])) { 1174 outputStream *os = tty; 1175 os->print("Node %s ", m->Name()); 1176 os->print("has invalid opnd %d: %p\n", l, m->_opnds[l]); 1177 assert(0, "Invalid operands, see inline trace in hs_err_pid file."); 1178 } 1179 } 1180 } 1181 #endif 1182 1183 // Collect succs of old node in remove (for projections) and in succs (for 1184 // all other nodes) do _not_ collect projections in remove (but in succs) 1185 // in case the node is a call. We need the projections for calls as they are 1186 // associated with registers (i.e. they are defs). 1187 succs.clear(); 1188 for (DUIterator k = n->outs(); n->has_out(k); k++) { 1189 if (n->out(k)->is_Proj() && !n->is_MachCall() && !n->is_MachBranch()) { 1190 remove.push(n->out(k)); 1191 } else { 1192 succs.push(n->out(k)); 1193 } 1194 } 1195 // Replace old node n as input of its succs by last of the new nodes. 1196 for (int k = 0; k < succs.length(); ++k) { 1197 Node *succ = succs.at(k); 1198 for (uint l = 0; l < succ->req(); ++l) { 1199 if (succ->in(l) == n) { 1200 succ->set_req(l, new_nodes.at(new_nodes.length() - 1)); 1201 } 1202 } 1203 for (uint l = succ->req(); l < succ->len(); ++l) { 1204 if (succ->in(l) == n) { 1205 succ->set_prec(l, new_nodes.at(new_nodes.length() - 1)); 1206 } 1207 } 1208 } 1209 1210 // Index of old node in block. 1211 uint index = b->find_node(n); 1212 // Insert new nodes into block and map them in nodes->blocks array 1213 // and remember last node in n2. 1214 Node *n2 = nullptr; 1215 for (int k = 0; k < new_nodes.length(); ++k) { 1216 n2 = new_nodes.at(k); 1217 b->insert_node(n2, ++index); 1218 map_node_to_block(n2, b); 1219 } 1220 1221 // Add old node n to remove and remove them all from block. 1222 remove.push(n); 1223 j--; 1224 #ifdef ASSERT 1225 if (TracePostallocExpand && Verbose) { 1226 tty->print(" removing:\n"); 1227 for (int k = 0; k < remove.length(); ++k) { 1228 tty->print(" "); remove.at(k)->dump(); 1229 } 1230 tty->print(" inserting:\n"); 1231 for (int k = 0; k < new_nodes.length(); ++k) { 1232 tty->print(" "); new_nodes.at(k)->dump(); 1233 } 1234 } 1235 #endif 1236 for (int k = 0; k < remove.length(); ++k) { 1237 if (b->contains(remove.at(k))) { 1238 b->find_remove(remove.at(k)); 1239 } else { 1240 assert(remove.at(k)->is_Proj() && (remove.at(k)->in(0)->is_MachBranch()), ""); 1241 } 1242 } 1243 // If anything has been inserted (n2 != nullptr), continue after last node inserted. 1244 // This does not always work. Some postalloc expands don't insert any nodes, if they 1245 // do optimizations (e.g., max(x,x)). In this case we decrement j accordingly. 1246 j = n2 ? b->find_node(n2) : j; 1247 } 1248 } 1249 } 1250 1251 #ifdef ASSERT 1252 if (foundNode) { 1253 tty->print("FINISHED %d %s\n", C->compile_id(), 1254 C->method() ? 
C->method()->name()->as_utf8() : C->stub_name()); 1255 tty->flush(); 1256 } 1257 #endif 1258 } 1259 1260 1261 //------------------------------dump------------------------------------------- 1262 #ifndef PRODUCT 1263 void PhaseCFG::_dump_cfg( const Node *end, VectorSet &visited ) const { 1264 const Node *x = end->is_block_proj(); 1265 assert( x, "not a CFG" ); 1266 1267 // Do not visit this block again 1268 if( visited.test_set(x->_idx) ) return; 1269 1270 // Skip through this block 1271 const Node *p = x; 1272 do { 1273 p = p->in(0); // Move control forward 1274 assert( !p->is_block_proj() || p->is_Root(), "not a CFG" ); 1275 } while( !p->is_block_start() ); 1276 1277 // Recursively visit 1278 for (uint i = 1; i < p->req(); i++) { 1279 _dump_cfg(p->in(i), visited); 1280 } 1281 1282 // Dump the block 1283 get_block_for_node(p)->dump(this); 1284 } 1285 1286 void PhaseCFG::dump( ) const { 1287 tty->print("\n--- CFG --- %d BBs\n", number_of_blocks()); 1288 if (_blocks.size()) { // Did we do basic-block layout? 1289 for (uint i = 0; i < number_of_blocks(); i++) { 1290 const Block* block = get_block(i); 1291 block->dump(this); 1292 } 1293 } else { // Else do it with a DFS 1294 VectorSet visited(_block_arena); 1295 _dump_cfg(_root,visited); 1296 } 1297 } 1298 1299 void PhaseCFG::dump_headers() { 1300 for (uint i = 0; i < number_of_blocks(); i++) { 1301 Block* block = get_block(i); 1302 if (block != nullptr) { 1303 block->dump_head(this); 1304 } 1305 } 1306 } 1307 #endif // !PRODUCT 1308 1309 #ifdef ASSERT 1310 void PhaseCFG::verify_memory_writer_placement(const Block* b, const Node* n) const { 1311 if (!n->is_memory_writer()) { 1312 return; 1313 } 1314 CFGLoop* home_or_ancestor = find_block_for_node(n->in(0))->_loop; 1315 bool found = false; 1316 do { 1317 if (b->_loop == home_or_ancestor) { 1318 found = true; 1319 break; 1320 } 1321 home_or_ancestor = home_or_ancestor->parent(); 1322 } while (home_or_ancestor != nullptr); 1323 assert(found, "block b is not in n's home loop or an ancestor of it"); 1324 } 1325 1326 void PhaseCFG::verify_dominator_tree() const { 1327 for (uint i = 0; i < number_of_blocks(); i++) { 1328 Block* block = get_block(i); 1329 assert(block->_dom_depth <= number_of_blocks(), "unexpected dominator tree depth"); 1330 if (block == get_root_block()) { 1331 assert(block->_dom_depth == 1, "unexpected root dominator tree depth"); 1332 // The root block does not have an immediate dominator, stop checking. 
1333 continue; 1334 } 1335 assert(block->_idom != nullptr, "non-root blocks must have immediate dominators"); 1336 assert(block->_dom_depth == block->_idom->_dom_depth + 1, 1337 "the dominator tree depth of a node must succeed that of its immediate dominator"); 1338 assert(block->num_preds() > 2 || block->_idom == get_block_for_node(block->pred(1)), 1339 "the immediate dominator of a single-predecessor block must be the predecessor"); 1340 } 1341 } 1342 1343 void PhaseCFG::verify() const { 1344 // Verify sane CFG 1345 for (uint i = 0; i < number_of_blocks(); i++) { 1346 Block* block = get_block(i); 1347 uint cnt = block->number_of_nodes(); 1348 uint j; 1349 for (j = 0; j < cnt; j++) { 1350 Node *n = block->get_node(j); 1351 assert(get_block_for_node(n) == block, ""); 1352 if (j >= 1 && n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CreateEx) { 1353 assert(j == 1 || block->get_node(j-1)->is_Phi(), "CreateEx must be first instruction in block"); 1354 } 1355 verify_memory_writer_placement(block, n); 1356 if (n->needs_anti_dependence_check()) { 1357 verify_anti_dependences(block, n); 1358 if (C->failing()) { 1359 return; 1360 } 1361 } 1362 for (uint k = 0; k < n->req(); k++) { 1363 Node *def = n->in(k); 1364 if (def && def != n) { 1365 Block* def_block = get_block_for_node(def); 1366 assert(def_block || def->is_Con(), "must have block; constants for debug info ok"); 1367 // Verify that all definitions dominate their uses (except for virtual 1368 // instructions merging multiple definitions). 1369 assert(n->is_Root() || n->is_Region() || n->is_Phi() || n->is_MachMerge() || 1370 def_block->dominates(block), 1371 "uses must be dominated by definitions"); 1372 // Verify that instructions in the block are in correct order. 1373 // Uses must follow their definition if they are at the same block. 1374 // Mostly done to check that MachSpillCopy nodes are placed correctly 1375 // when CreateEx node is moved in build_ifg_physical(). 
1376 if (def_block == block && !(block->head()->is_Loop() && n->is_Phi()) && 1377 // See (+++) comment in reg_split.cpp 1378 !(n->jvms() != nullptr && n->jvms()->is_monitor_use(k))) { 1379 bool is_loop = false; 1380 if (n->is_Phi()) { 1381 for (uint l = 1; l < def->req(); l++) { 1382 if (n == def->in(l)) { 1383 is_loop = true; 1384 break; // Some kind of loop 1385 } 1386 } 1387 } 1388 // Uses must be before definition, except if: 1389 // - We are in some kind of loop we already detected 1390 // - We are in infinite loop, where Region may not have been turned into LoopNode 1391 assert(block->find_node(def) < j || 1392 is_loop || 1393 (n->is_Phi() && block->head()->as_Region()->is_in_infinite_subgraph()), 1394 "uses must follow definitions (except in loops)"); 1395 } 1396 } 1397 } 1398 if (n->is_Proj()) { 1399 assert(j >= 1, "a projection cannot be the first instruction in a block"); 1400 Node* pred = block->get_node(j - 1); 1401 Node* parent = n->in(0); 1402 assert(parent != nullptr, "projections must have a parent"); 1403 assert(pred == parent || (pred->is_Proj() && pred->in(0) == parent), 1404 "projections must follow their parents or other sibling projections"); 1405 } 1406 } 1407 1408 j = block->end_idx(); 1409 Node* bp = (Node*)block->get_node(block->number_of_nodes() - 1)->is_block_proj(); 1410 assert(bp, "last instruction must be a block proj"); 1411 assert(bp == block->get_node(j), "wrong number of successors for this block"); 1412 if (bp->is_Catch()) { 1413 while (block->get_node(--j)->is_MachProj()) { 1414 ; 1415 } 1416 assert(block->get_node(j)->is_MachCall(), "CatchProj must follow call"); 1417 } else if (bp->is_Mach() && bp->as_Mach()->ideal_Opcode() == Op_If) { 1418 assert(block->_num_succs == 2, "Conditional branch must have two targets"); 1419 } 1420 } 1421 verify_dominator_tree(); 1422 } 1423 #endif // ASSERT 1424 1425 UnionFind::UnionFind( uint max ) : _cnt(max), _max(max), _indices(NEW_RESOURCE_ARRAY(uint,max)) { 1426 Copy::zero_to_bytes( _indices, sizeof(uint)*max ); 1427 } 1428 1429 void UnionFind::extend( uint from_idx, uint to_idx ) { 1430 _nesting.check(); // Check if a potential reallocation in the resource arena is safe 1431 if( from_idx >= _max ) { 1432 uint size = 16; 1433 while( size <= from_idx ) size <<=1; 1434 _indices = REALLOC_RESOURCE_ARRAY( uint, _indices, _max, size ); 1435 _max = size; 1436 } 1437 while( _cnt <= from_idx ) _indices[_cnt++] = 0; 1438 _indices[from_idx] = to_idx; 1439 } 1440 1441 void UnionFind::reset( uint max ) { 1442 // Force the Union-Find mapping to be at least this large 1443 extend(max,0); 1444 // Initialize to be the ID mapping. 1445 for( uint i=0; i<max; i++ ) map(i,i); 1446 } 1447 1448 // Straight out of Tarjan's union-find algorithm 1449 uint UnionFind::Find_compress( uint idx ) { 1450 uint cur = idx; 1451 uint next = lookup(cur); 1452 while( next != cur ) { // Scan chain of equivalences 1453 assert( next < cur, "always union smaller" ); 1454 cur = next; // until find a fixed-point 1455 next = lookup(cur); 1456 } 1457 // Core of union-find algorithm: update chain of 1458 // equivalences to be equal to the root. 1459 while( idx != next ) { 1460 uint tmp = lookup(idx); 1461 map(idx, next); 1462 idx = tmp; 1463 } 1464 return idx; 1465 } 1466 1467 // Like Find above, but no path compress, so bad asymptotic behavior 1468 uint UnionFind::Find_const( uint idx ) const { 1469 if( idx == 0 ) return idx; // Ignore the zero idx 1470 // Off the end? 
This can happen during debugging dumps 1471 // when data structures have not finished being updated. 1472 if( idx >= _max ) return idx; 1473 uint next = lookup(idx); 1474 while( next != idx ) { // Scan chain of equivalences 1475 idx = next; // until find a fixed-point 1476 next = lookup(idx); 1477 } 1478 return next; 1479 } 1480 1481 // union 2 sets together. 1482 void UnionFind::Union( uint idx1, uint idx2 ) { 1483 uint src = Find(idx1); 1484 uint dst = Find(idx2); 1485 assert( src, "" ); 1486 assert( dst, "" ); 1487 assert( src < _max, "oob" ); 1488 assert( dst < _max, "oob" ); 1489 assert( src < dst, "always union smaller" ); 1490 map(dst,src); 1491 } 1492 1493 #ifndef PRODUCT 1494 void Trace::dump( ) const { 1495 tty->print_cr("Trace (freq %f)", first_block()->_freq); 1496 for (Block *b = first_block(); b != nullptr; b = next(b)) { 1497 tty->print(" B%d", b->_pre_order); 1498 if (b->head()->is_Loop()) { 1499 tty->print(" (L%d)", b->compute_loop_alignment()); 1500 } 1501 if (b->has_loop_alignment()) { 1502 tty->print(" (T%d)", b->code_alignment()); 1503 } 1504 } 1505 tty->cr(); 1506 } 1507 1508 void CFGEdge::dump( ) const { 1509 tty->print(" B%d --> B%d Freq: %f out:%3d%% in:%3d%% State: ", 1510 from()->_pre_order, to()->_pre_order, freq(), _from_pct, _to_pct); 1511 switch(state()) { 1512 case connected: 1513 tty->print("connected"); 1514 break; 1515 case open: 1516 tty->print("open"); 1517 break; 1518 case interior: 1519 tty->print("interior"); 1520 break; 1521 } 1522 if (infrequent()) { 1523 tty->print(" infrequent"); 1524 } 1525 tty->cr(); 1526 } 1527 #endif 1528 1529 // Comparison function for edges 1530 static int edge_order(CFGEdge **e0, CFGEdge **e1) { 1531 float freq0 = (*e0)->freq(); 1532 float freq1 = (*e1)->freq(); 1533 if (freq0 != freq1) { 1534 return freq0 > freq1 ? -1 : 1; 1535 } 1536 1537 int dist0 = (*e0)->to()->_rpo - (*e0)->from()->_rpo; 1538 int dist1 = (*e1)->to()->_rpo - (*e1)->from()->_rpo; 1539 1540 return dist1 - dist0; 1541 } 1542 1543 // Comparison function for edges 1544 extern "C" int trace_frequency_order(const void *p0, const void *p1) { 1545 Trace *tr0 = *(Trace **) p0; 1546 Trace *tr1 = *(Trace **) p1; 1547 Block *b0 = tr0->first_block(); 1548 Block *b1 = tr1->first_block(); 1549 1550 // The trace of connector blocks goes at the end; 1551 // we only expect one such trace 1552 if (b0->is_connector() != b1->is_connector()) { 1553 return b1->is_connector() ? -1 : 1; 1554 } 1555 1556 // Pull more frequently executed blocks to the beginning 1557 float freq0 = b0->_freq; 1558 float freq1 = b1->_freq; 1559 if (freq0 != freq1) { 1560 return freq0 > freq1 ? -1 : 1; 1561 } 1562 1563 int diff = tr0->first_block()->_rpo - tr1->first_block()->_rpo; 1564 1565 return diff; 1566 } 1567 1568 // Find edges of interest, i.e, those which can fall through. Presumes that 1569 // edges which don't fall through are of low frequency and can be generally 1570 // ignored. Initialize the list of traces. 
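// For example (illustrative): in a diamond B1 -> {B2, B3} -> B4, block B1 has
// two possible fall-throughs, so the chain-building loop below stops after B1
// and each of B1, B2, B3 and B4 starts in its own single-block Trace; the
// CFGEdges B1->B2, B1->B3, B2->B4 and B3->B4 are recorded so that
// grow_traces() and merge_traces() can later stitch traces together along the
// most frequent of them.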
void PhaseBlockLayout::find_edges() {
  // Walk the blocks, creating edges and Traces
  uint i;
  Trace *tr = nullptr;
  for (i = 0; i < _cfg.number_of_blocks(); i++) {
    Block* b = _cfg.get_block(i);
    tr = new Trace(b, next, prev);
    traces[tr->id()] = tr;

    // All connector blocks should be at the end of the list
    if (b->is_connector()) break;

    // If this block and the next one have a one-to-one successor/predecessor
    // relationship, simply append the next block
    int nfallthru = b->num_fall_throughs();
    while (nfallthru == 1 &&
           b->succ_fall_through(0)) {
      Block *n = b->_succs[0];

      // Skip over single-entry connector blocks; we don't want to
      // add them to the trace.
      while (n->is_connector() && n->num_preds() == 1) {
        n = n->_succs[0];
      }

      // We see a merge point, so stop searching for the next block
      if (n->num_preds() != 1) break;

      i++;
      assert(n == _cfg.get_block(i), "expecting next block");
      tr->append(n);
      uf->map(n->_pre_order, tr->id());
      traces[n->_pre_order] = nullptr;
      nfallthru = b->num_fall_throughs();
      b = n;
    }

    if (nfallthru > 0) {
      // Create a CFGEdge for each outgoing
      // edge that could be a fall-through.
      for (uint j = 0; j < b->_num_succs; j++) {
        if (b->succ_fall_through(j)) {
          Block *target = b->non_connector_successor(j);
          float freq = b->_freq * b->succ_prob(j);
          int from_pct = (int) ((100 * freq) / b->_freq);
          float f_to_pct = (100 * freq) / target->_freq;
          int to_pct = (f_to_pct < 100.0) ? (int)f_to_pct : 100;
          edges->append(new CFGEdge(b, target, freq, from_pct, to_pct));
        }
      }
    }
  }

  // Group connector blocks into one trace
  for (i++; i < _cfg.number_of_blocks(); i++) {
    Block *b = _cfg.get_block(i);
    assert(b->is_connector(), "connector blocks at the end");
    tr->append(b);
    uf->map(b->_pre_order, tr->id());
    traces[b->_pre_order] = nullptr;
  }
}

// Union two traces together in uf, and null out the trace in the list
void PhaseBlockLayout::union_traces(Trace* updated_trace, Trace* old_trace) {
  uint old_id = old_trace->id();
  uint updated_id = updated_trace->id();

  uint lo_id = updated_id;
  uint hi_id = old_id;

  // If from is greater than to, swap values to meet
  // the UnionFind guarantee.
  if (updated_id > old_id) {
    lo_id = old_id;
    hi_id = updated_id;

    // Fix up the trace ids
    traces[lo_id] = traces[updated_id];
    updated_trace->set_id(lo_id);
  }

  // Union the lower with the higher and remove the pointer
  // to the higher.
  uf->Union(lo_id, hi_id);
  traces[hi_id] = nullptr;
}

// Append traces together via the most frequently executed edges
void PhaseBlockLayout::grow_traces() {
  // Order the edges, and drive the growth of Traces via the most
  // frequently executed edges.
  edges->sort(edge_order);
  for (int i = 0; i < edges->length(); i++) {
    CFGEdge *e = edges->at(i);

    if (e->state() != CFGEdge::open) continue;

    Block *src_block = e->from();
    Block *targ_block = e->to();

    // Don't grow traces along backedges?
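    // When loop rotation is disabled, an edge whose target does not come
    // later in reverse post-order is treated as a backedge: the edge is
    // skipped rather than used to grow the trace, and the target block is
    // marked as its own loop top so the loop head still gets loop alignment.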
    if (!BlockLayoutRotateLoops) {
      if (targ_block->_rpo <= src_block->_rpo) {
        targ_block->set_loop_alignment(targ_block);
        continue;
      }
    }

    Trace *src_trace = trace(src_block);
    Trace *targ_trace = trace(targ_block);

    // If the edge in question can join two traces at their ends,
    // append one trace to the other.
    if (src_trace->last_block() == src_block) {
      if (src_trace == targ_trace) {
        e->set_state(CFGEdge::interior);
        if (targ_trace->backedge(e)) {
          // Reset i to catch any newly eligible edge
          // (Or we could remember the first "open" edge, and reset there)
          i = 0;
        }
      } else if (targ_trace->first_block() == targ_block) {
        e->set_state(CFGEdge::connected);
        src_trace->append(targ_trace);
        union_traces(src_trace, targ_trace);
      }
    }
  }
}

// Embed one trace into another, if the fork or join points are sufficiently
// balanced.
void PhaseBlockLayout::merge_traces(bool fall_thru_only) {
  // Walk the edge list another time, looking at unprocessed edges.
  // Fold in diamonds.
  for (int i = 0; i < edges->length(); i++) {
    CFGEdge *e = edges->at(i);

    if (e->state() != CFGEdge::open) continue;
    if (fall_thru_only) {
      if (e->infrequent()) continue;
    }

    Block *src_block = e->from();
    Trace *src_trace = trace(src_block);
    bool src_at_tail = src_trace->last_block() == src_block;

    Block *targ_block = e->to();
    Trace *targ_trace = trace(targ_block);
    bool targ_at_start = targ_trace->first_block() == targ_block;

    if (src_trace == targ_trace) {
      // This may be a loop, but we can't do much about it.
      e->set_state(CFGEdge::interior);
      continue;
    }

    if (fall_thru_only) {
      // If the edge links the middle of two traces, we can't do anything.
      // Mark the edge and continue.
      if (!src_at_tail && !targ_at_start) {
        continue;
      }

      // Don't grow traces along backedges?
      if (!BlockLayoutRotateLoops && (targ_block->_rpo <= src_block->_rpo)) {
        continue;
      }

      // If both ends of the edge are available, why didn't we handle it earlier?
      assert(src_at_tail ^ targ_at_start, "Should have caught this edge earlier.");

      if (targ_at_start) {
        // Insert the "targ" trace in the "src" trace if the insertion point
        // is a two-way branch.
        // A better profitability check is possible, but may not be worth it.
        // Someday, see if this "fork" has an associated "join";
        // then make a policy on merging this trace at the fork or join.
        // For example, other things being equal, it may be better to place this
        // trace at the join point if the "src" trace ends in a two-way branch, but
        // the insertion point is one-way.
        assert(src_block->num_fall_throughs() == 2, "unexpected diamond");
        e->set_state(CFGEdge::connected);
        src_trace->insert_after(src_block, targ_trace);
        union_traces(src_trace, targ_trace);
      } else if (src_at_tail) {
        if (src_trace != trace(_cfg.get_root_block())) {
          e->set_state(CFGEdge::connected);
          targ_trace->insert_before(targ_block, src_trace);
          union_traces(targ_trace, src_trace);
        }
      }
    } else if (e->state() == CFGEdge::open) {
      // Append traces, even without a fall-thru connection.
      // But leave the root entry at the beginning of the block list.
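      // (The entry trace must stay at the front of the trace list;
      // reorder_traces() below asserts that it is still new_traces[0].)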
      if (targ_trace != trace(_cfg.get_root_block())) {
        e->set_state(CFGEdge::connected);
        src_trace->append(targ_trace);
        union_traces(src_trace, targ_trace);
      }
    }
  }
}

// Order the sequence of the traces in some desirable way
void PhaseBlockLayout::reorder_traces(int count) {
  Trace** new_traces = NEW_RESOURCE_ARRAY(Trace*, count);
  Block_List worklist;
  int new_count = 0;

  // Compact the traces.
  for (int i = 0; i < count; i++) {
    Trace* tr = traces[i];
    if (tr != nullptr) {
      new_traces[new_count++] = tr;
    }
  }

  // The entry block should be first on the new trace list.
  Trace* tr = trace(_cfg.get_root_block());
  assert(tr == new_traces[0], "entry trace misplaced");

  // Sort the new trace list by frequency
  qsort(new_traces + 1, new_count - 1, sizeof(new_traces[0]), trace_frequency_order);

  // Collect all blocks from existing Traces
  _cfg.clear_blocks();
  for (int i = 0; i < new_count; i++) {
    Trace* tr = new_traces[i];
    if (tr != nullptr) {
      // Push blocks onto the CFG list
      for (Block* b = tr->first_block(); b != nullptr; b = tr->next(b)) {
        _cfg.add_block(b);
      }
    }
  }
}

// Order basic blocks based on frequency
PhaseBlockLayout::PhaseBlockLayout(PhaseCFG &cfg)
: Phase(BlockLayout)
, _cfg(cfg) {
  ResourceMark rm;

  // List of traces
  int size = _cfg.number_of_blocks() + 1;
  traces = NEW_RESOURCE_ARRAY(Trace*, size);
  memset(traces, 0, size*sizeof(Trace*));
  next = NEW_RESOURCE_ARRAY(Block*, size);
  memset(next, 0, size*sizeof(Block*));
  prev = NEW_RESOURCE_ARRAY(Block*, size);
  memset(prev, 0, size*sizeof(Block*));

  // List of edges
  edges = new GrowableArray<CFGEdge*>;

  // Mapping block index --> block_trace
  uf = new UnionFind(size);
  uf->reset(size);

  // Find edges and create traces.
  find_edges();

  // Grow traces at their ends via most frequent edges.
  grow_traces();

  // Merge one trace into another, but only at fall-through points.
  // This may make diamonds and other related shapes in a trace.
  merge_traces(true);

  // Run merge again, allowing two traces to be catenated, even if
  // one does not fall through into the other. This places loosely
  // related traces near each other.
  merge_traces(false);

  // Re-order all the remaining traces by frequency
  reorder_traces(size);

  assert(_cfg.number_of_blocks() >= (uint) (size - 1), "number of blocks cannot shrink");
}


// Edge e completes a loop in a trace. If the target block is the head of the
// loop, rotate the loop block so that the loop ends in a conditional branch.
bool Trace::backedge(CFGEdge *e) {
  bool loop_rotated = false;
  Block *src_block = e->from();
  Block *targ_block = e->to();

  assert(last_block() == src_block, "loop discovery at back branch");
  if (first_block() == targ_block) {
    if (BlockLayoutRotateLoops && last_block()->num_fall_throughs() < 2) {
      // Find the last block in the trace that has a conditional
      // branch.
      Block *b;
      for (b = last_block(); b != nullptr; b = prev(b)) {
        if (b->num_fall_throughs() == 2) {
          break;
        }
      }

      if (b != last_block() && b != nullptr) {
        loop_rotated = true;

        // Rotate the loop by doing two-part linked-list surgery.
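        // Roughly (see the Trace helpers in block.hpp): append() re-links
        // the old loop head behind the current tail of the trace, and
        // break_loop_after(b) then cuts the chain right after b, so b (the
        // last conditional branch found above) becomes the new tail and the
        // block that used to follow b becomes the new head.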
1877 append(first_block()); 1878 break_loop_after(b); 1879 } 1880 } 1881 1882 // Backbranch to the top of a trace 1883 // Scroll forward through the trace from the targ_block. If we find 1884 // a loop head before another loop top, use the loop head alignment. 1885 for (Block *b = targ_block; b != nullptr; b = next(b)) { 1886 if (b->has_loop_alignment()) { 1887 break; 1888 } 1889 if (b->head()->is_Loop()) { 1890 targ_block = b; 1891 break; 1892 } 1893 } 1894 1895 first_block()->set_loop_alignment(targ_block); 1896 1897 } else { 1898 // That loop may already have a loop top (we're reaching it again 1899 // through the backedge of an outer loop) 1900 Block* b = prev(targ_block); 1901 bool has_top = targ_block->head()->is_Loop() && b->has_loop_alignment() && !b->head()->is_Loop(); 1902 if (!has_top) { 1903 // Backbranch into the middle of a trace 1904 targ_block->set_loop_alignment(targ_block); 1905 } 1906 } 1907 1908 return loop_rotated; 1909 }