/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_BLOCK_HPP
#define SHARE_OPTO_BLOCK_HPP

#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/phase.hpp"
#include "utilities/powerOfTwo.hpp"

// Optimization - Graph Style

class Block;
class CFGLoop;
class MachCallNode;
class Matcher;
class RootNode;
class VectorSet;
class PhaseChaitin;
struct Tarjan;

//------------------------------Block_Array------------------------------------
// Map dense integer indices to Blocks. Uses classic doubling-array trick.
// Abstractly provides an infinite array of Block*'s, initialized to null.
// Note that the constructor just zeros things, and since I use Arena
// allocation I do not need a destructor to reclaim storage.
class Block_Array : public ArenaObj {
  uint _size;                   // allocated size, as opposed to formal limit
  DEBUG_ONLY(uint _limit;)      // limit to formal domain
  Arena *_arena;                // Arena to allocate in
  ReallocMark _nesting;         // Safety checks for arena reallocation
protected:
  Block **_blocks;
  void grow( uint i );          // Grow array to fit index i

public:
  Block_Array(Arena *a) : _size(OptoBlockListSize), _arena(a) {
    DEBUG_ONLY(_limit=0);
    _blocks = NEW_ARENA_ARRAY( a, Block *, OptoBlockListSize );
    for( int i = 0; i < OptoBlockListSize; i++ ) {
      _blocks[i] = nullptr;
    }
  }
  Block *lookup( uint i ) const // Lookup, or null for not mapped
  { return (i<Max()) ? _blocks[i] : (Block*)nullptr; }
  Block *operator[] ( uint i ) const // Lookup, or assert for not mapped
  { assert( i < Max(), "oob" ); return _blocks[i]; }
  // Extend the mapping: index i maps to Block *n.
  void map( uint i, Block *n ) { grow(i); _blocks[i] = n; }
  uint Max() const { DEBUG_ONLY(return _limit); return _size; }
};


class Block_List : public Block_Array {
public:
  uint _cnt;
  Block_List() : Block_List(Thread::current()->resource_area()) { }
  Block_List(Arena* a) : Block_Array(a), _cnt(0) { }

  void push( Block *b ) { map(_cnt++,b); }
  Block *pop() { return _blocks[--_cnt]; }
  Block *rpop() { Block *b = _blocks[0]; _blocks[0] = _blocks[--_cnt]; return b; }
  void remove( uint i );
  void insert( uint i, Block *n );
  uint size() const { return _cnt; }
  void reset() { _cnt = 0; }
  void print();
};
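// Usage sketch (illustrative only; 'arena' and 'blk' are hypothetical locals,
// not part of this interface): Block_Array grows transparently through map(),
// and unmapped indices read back as null via lookup():
//
//   Block_Array bmap(arena);
//   bmap.map(100, blk);                  // doubles the backing array as needed
//   assert(bmap.lookup(100) == blk, "mapped");
//   assert(bmap.lookup(500) == nullptr, "never mapped");
//
//   Block_List worklist(arena);          // Block_Array plus a stack discipline
//   worklist.push(blk);
//   Block* b = worklist.pop();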


class CFGElement : public AnyObj {
 public:
  double _freq; // Execution frequency (estimate)

  CFGElement() : _freq(0.0) {}
  virtual bool is_block() { return false; }
  virtual bool is_loop()  { return false; }
  Block*   as_Block()   { assert(is_block(), "must be block"); return (Block*)this; }
  CFGLoop* as_CFGLoop() { assert(is_loop(),  "must be loop");  return (CFGLoop*)this; }
};

//------------------------------Block------------------------------------------
// This class defines a Basic Block.
// Basic blocks are used during the output routines, and are not used during
// any optimization pass. They are created late in the game.
class Block : public CFGElement {

 private:
  // Nodes in this block, in order
  Node_List _nodes;

 public:

  // Get the node at index 'at_index'; if 'at_index' is out of bounds, return null
  Node* get_node(uint at_index) const {
    return _nodes[at_index];
  }

  // Get the number of nodes in this block
  uint number_of_nodes() const {
    return _nodes.size();
  }

  // Map a node 'node' to index 'to_index' in the block; if the index is out
  // of bounds, the node list is grown
  void map_node(Node* node, uint to_index) {
    _nodes.map(to_index, node);
  }

  // Insert a node 'node' at index 'at_index', moving all nodes at higher
  // indices one step; if 'at_index' is out of bounds, we crash
  void insert_node(Node* node, uint at_index) {
    _nodes.insert(at_index, node);
  }

  // Remove the node at index 'at_index'
  void remove_node(uint at_index) {
    _nodes.remove(at_index);
  }

  // Push a node 'node' onto the node list
  void push_node(Node* node) {
    _nodes.push(node);
  }

  // Pop the last node off the node list
  Node* pop_node() {
    return _nodes.pop();
  }

  // Basic blocks have a Node which defines Control for all Nodes pinned in
  // this block. This Node is a RegionNode. Exception-causing Nodes
  // (division, subroutines) and Phi functions are always pinned. Later,
  // every Node will get pinned to some block.
  Node *head() const { return get_node(0); }

  // CAUTION: num_preds() is ONE based, so that predecessor numbers match
  // input edges to Regions and Phis.
  uint num_preds() const { return head()->req(); }
  Node *pred(uint i) const { return head()->in(i); }
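
  // Usage sketch (illustrative; 'block' is a hypothetical Block*): because
  // predecessor numbering is one-based, a loop over the predecessors starts
  // at 1 and lines up with the input edges of Region and Phi nodes:
  //
  //   for (uint i = 1; i < block->num_preds(); i++) {
  //     Node* ctrl_in = block->pred(i);   // i-th control edge into head()
  //   }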

  // Array of successor blocks, same size as projs array
  Block_Array _succs;

  // Basic blocks have some number of Nodes which split control to all
  // following blocks. These Nodes are always Projections. The field in
  // the Projection and the block-ending Node determine which Block follows.
  uint _num_succs;

  // Basic blocks also carry all sorts of good old fashioned DFS information
  // used to find loops, loop nesting depth, dominators, etc.
  uint _pre_order;              // Pre-order DFS number

  // Dominator tree
  uint _dom_depth;              // Depth in dominator tree for fast LCA
  Block* _idom;                 // Immediate dominator block

  CFGLoop *_loop;               // Loop to which this block belongs
  uint _rpo;                    // Number in reverse post order walk

  virtual bool is_block() { return true; }
  float succ_prob(uint i);          // return probability of i'th successor
  int num_fall_throughs();          // How many fall-through candidates this block has
  void update_uncommon_branch(Block* un); // Lower branch prob to uncommon code
  bool succ_fall_through(uint i);   // Is successor "i" a fall-through candidate?
  Block* lone_fall_through();       // Return lone fall-through Block or null

  Block* dom_lca(Block* that);      // Compute LCA in dominator tree.

  bool dominates(Block* that) {
    int dom_diff = this->_dom_depth - that->_dom_depth;
    if (dom_diff > 0)  return false;
    for (; dom_diff < 0; dom_diff++)  that = that->_idom;
    return this == that;
  }
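
  // Sketch of the test above (illustrative; 'a' and 'b' are hypothetical
  // blocks): a->dominates(b) first rejects the case where 'a' sits deeper in
  // the dominator tree than 'b', then walks 'b' up its _idom chain to the
  // depth of 'a'; 'a' dominates 'b' exactly when the walk ends on 'a':
  //
  //   if (a->dominates(b)) {
  //     assert(a->dom_lca(b) == a, "LCA of a dominated pair is the dominator");
  //   }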

  // Report the alignment required by this block. Must be a power of 2.
  // The previous block will insert nops to get this alignment.
  uint code_alignment() const;
  uint compute_loop_alignment();

  // BLOCK_FREQUENCY is a sentinel to mark uses of constant block frequencies.
  // It is currently also used to scale such frequencies relative to
  // FreqCountInvocations, compared to the old default value of 1500.
#define BLOCK_FREQUENCY(f) ((f * (double) 1500) / FreqCountInvocations)

  // Register Pressure (estimate) for Splitting heuristic
  uint _reg_pressure;
  uint _ihrp_index;
  uint _freg_pressure;
  uint _fhrp_index;

  // Mark and visited bits for an LCA calculation in raise_above_anti_dependences.
  // Since they hold unique node indexes, they do not need reinitialization.
  node_idx_t _raise_LCA_mark;
  void set_raise_LCA_mark(node_idx_t x)    { _raise_LCA_mark = x; }
  node_idx_t  raise_LCA_mark() const       { return _raise_LCA_mark; }
  node_idx_t _raise_LCA_visited;
  void set_raise_LCA_visited(node_idx_t x) { _raise_LCA_visited = x; }
  node_idx_t  raise_LCA_visited() const    { return _raise_LCA_visited; }

  // Estimated size in bytes of first instructions in a loop.
  uint _first_inst_size;
  uint first_inst_size() const     { return _first_inst_size; }
  void set_first_inst_size(uint s) { _first_inst_size = s; }

  // Compute the size of first instructions in this block.
  uint compute_first_inst_size(uint& sum_size, uint inst_cnt, PhaseRegAlloc* ra);

  // Compute alignment padding if the block needs it.
  // Align a loop if its padding is less than or equal to the padding limit,
  // or if the size of the first instructions in the loop exceeds the padding.
  uint alignment_padding(int current_offset) {
    int block_alignment = code_alignment();
    int max_pad = block_alignment-relocInfo::addr_unit();
    if( max_pad > 0 ) {
      assert(is_power_of_2(max_pad+relocInfo::addr_unit()), "");
      int current_alignment = current_offset & max_pad;
      if( current_alignment != 0 ) {
        uint padding = (block_alignment-current_alignment) & max_pad;
        if( has_loop_alignment() &&
            padding > (uint)MaxLoopPad &&
            first_inst_size() <= padding ) {
          return 0;
        }
        return padding;
      }
    }
    return 0;
  }
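
  // Worked example (illustrative numbers): with code_alignment() == 16 and
  // relocInfo::addr_unit() == 1, max_pad is 15. At current_offset == 0x3A
  // the low bits give current_alignment == 10, so padding == (16 - 10) & 15
  // == 6 bytes of nops. If this block is an aligned loop head, the padding
  // is skipped when it exceeds MaxLoopPad and the loop's first instructions
  // fit within it anyway.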

  // Connector blocks. Connector blocks are basic blocks devoid of
  // instructions, but may have relevant non-instruction Nodes, such as
  // Phis or MergeMems. Such blocks are discovered and marked during the
  // RemoveEmpty phase, and elided during Output.
  bool _connector;
  void set_connector() { _connector = true; }
  bool is_connector() const { return _connector; }

  // Loop_alignment will be set for blocks which are at the top of loops.
  // The block layout pass may rotate loops such that the loop head may not
  // be the sequentially first block of the loop encountered in the linear
  // list of blocks. If the layout pass is not run, loop alignment is set
  // for each block which is the head of a loop.
  uint _loop_alignment;
  void set_loop_alignment(Block *loop_top) {
    uint new_alignment = loop_top->compute_loop_alignment();
    if (new_alignment > _loop_alignment) {
      _loop_alignment = new_alignment;
    }
  }
  uint loop_alignment() const { return _loop_alignment; }
  bool has_loop_alignment() const { return loop_alignment() > 0; }

  // Create a new Block with given head Node.
  // Creates the (empty) predecessor arrays.
  Block( Arena *a, Node *headnode )
    : CFGElement(),
      _nodes(a),
      _succs(a),
      _num_succs(0),
      _pre_order(0),
      _idom(nullptr),
      _loop(nullptr),
      _reg_pressure(0),
      _ihrp_index(1),
      _freg_pressure(0),
      _fhrp_index(1),
      _raise_LCA_mark(0),
      _raise_LCA_visited(0),
      _first_inst_size(999999),
      _connector(false),
      _loop_alignment(0) {
    _nodes.push(headnode);
  }

  // Index of 'end' Node
  uint end_idx() const {
    // %%%%% add a proj after every goto
    // so (last->is_block_proj() != last) always, then simplify this code
    // This will not give correct end_idx for block 0 when it only contains root.
    int last_idx = _nodes.size() - 1;
    Node *last = _nodes[last_idx];
    assert(last->is_block_proj() == last || last->is_block_proj() == _nodes[last_idx - _num_succs], "");
    return (last->is_block_proj() == last) ? last_idx : (last_idx - _num_succs);
  }

  // Basic blocks have a Node which ends them. This Node determines which
  // basic block follows this one in the program flow. This Node is either an
  // IfNode, a GotoNode, a JmpNode, or a ReturnNode.
  Node *end() const { return _nodes[end_idx()]; }

  // Add an instruction to an existing block. It must go after the head
  // instruction and before the end instruction.
  void add_inst( Node *n ) { insert_node(n, end_idx()); }
  // Find node in block. Fails if node not in block.
  uint find_node( const Node *n ) const;
  // Find and remove n from block list
  void find_remove( const Node *n );
  // Check whether the node is in the block.
  bool contains( const Node *n ) const;

  // Whether the block is not root-like and does not have any predecessors.
  bool is_trivially_unreachable() const;

  // Return the empty status of a block
  enum { not_empty, empty_with_goto, completely_empty };
  int is_Empty() const;

  // Forward through connectors
  Block* non_connector() {
    Block* s = this;
    while (s->is_connector()) {
      s = s->_succs[0];
    }
    return s;
  }
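
  // Usage sketch (illustrative; 'block' is a hypothetical Block*): callers
  // that want the first "real" control-flow successor skip the whole
  // connector chain in one call:
  //
  //   Block* s = block->_succs[0]->non_connector();
  //   assert(!s->is_connector(), "chain fully forwarded");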

  // Return true if b is a successor of this block
  bool has_successor(Block* b) const {
    for (uint i = 0; i < _num_succs; i++ ) {
      if (non_connector_successor(i) == b) {
        return true;
      }
    }
    return false;
  }

  // Successor block, after forwarding through connectors
  Block* non_connector_successor(int i) const {
    return _succs[i]->non_connector();
  }

  // Examine block's code shape to predict if it is not commonly executed.
  bool has_uncommon_code() const;

#ifndef PRODUCT
  // Debugging print of basic block
  void dump_bidx(const Block* orig, outputStream* st = tty) const;
  void dump_pred(const PhaseCFG* cfg, Block* orig, outputStream* st = tty) const;
  void dump_head(const PhaseCFG* cfg, outputStream* st = tty) const;
  void dump() const;
  void dump(const PhaseCFG* cfg) const;
#endif
};


//------------------------------PhaseCFG---------------------------------------
// Build an array of Basic Block pointers, one per Node.
class PhaseCFG : public Phase {
 private:
  // Root of whole program
  RootNode* _root;

  // The block containing the root node
  Block* _root_block;

  // List of basic blocks that are created during CFG creation
  Block_List _blocks;

  // Count of basic blocks
  uint _number_of_blocks;

  // Arena for the blocks to be stored in
  Arena* _block_arena;

  // Info used for scheduling
  PhaseChaitin* _regalloc;

  // Register pressure heuristic used?
  bool _scheduling_for_pressure;

  // The matcher for this compilation
  Matcher& _matcher;

  // Map nodes to owning basic block
  Block_Array _node_to_block_mapping;

  // Loop from the root
  CFGLoop* _root_loop;

  // Outmost loop frequency
  double _outer_loop_frequency;

  // Per node latency estimation, valid only during GCM
  GrowableArray<uint>* _node_latency;

  // Build a proper-looking CFG. Returns the count of basic blocks
  uint build_cfg();

  // Build the dominator tree so that we know where we can move instructions
  void build_dominator_tree();

  // Estimate block frequencies based on IfNode probabilities, so that we know where we want to move instructions
  void estimate_block_frequency();

  // Global Code Motion. See Click's PLDI95 paper. Place Nodes in specific
  // basic blocks; i.e. _node_to_block_mapping now maps _idx for all Nodes to some Block.
  // Move nodes to ensure correctness from GVN and also try to move nodes out of loops.
  void global_code_motion();

  // Schedule Nodes early in their basic blocks.
  bool schedule_early(VectorSet &visited, Node_Stack &roots);

  // For each node, find the latest block it can be scheduled into
  // and then select the cheapest block between the latest and earliest
  // block to place the node.
  void schedule_late(VectorSet &visited, Node_Stack &stack);

  // Compute the (backwards) latency of a node from a single use
  int latency_from_use(Node *n, const Node *def, Node *use);

  // Compute the (backwards) latency of a node from the uses of this instruction
  void partial_latency_of_defs(Node *n);

  // Compute the instruction global latency with a backwards walk
  void compute_latencies_backwards(VectorSet &visited, Node_Stack &stack);

  // Check if a block between early and LCA block of uses is cheaper by
  // frequency-based policy, latency-based policy and random-based policy
  bool is_cheaper_block(Block* LCA, Node* self, uint target_latency,
                        uint end_latency, double least_freq,
                        int cand_cnt, bool in_latency);

  // Pick a block between early and late that is a cheaper alternative
  // to late. Helper for schedule_late.
  Block* hoist_to_cheaper_block(Block* LCA, Block* early, Node* self);

  bool schedule_local(Block* block, GrowableArray<int>& ready_cnt, VectorSet& next_call, intptr_t* recalc_pressure_nodes);
  void set_next_call(const Block* block, Node* n, VectorSet& next_call) const;
  void needed_for_next_call(Block* block, Node* this_call, VectorSet& next_call);

  // Perform basic-block local scheduling
  Node* select(Block* block, Node_List& worklist, GrowableArray<int>& ready_cnt, VectorSet& next_call, uint sched_slot,
               intptr_t* recalc_pressure_nodes);
  void adjust_register_pressure(Node* n, Block* block, intptr_t *recalc_pressure_nodes, bool finalize_mode);

  // Schedule a call next in the block
  uint sched_call(Block* block, uint node_cnt, Node_List& worklist, GrowableArray<int>& ready_cnt, MachCallNode* mcall, VectorSet& next_call);

  // Cleanup if any code lands between a Call and its Catch
  void call_catch_cleanup(Block* block);

  Node* catch_cleanup_find_cloned_def(Block* use_blk, Node* def, Block* def_blk, int n_clone_idx);
  void catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, int n_clone_idx);

  // Detect implicit-null-check opportunities. Basically, find null checks
  // with suitable memory ops nearby. Use the memory op to do the null check.
  // I can generate a memory op if there is not one nearby.
  void implicit_null_check(Block* block, Node *proj, Node *val, int allowed_reasons);

  // Perform a Depth First Search (DFS).
  // Setup 'vertex' as DFS to vertex mapping.
  // Setup 'semi' as vertex to DFS mapping.
  // Set 'parent' to DFS parent.
  uint do_DFS(Tarjan* tarjan, uint rpo_counter);

  // Helper function to insert a node into a block
  void schedule_node_into_block( Node *n, Block *b );

  void replace_block_proj_ctrl( Node *n );

  // Set the basic block for pinned Nodes
  void schedule_pinned_nodes( VectorSet &visited );

  // I'll need a few machine-specific GotoNodes. Clone from this one.
  // Used when building the CFG and creating end nodes for blocks.
  MachNode* _goto;

  Block* raise_above_anti_dependences(Block* LCA, Node* load, bool verify = false);
  void verify_anti_dependences(Block* LCA, Node* load) const {
    assert(LCA == get_block_for_node(load), "should already be scheduled");
    const_cast<PhaseCFG*>(this)->raise_above_anti_dependences(LCA, load, true);
  }

  bool move_to_next(Block* bx, uint b_index);
  void move_to_end(Block* bx, uint b_index);

  void insert_goto_at(uint block_no, uint succ_no);

  // Check for NeverBranch at block end. This needs to become a GOTO to the
  // true target. NeverBranch nodes are treated as a conditional branch that
  // always goes the same direction for most of the optimizer and are used to
  // give a fake exit path to infinite loops. At this late stage they need to
  // turn into Gotos so that when you enter the infinite loop you indeed hang.
  void convert_NeverBranch_to_Goto(Block *b);

  CFGLoop* create_loop_tree();
  bool is_dominator(Node* dom_node, Node* node);
  bool is_CFG(Node* n);
  bool is_control_proj_or_safepoint(Node* n) const;
  Block* find_block_for_node(Node* n) const;
  bool is_dominating_control(Node* dom_ctrl, Node* n);
#ifndef PRODUCT
  bool _trace_opto_pipelining;  // tracing flag
#endif

 public:
  PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher);

  void set_latency_for_node(Node* node, int latency) {
    _node_latency->at_put_grow(node->_idx, latency);
  }

  uint get_latency_for_node(Node* node) {
    return _node_latency->at_grow(node->_idx);
  }
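
  // Usage sketch (illustrative; 'cfg' and 'n' are hypothetical): during GCM
  // the latency map grows on demand, so both accessors are safe for any
  // node index:
  //
  //   cfg->set_latency_for_node(n, 3);
  //   uint lat = cfg->get_latency_for_node(n);  // 0 if never set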

  // Get the outermost loop frequency
  double get_outer_loop_frequency() const {
    return _outer_loop_frequency;
  }

  // Get the root node of the CFG
  RootNode* get_root_node() const {
    return _root;
  }

  // Get the block of the root node
  Block* get_root_block() const {
    return _root_block;
  }

  // Add a block at position pos, moving the later blocks one step
  void add_block_at(uint pos, Block* block) {
    _blocks.insert(pos, block);
    _number_of_blocks++;
  }

  // Add a block at the end of the block list
  void add_block(Block* block) {
    _blocks.push(block);
    _number_of_blocks++;
  }

  // Clear the list of blocks
  void clear_blocks() {
    _blocks.reset();
    _number_of_blocks = 0;
  }

  // Get the block at position pos in _blocks
  Block* get_block(uint pos) const {
    return _blocks[pos];
  }

  // Number of blocks
  uint number_of_blocks() const {
    return _number_of_blocks;
  }

  // Set which block this node should reside in
  void map_node_to_block(const Node* node, Block* block) {
    _node_to_block_mapping.map(node->_idx, block);
  }

  // Remove the mapping from a node to a block
  void unmap_node_from_block(const Node* node) {
    _node_to_block_mapping.map(node->_idx, nullptr);
  }

  // Get the block in which this node resides
  Block* get_block_for_node(const Node* node) const {
    return _node_to_block_mapping[node->_idx];
  }

  // Does this node reside in a block? Return true if so
  bool has_block(const Node* node) const {
    return (_node_to_block_mapping.lookup(node->_idx) != nullptr);
  }
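
  // Usage sketch (illustrative; 'cfg', 'n' and 'b' are hypothetical): the
  // node-to-block mapping is keyed by Node::_idx, so test has_block() before
  // get_block_for_node() for nodes that may not be scheduled yet:
  //
  //   cfg->map_node_to_block(n, b);
  //   if (cfg->has_block(n)) {
  //     Block* home = cfg->get_block_for_node(n);  // == b
  //   }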

  // Use frequency calculations and code shape to predict if the block
  // is uncommon.
  bool is_uncommon(const Block* block);

#ifdef ASSERT
  Unique_Node_List _raw_oops;
#endif

  // Do global code motion by first building the dominator tree and estimating block frequencies
  // Returns true on success
  bool do_global_code_motion();

  // Compute the (backwards) latency of a node from the uses
  void latency_from_uses(Node *n);

  // Set loop alignment
  void set_loop_alignment();

  // Remove empty basic blocks
  void remove_empty_blocks();
  Block *fixup_trap_based_check(Node *branch, Block *block, int block_pos, Block *bnext);
  void fixup_flow();
  // Remove all blocks that are transitively unreachable. Such blocks can be
  // found e.g. after PhaseCFG::convert_NeverBranch_to_Goto(). This function
  // assumes post-fixup_flow() block indices (Block::_pre_order, Block::_rpo).
  void remove_unreachable_blocks();

  // Insert a node into a block at index and map the node to the block
  void insert(Block *b, uint idx, Node *n) {
    b->insert_node(n, idx);
    map_node_to_block(n, b);
  }

  // Check all nodes and postalloc_expand them if necessary.
  void postalloc_expand(PhaseRegAlloc* _ra);

#ifndef PRODUCT
  bool trace_opto_pipelining() const { return _trace_opto_pipelining; }

  // Debugging print of CFG
  void dump( ) const;           // CFG only
  void _dump_cfg( const Node *end, VectorSet &visited ) const;
  void dump_headers();
#else
  bool trace_opto_pipelining() const { return false; }
#endif

  bool unrelated_load_in_store_null_block(Node* store, Node* load);

  // Check that block b is in the home loop (or an ancestor) of n, if n is a
  // memory writer.
  void verify_memory_writer_placement(const Block* b, const Node* n) const NOT_DEBUG_RETURN;
  // Check local dominator tree invariants.
  void verify_dominator_tree() const NOT_DEBUG_RETURN;
  void verify() const NOT_DEBUG_RETURN;
};


//------------------------------UnionFind--------------------------------------
// Map Block indices to a block-index for a cfg-cover.
// Array lookup in the optimized case.
class UnionFind : public ResourceObj {
  uint _cnt, _max;
  uint* _indices;
  ReallocMark _nesting;         // Safety checks for arena reallocation
public:
  UnionFind( uint max );
  void reset( uint max );       // Reset to identity map for [0..max]

  uint lookup( uint nidx ) const {
    return _indices[nidx];
  }
  uint operator[] (uint nidx) const { return lookup(nidx); }

  void map( uint from_idx, uint to_idx ) {
    assert( from_idx < _cnt, "oob" );
    _indices[from_idx] = to_idx;
  }
  void extend( uint from_idx, uint to_idx );

  uint Size() const { return _cnt; }

  uint Find( uint idx ) {
    assert( idx < 65536, "Must fit into uint");
    uint uf_idx = lookup(idx);
    return (uf_idx == idx) ? uf_idx : Find_compress(idx);
  }
  uint Find_compress( uint idx );
  uint Find_const( uint idx ) const;
  void Union( uint idx1, uint idx2 );

};
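
// Usage sketch (illustrative; the indices are hypothetical): after a Union,
// both indices resolve to a common root, and Find_compress shortens the
// chains it walks:
//
//   UnionFind uf(16);
//   uf.reset(16);              // identity: uf[i] == i
//   uf.Union(3, 7);            // merge the two sets
//   assert(uf.Find(3) == uf.Find(7), "same root after Union");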

//----------------------------BlockProbPair---------------------------
// Ordered pair of a Block target and the probability of the edge to it.
class BlockProbPair {
protected:
  Block* _target;      // block target
  double _prob;        // probability of edge to block
public:
  BlockProbPair() : _target(nullptr), _prob(0.0) {}
  BlockProbPair(Block* b, double p) : _target(b), _prob(p) {}

  Block* get_target() const { return _target; }
  double get_prob() const { return _prob; }
};

//------------------------------CFGLoop-------------------------------------------
class CFGLoop : public CFGElement {
  int _id;
  int _depth;
  CFGLoop *_parent;      // root of the loop tree is the method-level "pseudo" loop; its parent is null
  CFGLoop *_sibling;     // null-terminated list
  CFGLoop *_child;       // first child, use child's sibling to visit all immediately nested loops
  GrowableArray<CFGElement*> _members; // list of members of loop
  GrowableArray<BlockProbPair> _exits; // list of successor blocks and their probabilities
  double _exit_prob;     // probability any loop exit is taken on a single loop iteration
  void update_succ_freq(Block* b, double freq);

public:
  CFGLoop(int id) :
    CFGElement(),
    _id(id),
    _depth(0),
    _parent(nullptr),
    _sibling(nullptr),
    _child(nullptr),
    _exit_prob(1.0f) {}
  CFGLoop* parent() { return _parent; }
  void push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg);
  void add_member(CFGElement *s) { _members.push(s); }
  void add_nested_loop(CFGLoop* cl);
  Block* head() {
    assert(_members.at(0)->is_block(), "head must be a block");
    Block* hd = _members.at(0)->as_Block();
    assert(hd->_loop == this, "just checking");
    assert(hd->head()->is_Loop(), "must begin with loop head node");
    return hd;
  }
  Block* backedge_block(); // Return the block on the backedge of the loop (else null)
  void compute_loop_depth(int depth);
  void compute_freq(); // compute frequencies within the loop, assuming head freq 1.0f
  void scale_freq();   // scale frequency by loop trip count (including outer loops)
  double outer_loop_freq() const; // frequency of outer loop
  bool in_loop_nest(Block* b);
  double trip_count() const { return 1.0 / _exit_prob; }
  virtual bool is_loop() { return true; }
  int id() { return _id; }
  int depth() { return _depth; }

#ifndef PRODUCT
  void dump( ) const;
  void dump_tree() const;
#endif
};
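
// Worked example (illustrative numbers): _exit_prob is the probability that
// some exit is taken on a single iteration, so a loop that exits 10% of the
// time has trip_count() == 1.0 / 0.1 == 10 expected iterations.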


//----------------------------------CFGEdge------------------------------------
// An edge between two basic blocks that will be embodied by a branch or a
// fall-through.
class CFGEdge : public ResourceObj {
 private:
  Block * _from;        // Source basic block
  Block * _to;          // Destination basic block
  double _freq;         // Execution frequency (estimate)
  int   _state;
  bool  _infrequent;
  int   _from_pct;
  int   _to_pct;

  // Private accessors
  int  from_pct() const { return _from_pct; }
  int  to_pct()   const { return _to_pct;   }
  int  from_infrequent() const { return from_pct() < BlockLayoutMinDiamondPercentage; }
  int  to_infrequent()   const { return to_pct()   < BlockLayoutMinDiamondPercentage; }

 public:
  enum {
    open,               // initial edge state; unprocessed
    connected,          // edge used to connect two traces together
    interior            // edge is interior to trace (could be backedge)
  };

  CFGEdge(Block *from, Block *to, double freq, int from_pct, int to_pct) :
    _from(from), _to(to), _freq(freq),
    _state(open), _from_pct(from_pct), _to_pct(to_pct) {
    _infrequent = from_infrequent() || to_infrequent();
  }

  double freq() const { return _freq; }
  Block* from() const { return _from; }
  Block* to  () const { return _to;   }
  int  infrequent() const { return _infrequent; }
  int  state() const { return _state; }

  void set_state(int state) { _state = state; }

#ifndef PRODUCT
  void dump( ) const;
#endif
};


//-----------------------------------Trace-------------------------------------
// An ordered list of basic blocks.
class Trace : public ResourceObj {
 private:
  uint _id;             // Unique Trace id (derived from initial block)
  Block ** _next_list;  // Array mapping index to next block
  Block ** _prev_list;  // Array mapping index to previous block
  Block * _first;       // First block in the trace
  Block * _last;        // Last block in the trace

  void set_next(Block *b, Block *n) const { _next_list[b->_pre_order] = n; }

  // Return the block that precedes "b" in the trace.
  Block * prev(Block *b) const { return _prev_list[b->_pre_order]; }
  void set_prev(Block *b, Block *p) const { _prev_list[b->_pre_order] = p; }

  // We've discovered a loop in this trace. Reset last to be "b", and first as
  // the block following "b".
  void break_loop_after(Block *b) {
    _last = b;
    _first = next(b);
    set_prev(_first, nullptr);
    set_next(_last, nullptr);
  }

 public:

  Trace(Block *b, Block **next_list, Block **prev_list) :
    _id(b->_pre_order),
    _next_list(next_list),
    _prev_list(prev_list),
    _first(b),
    _last(b) {
    set_next(b, nullptr);
    set_prev(b, nullptr);
  }

  // Return the id number
  uint id() const { return _id; }
  void set_id(uint id) { _id = id; }

  // Return the first block in the trace
  Block * first_block() const { return _first; }

  // Return the last block in the trace
  Block * last_block() const { return _last; }

  // Return the block that follows "b" in the trace.
  Block * next(Block *b) const { return _next_list[b->_pre_order]; }

  // Insert a trace in the middle of this one after b
  void insert_after(Block *b, Trace *tr) {
    set_next(tr->last_block(), next(b));
    if (next(b) != nullptr) {
      set_prev(next(b), tr->last_block());
    }

    set_next(b, tr->first_block());
    set_prev(tr->first_block(), b);

    if (b == _last) {
      _last = tr->last_block();
    }
  }

  void insert_before(Block *b, Trace *tr) {
    Block *p = prev(b);
    assert(p != nullptr, "use append instead");
    insert_after(p, tr);
  }

  // Append another trace to this one.
  void append(Trace *tr) {
    insert_after(_last, tr);
  }

  // Append a block at the end of this trace
  void append(Block *b) {
    set_next(_last, b);
    set_prev(b, _last);
    _last = b;
  }

  bool backedge(CFGEdge *e);

#ifndef PRODUCT
  void dump( ) const;
#endif
};
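
// Usage sketch (illustrative; 'b1'..'b3' and the shared next/prev arrays are
// hypothetical): traces are doubly linked through arrays indexed by
// Block::_pre_order, so splicing one trace into another is O(1):
//
//   Trace t1(b1, next_list, prev_list);
//   t1.append(b2);                 // t1: b1 -> b2
//   Trace t2(b3, next_list, prev_list);
//   t1.insert_after(b1, &t2);      // t1: b1 -> b3 -> b2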

//------------------------------PhaseBlockLayout-------------------------------
// Rearrange blocks into some canonical order, based on edges and their frequencies
class PhaseBlockLayout : public Phase {
  PhaseCFG &_cfg;               // Control flow graph

  GrowableArray<CFGEdge *> *edges;
  Trace **traces;
  Block **next;
  Block **prev;
  UnionFind *uf;

  // Given a block, find its encompassing Trace
  Trace * trace(Block *b) {
    return traces[uf->Find_compress(b->_pre_order)];
  }
 public:
  PhaseBlockLayout(PhaseCFG &cfg);

  void find_edges();
  void grow_traces();
  void merge_traces(bool loose_connections);
  void reorder_traces(int count);
  void union_traces(Trace* from, Trace* to);
};

#endif // SHARE_OPTO_BLOCK_HPP