/*
 * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/oopMap.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "opto/addnode.hpp"
#include "opto/block.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/coalesce.hpp"
#include "opto/connode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/indexSet.hpp"
#include "opto/machnode.hpp"
#include "opto/memnode.hpp"
#include "opto/movenode.hpp"
#include "opto/opcodes.hpp"
#include "opto/rootnode.hpp"
#include "utilities/align.hpp"

#ifndef PRODUCT
void LRG::dump() const {
  ttyLocker ttyl;
  tty->print("%d ",num_regs());
  _mask.dump();
  if( _msize_valid ) {
    if( mask_size() == compute_mask_size() ) tty->print(", #%d ",_mask_size);
    else tty->print(", #!!!_%d_vs_%d ",_mask_size,_mask.Size());
  } else {
    tty->print(", #?(%d) ",_mask.Size());
  }

  tty->print("EffDeg: ");
  if( _degree_valid ) tty->print( "%d ", _eff_degree );
  else tty->print("? ");

  if( is_multidef() ) {
    tty->print("MultiDef ");
    if (_defs != NULL) {
      tty->print("(");
      for (int i = 0; i < _defs->length(); i++) {
        tty->print("N%d ", _defs->at(i)->_idx);
      }
      tty->print(") ");
    }
  }
  else if( _def == 0 ) tty->print("Dead ");
  else tty->print("Def: N%d ",_def->_idx);

  tty->print("Cost:%4.2g Area:%4.2g Score:%4.2g ",_cost,_area, score());
  // Flags
  if( _is_oop ) tty->print("Oop ");
  if( _is_float ) tty->print("Float ");
  if( _is_vector ) tty->print("Vector ");
  if( _is_predicate ) tty->print("Predicate ");
  if( _is_scalable ) tty->print("Scalable ");
  if( _was_spilled1 ) tty->print("Spilled ");
  if( _was_spilled2 ) tty->print("Spilled2 ");
  if( _direct_conflict ) tty->print("Direct_conflict ");
  if( _fat_proj ) tty->print("Fat ");
  if( _was_lo ) tty->print("Lo ");
  if( _has_copy ) tty->print("Copy ");
  if( _at_risk ) tty->print("Risk ");

  if( _must_spill ) tty->print("Must_spill ");
  if( _is_bound ) tty->print("Bound ");
  if( _msize_valid ) {
    if( _degree_valid && lo_degree() ) tty->print("Trivial ");
  }

  tty->cr();
}
#endif

// Compute score from cost and area.  Low score is best to spill.
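// (The magic constant 1.52588e-5 in raw_score() below is 1/65536, i.e. 2^-16.
// As a worked example with illustrative values: cost = 10.0, area = 65536.0
// and RegisterCostAreaRatio = 1 give raw_score = 10.0 - 65536.0 * 1.52588e-5,
// which is approximately 9.0.)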
static double raw_score( double cost, double area ) {
  return cost - (area*RegisterCostAreaRatio) * 1.52588e-5;
}

double LRG::score() const {
  // Scale _area by RegisterCostAreaRatio/64K then subtract from cost.
  // Bigger area lowers score, encourages spilling this live range.
  // Bigger cost raises score, prevents spilling this live range.
  // (Note: 1/65536 is the magic constant below; I don't trust the C optimizer
  // to turn a divide by a constant into a multiply by the reciprocal).
  double score = raw_score( _cost, _area);

  // Account for area.  Basically, LRGs covering large areas are better
  // to spill because more other LRGs get freed up.
  if( _area == 0.0 )            // No area?  Then no progress to spill
    return 1e35;

  if( _was_spilled2 )           // If spilled once before, we are unlikely
    return score + 1e30;        // to make progress again.

  if( _cost >= _area*3.0 )      // Tiny area relative to cost
    return score + 1e17;        // Probably no progress to spill

  if( (_cost+_cost) >= _area*3.0 ) // Small area relative to cost
    return score + 1e10;        // Likely no progress to spill

  return score;
}

#define NUMBUCKS 3

// Straight out of Tarjan's union-find algorithm
uint LiveRangeMap::find_compress(uint lrg) {
  uint cur = lrg;
  uint next = _uf_map.at(cur);
  while (next != cur) { // Scan chain of equivalences
    assert( next < cur, "always union smaller");
    cur = next; // until find a fixed-point
    next = _uf_map.at(cur);
  }

  // Core of union-find algorithm: update chain of
  // equivalences to be equal to the root.
  while (lrg != next) {
    uint tmp = _uf_map.at(lrg);
    _uf_map.at_put(lrg, next);
    lrg = tmp;
  }
  return lrg;
}

// Reset the Union-Find map to identity
void LiveRangeMap::reset_uf_map(uint max_lrg_id) {
  _max_lrg_id = max_lrg_id;
  // Force the Union-Find mapping to be at least this large
  _uf_map.at_put_grow(_max_lrg_id, 0);
  // Initialize it to be the ID mapping.
  for (uint i = 0; i < _max_lrg_id; ++i) {
    _uf_map.at_put(i, i);
  }
}

// Make all Nodes map directly to their final live range; no need for
// the Union-Find mapping after this call.
void LiveRangeMap::compress_uf_map_for_nodes() {
  // For all Nodes, compress mapping
  uint unique = _names.length();
  for (uint i = 0; i < unique; ++i) {
    uint lrg = _names.at(i);
    uint compressed_lrg = find(lrg);
    if (lrg != compressed_lrg) {
      _names.at_put(i, compressed_lrg);
    }
  }
}

// Like Find above, but no path compress, so bad asymptotic behavior
uint LiveRangeMap::find_const(uint lrg) const {
  if (!lrg) {
    return lrg; // Ignore the zero LRG
  }

  // Off the end?  This happens during debugging dumps when you got
  // brand new live ranges but have not told the allocator yet.
  if (lrg >= _max_lrg_id) {
    return lrg;
  }

  uint next = _uf_map.at(lrg);
  while (next != lrg) { // Scan chain of equivalences
    assert(next < lrg, "always union smaller");
    lrg = next; // until find a fixed-point
    next = _uf_map.at(lrg);
  }
  return next;
}

PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher, bool scheduling_info_generated)
  : PhaseRegAlloc(unique, cfg, matcher,
#ifndef PRODUCT
       print_chaitin_statistics
#else
       NULL
#endif
       )
  , _live(0)
  , _lo_degree(0), _lo_stk_degree(0), _hi_degree(0), _simplified(0)
  , _oldphi(unique)
#ifndef PRODUCT
  , _trace_spilling(C->directive()->TraceSpillingOption)
#endif
  , _lrg_map(Thread::current()->resource_area(), unique)
  , _scheduling_info_generated(scheduling_info_generated)
  , _sched_int_pressure(0, Matcher::int_pressure_limit())
  , _sched_float_pressure(0, Matcher::float_pressure_limit())
  , _scratch_int_pressure(0, Matcher::int_pressure_limit())
  , _scratch_float_pressure(0, Matcher::float_pressure_limit())
{
  Compile::TracePhase tp("ctorChaitin", &timers[_t_ctorChaitin]);

  _high_frequency_lrg = MIN2(double(OPTO_LRG_HIGH_FREQ), _cfg.get_outer_loop_frequency());

  // Build a list of basic blocks, sorted by frequency
  _blks = NEW_RESOURCE_ARRAY(Block *, _cfg.number_of_blocks());
  // Experiment with sorting strategies to speed compilation
  double cutoff = BLOCK_FREQUENCY(1.0); // Cutoff for high frequency bucket
  Block **buckets[NUMBUCKS];            // Array of buckets
  uint    buckcnt[NUMBUCKS];            // Array of bucket counters
  double  buckval[NUMBUCKS];            // Array of bucket value cutoffs
  for (uint i = 0; i < NUMBUCKS; i++) {
    buckets[i] = NEW_RESOURCE_ARRAY(Block *, _cfg.number_of_blocks());
    buckcnt[i] = 0;
    // Drop by three orders of magnitude each time
    cutoff *= 0.001;
    buckval[i] = cutoff;
    for (uint j = 0; j < _cfg.number_of_blocks(); j++) {
      buckets[i][j] = NULL;
    }
  }
  // Sort blocks into buckets
  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
    for (uint j = 0; j < NUMBUCKS; j++) {
      if ((j == NUMBUCKS - 1) || (_cfg.get_block(i)->_freq > buckval[j])) {
        // Assign block to end of list for appropriate bucket
        buckets[j][buckcnt[j]++] = _cfg.get_block(i);
        break; // kick out of inner loop
      }
    }
  }
  // Dump buckets into final block array
  uint blkcnt = 0;
  for (uint i = 0; i < NUMBUCKS; i++) {
    for (uint j = 0; j < buckcnt[i]; j++) {
      _blks[blkcnt++] = buckets[i][j];
    }
  }

  assert(blkcnt == _cfg.number_of_blocks(), "Block array not totally filled");
}

// union 2 sets together.
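// The smaller live range id always becomes the root of the union (note the
// "always union smaller" assert below); this keeps every union-find chain
// strictly decreasing, so find_compress() and find_const() always terminate
// at a self-mapped root.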
void PhaseChaitin::Union( const Node *src_n, const Node *dst_n ) {
  uint src = _lrg_map.find(src_n);
  uint dst = _lrg_map.find(dst_n);
  assert(src, "");
  assert(dst, "");
  assert(src < _lrg_map.max_lrg_id(), "oob");
  assert(dst < _lrg_map.max_lrg_id(), "oob");
  assert(src < dst, "always union smaller");
  _lrg_map.uf_map(dst, src);
}

void PhaseChaitin::new_lrg(const Node *x, uint lrg) {
  // Make the Node->LRG mapping
  _lrg_map.extend(x->_idx,lrg);
  // Make the Union-Find mapping an identity function
  _lrg_map.uf_extend(lrg, lrg);
}


int PhaseChaitin::clone_projs(Block* b, uint idx, Node* orig, Node* copy, uint& max_lrg_id) {
  assert(b->find_node(copy) == (idx - 1), "incorrect insert index for copy kill projections");
  DEBUG_ONLY( Block* borig = _cfg.get_block_for_node(orig); )
  int found_projs = 0;
  uint cnt = orig->outcnt();
  for (uint i = 0; i < cnt; i++) {
    Node* proj = orig->raw_out(i);
    if (proj->is_MachProj()) {
      assert(proj->outcnt() == 0, "only kill projections are expected here");
      assert(_cfg.get_block_for_node(proj) == borig, "incorrect block for kill projections");
      found_projs++;
      // Copy kill projections after the cloned node
      Node* kills = proj->clone();
      kills->set_req(0, copy);
      b->insert_node(kills, idx++);
      _cfg.map_node_to_block(kills, b);
      new_lrg(kills, max_lrg_id++);
    }
  }
  return found_projs;
}

// Renumber the live ranges to compact them.  Makes the IFG smaller.
void PhaseChaitin::compact() {
  Compile::TracePhase tp("chaitinCompact", &timers[_t_chaitinCompact]);

  // Currently the _uf_map contains a series of short chains which are headed
  // by a self-cycle.  All the chains run from big numbers to little numbers.
  // The Find() call chases the chains & shortens them for the next Find call.
  // We are going to change this structure slightly.  Numbers above a moving
  // wave 'i' are unchanged.  Numbers below 'j' point directly to their
  // compacted live range with no further chaining.  There are no chains or
  // cycles below 'i', so the Find call no longer works.
  uint j=1;
  uint i;
  for (i = 1; i < _lrg_map.max_lrg_id(); i++) {
    uint lr = _lrg_map.uf_live_range_id(i);
    // Ignore unallocated live ranges
    if (!lr) {
      continue;
    }
    assert(lr <= i, "");
    _lrg_map.uf_map(i, ( lr == i ) ? j++ : _lrg_map.uf_live_range_id(lr));
  }
  // Now change the Node->LR mapping to reflect the compacted names
  uint unique = _lrg_map.size();
  for (i = 0; i < unique; i++) {
    uint lrg_id = _lrg_map.live_range_id(i);
    _lrg_map.map(i, _lrg_map.uf_live_range_id(lrg_id));
  }

  // Reset the Union-Find mapping
  _lrg_map.reset_uf_map(j);
}

void PhaseChaitin::Register_Allocate() {

  // Above the OLD FP (and in registers) are the incoming arguments.  Stack
  // slots in this area are called "arg_slots".  Above the NEW FP (and in
  // registers) is the outgoing argument area; above that is the spill/temp
  // area.  These are all "frame_slots".  Arg_slots start at the zero
  // stack_slots and count up to the known arg_size.  Frame_slots start at
  // the stack_slot #arg_size and go up.  After allocation I map stack
  // slots to actual offsets.  Stack-slots in the arg_slot area are biased
  // by the frame_size; stack-slots in the frame_slot area are biased by 0.
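  //
  // A sketch of the layout described above (higher addresses at the top,
  // since the stack grows downward):
  //
  //      |  incoming args  |   arg_slots   [0, arg_size)
  //      +-----------------+ <-- OLD FP
  //      | spill/temp area |   \
  //      +-----------------+    } frame_slots [arg_size, ...)
  //      |  outgoing args  |   /
  //      +-----------------+ <-- NEW FP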

  _trip_cnt = 0;
  _alternate = 0;
  _matcher._allocation_started = true;

  ResourceArea split_arena(mtCompiler);  // Arena for Split local resources
  ResourceArea live_arena(mtCompiler);   // Arena for liveness & IFG info
  ResourceMark rm(&live_arena);

  // Need live-ness for the IFG; need the IFG for coalescing.  If the
  // liveness is JUST for coalescing, then I can get some mileage by renaming
  // all copy-related live ranges low and then using the max copy-related
  // live range as a cut-off for LIVE and the IFG.  In other words, I can
  // build a subset of LIVE and IFG just for copies.
  PhaseLive live(_cfg, _lrg_map.names(), &live_arena, false);

  // Need IFG for coalescing and coloring
  PhaseIFG ifg(&live_arena);
  _ifg = &ifg;

  // Come out of SSA world to the Named world.  Assign (virtual) registers to
  // Nodes.  Use the same register for all inputs and the output of PhiNodes
  // - effectively ending SSA form.  This requires either coalescing live
  // ranges or inserting copies.  For the moment, we insert "virtual copies"
  // - we pretend there is a copy prior to each Phi in predecessor blocks.
  // We will attempt to coalesce such "virtual copies" before we manifest
  // them for real.
  de_ssa();

#ifdef ASSERT
  // Verify the graph before RA.
  verify(&live_arena);
#endif

  {
    Compile::TracePhase tp("computeLive", &timers[_t_computeLive]);
    _live = NULL;                 // Mark live as being not available
    rm.reset_to_mark();           // Reclaim working storage
    IndexSet::reset_memory(C, &live_arena);
    ifg.init(_lrg_map.max_lrg_id()); // Empty IFG
    gather_lrg_masks( false );    // Collect LRG masks
    live.compute(_lrg_map.max_lrg_id()); // Compute liveness
    _live = &live;                // Mark LIVE as being available
  }

  // Base pointers are currently "used" by instructions which define new
  // derived pointers.  This makes base pointers live up to where the
  // derived pointer is made, but not beyond.  Really, they need to be live
  // across any GC point where the derived value is live.  So this code looks
  // at all the GC points, and "stretches" the live range of any base pointer
  // to the GC point.
  if (stretch_base_pointer_live_ranges(&live_arena)) {
    Compile::TracePhase tp("computeLive (sbplr)", &timers[_t_computeLive]);
    // Since some live range stretched, I need to recompute live
    _live = NULL;
    rm.reset_to_mark();         // Reclaim working storage
    IndexSet::reset_memory(C, &live_arena);
    ifg.init(_lrg_map.max_lrg_id());
    gather_lrg_masks(false);
    live.compute(_lrg_map.max_lrg_id());
    _live = &live;
  }
  // Create the interference graph using virtual copies
  build_ifg_virtual();  // Include stack slots this time

  // The IFG is/was triangular.  I am 'squaring it up' so Union can run
  // faster.  Union requires a 'for all' operation which is slow on the
  // triangular adjacency matrix (quick reminder: the IFG is 'sparse' -
  // meaning I can visit all of a Node's neighbors less than it in time
  // O(# of neighbors), but I have to visit all the Nodes greater than a
  // given Node and search them for an instance, i.e., time O(#MaxLRG)).
  _ifg->SquareUp();

  // Aggressive (but pessimistic) copy coalescing.
  // This pass works on virtual copies.
  // Any virtual copies which are not
  // coalesced get manifested as actual copies.
  {
    Compile::TracePhase tp("chaitinCoalesce1", &timers[_t_chaitinCoalesce1]);

    PhaseAggressiveCoalesce coalesce(*this);
    coalesce.coalesce_driver();
    // Insert un-coalesced copies.  Visit all Phis.  Where inputs to a Phi do
    // not match the Phi itself, insert a copy.
    coalesce.insert_copies(_matcher);
    if (C->failing()) {
      return;
    }
  }

  // After aggressive coalesce, attempt a first cut at coloring.
  // To color, we need the IFG and for that we need LIVE.
  {
    Compile::TracePhase tp("computeLive", &timers[_t_computeLive]);
    _live = NULL;
    rm.reset_to_mark();           // Reclaim working storage
    IndexSet::reset_memory(C, &live_arena);
    ifg.init(_lrg_map.max_lrg_id());
    gather_lrg_masks( true );
    live.compute(_lrg_map.max_lrg_id());
    _live = &live;
  }

  // Build physical interference graph
  uint must_spill = 0;
  must_spill = build_ifg_physical(&live_arena);
  // If we have a guaranteed spill, might as well spill now
  if (must_spill) {
    if (!_lrg_map.max_lrg_id()) {
      return;
    }
    // Bail out if unique gets too large (i.e., unique > MaxNodeLimit)
    C->check_node_count(10*must_spill, "out of nodes before split");
    if (C->failing()) {
      return;
    }

    uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena);  // Split spilling LRG everywhere
    _lrg_map.set_max_lrg_id(new_max_lrg_id);
    // Bail out if unique gets too large (i.e., unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
    // or we failed to split
    C->check_node_count(2*NodeLimitFudgeFactor, "out of nodes after physical split");
    if (C->failing()) {
      return;
    }

    NOT_PRODUCT(C->verify_graph_edges();)

    compact();                  // Compact LRGs; return new lower max lrg

    {
      Compile::TracePhase tp("computeLive", &timers[_t_computeLive]);
      _live = NULL;
      rm.reset_to_mark();         // Reclaim working storage
      IndexSet::reset_memory(C, &live_arena);
      ifg.init(_lrg_map.max_lrg_id()); // Build a new interference graph
      gather_lrg_masks( true );   // Collect intersect mask
      live.compute(_lrg_map.max_lrg_id()); // Compute LIVE
      _live = &live;
    }
    build_ifg_physical(&live_arena);
    _ifg->SquareUp();
    _ifg->Compute_Effective_Degree();
    // Only do conservative coalescing if requested
    if (OptoCoalesce) {
      Compile::TracePhase tp("chaitinCoalesce2", &timers[_t_chaitinCoalesce2]);
      // Conservative (and pessimistic) copy coalescing of those spills
      PhaseConservativeCoalesce coalesce(*this);
      // If max live ranges greater than cutoff, don't color the stack.
      // This cutoff can be larger than below since it is only done once.
      coalesce.coalesce_driver();
    }
    _lrg_map.compress_uf_map_for_nodes();

#ifdef ASSERT
    verify(&live_arena, true);
#endif
  } else {
    ifg.SquareUp();
    ifg.Compute_Effective_Degree();
#ifdef ASSERT
    set_was_low();
#endif
  }

  // Prepare for Simplify & Select
  cache_lrg_info();             // Count degree of LRGs

  // Simplify the InterFerence Graph by removing LRGs of low degree.
  // LRGs of low degree are trivially colorable.
  Simplify();

  // Select colors by re-inserting LRGs back into the IFG in reverse order.
  // Return whether or not something spills.
  uint spills = Select( );

  // If we spill, split and recycle the entire thing
  while( spills ) {
    if( _trip_cnt++ > 24 ) {
      DEBUG_ONLY( dump_for_spill_split_recycle(); )
      if( _trip_cnt > 27 ) {
        C->record_method_not_compilable("failed spill-split-recycle sanity check");
        return;
      }
    }

    if (!_lrg_map.max_lrg_id()) {
      return;
    }
    uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena);  // Split spilling LRG everywhere
    _lrg_map.set_max_lrg_id(new_max_lrg_id);
    // Bail out if unique gets too large (i.e., unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
    C->check_node_count(2 * NodeLimitFudgeFactor, "out of nodes after split");
    if (C->failing()) {
      return;
    }

    compact();                  // Compact LRGs; return new lower max lrg

    // Nuke the liveness and interference graph and LiveRanGe info
    {
      Compile::TracePhase tp("computeLive", &timers[_t_computeLive]);
      _live = NULL;
      rm.reset_to_mark();         // Reclaim working storage
      IndexSet::reset_memory(C, &live_arena);
      ifg.init(_lrg_map.max_lrg_id());

      // Create LiveRanGe array.
      // Intersect register masks for all USEs and DEFs
      gather_lrg_masks(true);
      live.compute(_lrg_map.max_lrg_id());
      _live = &live;
    }
    must_spill = build_ifg_physical(&live_arena);
    _ifg->SquareUp();
    _ifg->Compute_Effective_Degree();

    // Only do conservative coalescing if requested
    if (OptoCoalesce) {
      Compile::TracePhase tp("chaitinCoalesce3", &timers[_t_chaitinCoalesce3]);
      // Conservative (and pessimistic) copy coalescing
      PhaseConservativeCoalesce coalesce(*this);
      // Checking for few live ranges determines how aggressive the
      // coalescing is.
      coalesce.coalesce_driver();
    }
    _lrg_map.compress_uf_map_for_nodes();
#ifdef ASSERT
    verify(&live_arena, true);
#endif
    cache_lrg_info();           // Count degree of LRGs

    // Simplify the InterFerence Graph by removing LRGs of low degree.
    // LRGs of low degree are trivially colorable.
    Simplify();

    // Select colors by re-inserting LRGs back into the IFG in reverse order.
    // Return whether or not something spills.
    spills = Select();
  }

  // Count number of Simplify-Select trips per coloring success.
  _allocator_attempts += _trip_cnt + 1;
  _allocator_successes += 1;

  // Peephole remove copies
  post_allocate_copy_removal();

  // Merge multidefs if multiple defs representing the same value are used in a single block.
  merge_multidefs();

#ifdef ASSERT
  // Verify the graph after RA.
  verify(&live_arena);
#endif

  // max_reg is past the largest *register* used.
  // Convert that to a frame_slot number.
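  // If nothing colored past the new SP, the frame only needs the
  // out-preserve slots; otherwise it must cover every stack slot that was
  // actually colored, i.e. _max_reg - _new_SP slots.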
  if (_max_reg <= _matcher._new_SP) {
    _framesize = C->out_preserve_stack_slots();
  }
  else {
    _framesize = _max_reg - _matcher._new_SP;
  }
  assert((int)(_matcher._new_SP+_framesize) >= (int)_matcher._out_arg_limit, "framesize must be large enough");

  // This frame must preserve the required fp alignment
  _framesize = align_up(_framesize, Matcher::stack_alignment_in_slots());
  assert(_framesize <= 1000000, "sanity check");
#ifndef PRODUCT
  _total_framesize += _framesize;
  if ((int)_framesize > _max_framesize) {
    _max_framesize = _framesize;
  }
#endif

  // Convert CISC spills
  fixup_spills();

  // Log regalloc results
  CompileLog* log = Compile::current()->log();
  if (log != NULL) {
    log->elem("regalloc attempts='%d' success='%d'", _trip_cnt, !C->failing());
  }

  if (C->failing()) {
    return;
  }

  NOT_PRODUCT(C->verify_graph_edges();)

  // Move important info out of the live_arena to longer lasting storage.
  alloc_node_regs(_lrg_map.size());
  for (uint i=0; i < _lrg_map.size(); i++) {
    if (_lrg_map.live_range_id(i)) { // Live range associated with Node?
      LRG &lrg = lrgs(_lrg_map.live_range_id(i));
      if (!lrg.alive()) {
        set_bad(i);
      } else if ((lrg.num_regs() == 1 && !lrg.is_scalable()) ||
                 (lrg.is_scalable() && lrg.scalable_reg_slots() == 1)) {
        set1(i, lrg.reg());
      } else {                  // Must be a register-set
        if (!lrg._fat_proj) {   // Must be aligned adjacent register set
          // Live ranges record the highest register in their mask.
          // We want the low register for the AD file writer's convenience.
          OptoReg::Name hi = lrg.reg(); // Get hi register
          int num_regs = lrg.num_regs();
          if (lrg.is_scalable() && OptoReg::is_stack(hi)) {
            // For scalable vector registers, when they are allocated in physical
            // registers, num_regs is RegMask::SlotsPerVecA for reg mask of scalable
            // vector. If they are allocated on stack, we need to get the actual
            // num_regs, which reflects the physical length of scalable registers.
            num_regs = lrg.scalable_reg_slots();
          }
          if (num_regs == 1) {
            set1(i, hi);
          } else {
            OptoReg::Name lo = OptoReg::add(hi, (1 - num_regs)); // Find lo
            // We have to use pair [lo,lo+1] even for wide vectors/vmasks because
            // the rest of code generation works only with pairs. It is safe
            // since for registers encoding only 'lo' is used.
            // Second reg from pair is used in ScheduleAndBundle with vector max
            // size 8 which corresponds to registers pair.
            // It is also used in BuildOopMaps but oop operations are not
            // vectorized.
            set2(i, lo);
          }
        } else {                // Misaligned; extract 2 bits
          OptoReg::Name hi = lrg.reg(); // Get hi register
          lrg.Remove(hi);       // Yank from mask
          int lo = lrg.mask().find_first_elem(); // Find lo
          set_pair(i, hi, lo);
        }
      }
      if( lrg._is_oop ) _node_oops.set(i);
    } else {
      set_bad(i);
    }
  }

  // Done!
  _live = NULL;
  _ifg = NULL;
  C->set_indexSet_arena(NULL);  // ResourceArea is at end of scope
}

void PhaseChaitin::de_ssa() {
  // Set initial Names for all Nodes.  Most Nodes get the virtual register
  // number.  A few get the ZERO live range number.  These do not
  // get allocated, but instead rely on correct scheduling to ensure that
  // only one instance is simultaneously live at a time.
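  // Live range id 0 is reserved as the "no register needed" id,
  // so numbering starts at 1 below.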
  uint lr_counter = 1;
  for( uint i = 0; i < _cfg.number_of_blocks(); i++ ) {
    Block* block = _cfg.get_block(i);
    uint cnt = block->number_of_nodes();

    // Handle all the normal Nodes in the block
    for( uint j = 0; j < cnt; j++ ) {
      Node *n = block->get_node(j);
      // Pre-color to the zero live range, or pick virtual register
      const RegMask &rm = n->out_RegMask();
      _lrg_map.map(n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0);
    }
  }

  // Reset the Union-Find mapping to be identity
  _lrg_map.reset_uf_map(lr_counter);
}

void PhaseChaitin::mark_ssa() {
  // Use ssa names to populate the live range maps or if no mask
  // is available, use the 0 entry.
  uint max_idx = 0;
  for ( uint i = 0; i < _cfg.number_of_blocks(); i++ ) {
    Block* block = _cfg.get_block(i);
    uint cnt = block->number_of_nodes();

    // Handle all the normal Nodes in the block
    for ( uint j = 0; j < cnt; j++ ) {
      Node *n = block->get_node(j);
      // Pre-color to the zero live range, or pick virtual register
      const RegMask &rm = n->out_RegMask();
      _lrg_map.map(n->_idx, rm.is_NotEmpty() ? n->_idx : 0);
      max_idx = (n->_idx > max_idx) ? n->_idx : max_idx;
    }
  }
  _lrg_map.set_max_lrg_id(max_idx+1);

  // Reset the Union-Find mapping to be identity
  _lrg_map.reset_uf_map(max_idx+1);
}


// Gather LiveRanGe information, including register masks.  Modification of
// cisc spillable in_RegMasks should not be done before AggressiveCoalesce.
void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {

  // Nail down the frame pointer live range
  uint fp_lrg = _lrg_map.live_range_id(_cfg.get_root_node()->in(1)->in(TypeFunc::FramePtr));
  lrgs(fp_lrg)._cost += 1e12;   // Cost is infinite

  // For all blocks
  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
    Block* block = _cfg.get_block(i);

    // For all instructions
    for (uint j = 1; j < block->number_of_nodes(); j++) {
      Node* n = block->get_node(j);
      uint input_edge_start = 1;  // Skip the control input for most nodes
      bool is_machine_node = false;
      if (n->is_Mach()) {
        is_machine_node = true;
        input_edge_start = n->as_Mach()->oper_input_base();
      }
      uint idx = n->is_Copy();

      // Get virtual register number, same as LiveRanGe index
      uint vreg = _lrg_map.live_range_id(n);
      LRG& lrg = lrgs(vreg);
      if (vreg) {               // No vreg means un-allocable (e.g. memory)

        // Check for float-vs-int live range (used in register-pressure
        // calculations)
        const Type *n_type = n->bottom_type();
        if (n_type->is_floatingpoint()) {
          lrg._is_float = 1;
        }

        // Check for twice prior spilling.  Once prior spilling might have
        // spilled 'soft', 2nd prior spill should have spilled 'hard' and
        // further spilling is unlikely to make progress.
        if (_spilled_once.test(n->_idx)) {
          lrg._was_spilled1 = 1;
          if (_spilled_twice.test(n->_idx)) {
            lrg._was_spilled2 = 1;
          }
        }

#ifndef PRODUCT
        // Collect bits not used by product code, but which may be useful for
        // debugging.

        // Collect has-copy bit
        if (idx) {
          lrg._has_copy = 1;
          uint clidx = _lrg_map.live_range_id(n->in(idx));
          LRG& copy_src = lrgs(clidx);
          copy_src._has_copy = 1;
        }

        if (trace_spilling() && lrg._def != NULL) {
          // collect defs for MultiDef printing
          if (lrg._defs == NULL) {
            lrg._defs = new (_ifg->_arena) GrowableArray<Node*>(_ifg->_arena, 2, 0, NULL);
            lrg._defs->append(lrg._def);
          }
          lrg._defs->append(n);
        }
#endif

        // Check for a single def LRG; these can spill nicely
        // via rematerialization.  Flag as NULL for no def found
        // yet, or 'n' for single def or NodeSentinel (-1) for many defs.
        lrg._def = lrg._def ? NodeSentinel : n;

        // Limit result register mask to acceptable registers
        const RegMask &rm = n->out_RegMask();
        lrg.AND( rm );

        uint ireg = n->ideal_reg();
        assert( !n->bottom_type()->isa_oop_ptr() || ireg == Op_RegP,
                "oops must be in Op_RegP's" );

        // Check for vector live range (only if vector register is used).
        // On SPARC vector uses RegD which could be misaligned so it is not
        // processed as a vector in RA.
        if (RegMask::is_vector(ireg)) {
          lrg._is_vector = 1;
          if (Matcher::implements_scalable_vector && ireg == Op_VecA) {
            assert(Matcher::supports_scalable_vector(), "scalable vector should be supported");
            lrg._is_scalable = 1;
            // For scalable vector, when it is allocated in physical register,
            // num_regs is RegMask::SlotsPerVecA for reg mask,
            // which may not be the actual physical register size.
            // If it is allocated in stack, we need to get the actual
            // physical length of scalable vector register.
            lrg.set_scalable_reg_slots(Matcher::scalable_vector_reg_size(T_FLOAT));
          }
        }

        if (ireg == Op_RegVectMask) {
          assert(Matcher::has_predicated_vectors(), "predicated vector should be supported");
          lrg._is_predicate = 1;
          if (Matcher::supports_scalable_vector()) {
            lrg._is_scalable = 1;
            // For scalable predicate, when it is allocated in physical register,
            // num_regs is RegMask::SlotsPerRegVectMask for reg mask,
            // which may not be the actual physical register size.
            // If it is allocated in stack, we need to get the actual
            // physical length of scalable predicate register.
            lrg.set_scalable_reg_slots(Matcher::scalable_predicate_reg_slots());
          }
        }
        assert(n_type->isa_vect() == NULL || lrg._is_vector ||
               ireg == Op_RegD || ireg == Op_RegL || ireg == Op_RegVectMask,
               "vector must be in vector registers");

        // Check for bound register masks
        const RegMask &lrgmask = lrg.mask();
        if (lrgmask.is_bound(ireg)) {
          lrg._is_bound = 1;
        }

        // Check for maximum frequency value
        if (lrg._maxfreq < block->_freq) {
          lrg._maxfreq = block->_freq;
        }

        // Check for oop-iness, or long/double
        // Check for multi-kill projection
        switch (ireg) {
        case MachProjNode::fat_proj:
          // Fat projections have size equal to number of registers killed
          lrg.set_num_regs(rm.Size());
          lrg.set_reg_pressure(lrg.num_regs());
          lrg._fat_proj = 1;
          lrg._is_bound = 1;
          break;
        case Op_RegP:
#ifdef _LP64
          lrg.set_num_regs(2);  // Size is 2 stack words
#else
          lrg.set_num_regs(1);  // Size is 1 stack word
#endif
          // Register pressure is tracked relative to the maximum values
          // suggested for that platform, INTPRESSURE and FLOATPRESSURE,
          // and relative to other types which compete for the same regs.
881 // 882 // The following table contains suggested values based on the 883 // architectures as defined in each .ad file. 884 // INTPRESSURE and FLOATPRESSURE may be tuned differently for 885 // compile-speed or performance. 886 // Note1: 887 // SPARC and SPARCV9 reg_pressures are at 2 instead of 1 888 // since .ad registers are defined as high and low halves. 889 // These reg_pressure values remain compatible with the code 890 // in is_high_pressure() which relates get_invalid_mask_size(), 891 // Block::_reg_pressure and INTPRESSURE, FLOATPRESSURE. 892 // Note2: 893 // SPARC -d32 has 24 registers available for integral values, 894 // but only 10 of these are safe for 64-bit longs. 895 // Using set_reg_pressure(2) for both int and long means 896 // the allocator will believe it can fit 26 longs into 897 // registers. Using 2 for longs and 1 for ints means the 898 // allocator will attempt to put 52 integers into registers. 899 // The settings below limit this problem to methods with 900 // many long values which are being run on 32-bit SPARC. 901 // 902 // ------------------- reg_pressure -------------------- 903 // Each entry is reg_pressure_per_value,number_of_regs 904 // RegL RegI RegFlags RegF RegD INTPRESSURE FLOATPRESSURE 905 // IA32 2 1 1 1 1 6 6 906 // IA64 1 1 1 1 1 50 41 907 // SPARC 2 2 2 2 2 48 (24) 52 (26) 908 // SPARCV9 2 2 2 2 2 48 (24) 52 (26) 909 // AMD64 1 1 1 1 1 14 15 910 // ----------------------------------------------------- 911 lrg.set_reg_pressure(1); // normally one value per register 912 if( n_type->isa_oop_ptr() ) { 913 lrg._is_oop = 1; 914 } 915 break; 916 case Op_RegL: // Check for long or double 917 case Op_RegD: 918 lrg.set_num_regs(2); 919 // Define platform specific register pressure 920 #if defined(ARM32) 921 lrg.set_reg_pressure(2); 922 #elif defined(IA32) 923 if( ireg == Op_RegL ) { 924 lrg.set_reg_pressure(2); 925 } else { 926 lrg.set_reg_pressure(1); 927 } 928 #else 929 lrg.set_reg_pressure(1); // normally one value per register 930 #endif 931 // If this def of a double forces a mis-aligned double, 932 // flag as '_fat_proj' - really flag as allowing misalignment 933 // AND changes how we count interferences. A mis-aligned 934 // double can interfere with TWO aligned pairs, or effectively 935 // FOUR registers! 
          if (rm.is_misaligned_pair()) {
            lrg._fat_proj = 1;
            lrg._is_bound = 1;
          }
          break;
        case Op_RegVectMask:
          assert(Matcher::has_predicated_vectors(), "sanity");
          assert(RegMask::num_registers(Op_RegVectMask) == RegMask::SlotsPerRegVectMask, "sanity");
          lrg.set_num_regs(RegMask::SlotsPerRegVectMask);
          lrg.set_reg_pressure(1);
          break;
        case Op_RegF:
        case Op_RegI:
        case Op_RegN:
        case Op_RegFlags:
        case 0:                 // not an ideal register
          lrg.set_num_regs(1);
          lrg.set_reg_pressure(1);
          break;
        case Op_VecA:
          assert(Matcher::supports_scalable_vector(), "does not support scalable vector");
          assert(RegMask::num_registers(Op_VecA) == RegMask::SlotsPerVecA, "sanity");
          assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecA), "vector should be aligned");
          lrg.set_num_regs(RegMask::SlotsPerVecA);
          lrg.set_reg_pressure(1);
          break;
        case Op_VecS:
          assert(Matcher::vector_size_supported(T_BYTE,4), "sanity");
          assert(RegMask::num_registers(Op_VecS) == RegMask::SlotsPerVecS, "sanity");
          lrg.set_num_regs(RegMask::SlotsPerVecS);
          lrg.set_reg_pressure(1);
          break;
        case Op_VecD:
          assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecD), "sanity");
          assert(RegMask::num_registers(Op_VecD) == RegMask::SlotsPerVecD, "sanity");
          assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecD), "vector should be aligned");
          lrg.set_num_regs(RegMask::SlotsPerVecD);
          lrg.set_reg_pressure(1);
          break;
        case Op_VecX:
          assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecX), "sanity");
          assert(RegMask::num_registers(Op_VecX) == RegMask::SlotsPerVecX, "sanity");
          assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecX), "vector should be aligned");
          lrg.set_num_regs(RegMask::SlotsPerVecX);
          lrg.set_reg_pressure(1);
          break;
        case Op_VecY:
          assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecY), "sanity");
          assert(RegMask::num_registers(Op_VecY) == RegMask::SlotsPerVecY, "sanity");
          assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecY), "vector should be aligned");
          lrg.set_num_regs(RegMask::SlotsPerVecY);
          lrg.set_reg_pressure(1);
          break;
        case Op_VecZ:
          assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecZ), "sanity");
          assert(RegMask::num_registers(Op_VecZ) == RegMask::SlotsPerVecZ, "sanity");
          assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecZ), "vector should be aligned");
          lrg.set_num_regs(RegMask::SlotsPerVecZ);
          lrg.set_reg_pressure(1);
          break;
        default:
          ShouldNotReachHere();
        }
      }

      // Now do the same for inputs
      uint cnt = n->req();
      // Setup for CISC SPILLING
      uint inp = (uint)AdlcVMDeps::Not_cisc_spillable;
      if( UseCISCSpill && after_aggressive ) {
        inp = n->cisc_operand();
        if( inp != (uint)AdlcVMDeps::Not_cisc_spillable )
          // Convert operand number to edge index number
          inp = n->as_Mach()->operand_index(inp);
      }

      // Prepare register mask for each input
      for( uint k = input_edge_start; k < cnt; k++ ) {
        uint vreg = _lrg_map.live_range_id(n->in(k));
        if (!vreg) {
          continue;
        }

        // If this instruction is CISC Spillable, add the flags
        // bit to its appropriate input
        if( UseCISCSpill && after_aggressive && inp == k ) {
#ifndef PRODUCT
          if( TraceCISCSpill ) {
            tty->print("  use_cisc_RegMask: ");
            n->dump();
          }
#endif
          n->as_Mach()->use_cisc_RegMask();
        }

        if (is_machine_node && _scheduling_info_generated) {
          MachNode* cur_node = n->as_Mach();
          // this is cleaned up by register allocation
          if (k >= cur_node->num_opnds()) continue;
        }

        LRG &lrg = lrgs(vreg);
        // // Testing for floating point code shape
        // Node *test = n->in(k);
        // if( test->is_Mach() ) {
        //   MachNode *m = test->as_Mach();
        //   int op = m->ideal_Opcode();
        //   if (n->is_Call() && (op == Op_AddF || op == Op_MulF) ) {
        //     int zzz = 1;
        //   }
        // }

        // Limit the input register mask to acceptable registers.
        // Do not limit registers from uncommon uses before
        // AggressiveCoalesce.  This effectively pre-virtual-splits
        // around uncommon uses of common defs.
        const RegMask &rm = n->in_RegMask(k);
        if (!after_aggressive && _cfg.get_block_for_node(n->in(k))->_freq > 1000 * block->_freq) {
          // Since we are BEFORE aggressive coalesce, leave the register
          // mask untrimmed by the call.  This encourages more coalescing.
          // Later, AFTER aggressive, this live range will have to spill
          // but the spiller handles slow-path calls very nicely.
        } else {
          lrg.AND( rm );
        }

        // Check for bound register masks
        const RegMask &lrgmask = lrg.mask();
        uint kreg = n->in(k)->ideal_reg();
        bool is_vect = RegMask::is_vector(kreg);
        assert(n->in(k)->bottom_type()->isa_vect() == NULL || is_vect ||
               kreg == Op_RegD || kreg == Op_RegL || kreg == Op_RegVectMask,
               "vector must be in vector registers");
        if (lrgmask.is_bound(kreg))
          lrg._is_bound = 1;

        // If this use of a double forces a mis-aligned double,
        // flag as '_fat_proj' - really flag as allowing misalignment
        // AND changes how we count interferences.  A mis-aligned
        // double can interfere with TWO aligned pairs, or effectively
        // FOUR registers!
#ifdef ASSERT
        if (is_vect && !_scheduling_info_generated) {
          if (lrg.num_regs() != 0) {
            assert(lrgmask.is_aligned_sets(lrg.num_regs()), "vector should be aligned");
            assert(!lrg._fat_proj, "sanity");
            assert(RegMask::num_registers(kreg) == lrg.num_regs(), "sanity");
          } else {
            assert(n->is_Phi(), "not all inputs processed only if Phi");
          }
        }
#endif
        if (!is_vect && lrg.num_regs() == 2 && !lrg._fat_proj && rm.is_misaligned_pair()) {
          lrg._fat_proj = 1;
          lrg._is_bound = 1;
        }
        // If the LRG is an unaligned pair, we will have to spill,
        // so clear the LRG's register mask if it is not already spilled
        if (!is_vect && !n->is_SpillCopy() &&
            (lrg._def == NULL || lrg.is_multidef() || !lrg._def->is_SpillCopy()) &&
            lrgmask.is_misaligned_pair()) {
          lrg.Clear();
        }

        // Check for maximum frequency value
        if (lrg._maxfreq < block->_freq) {
          lrg._maxfreq = block->_freq;
        }

      } // End for all allocated inputs
    } // end for all instructions
  } // end for all blocks

  // Final per-liverange setup
  for (uint i2 = 0; i2 < _lrg_map.max_lrg_id(); i2++) {
    LRG &lrg = lrgs(i2);
    assert(!lrg._is_vector || !lrg._fat_proj, "sanity");
    if (lrg.num_regs() > 1 && !lrg._fat_proj) {
      lrg.clear_to_sets();
    }
    lrg.compute_set_mask_size();
    if (lrg.not_free()) {       // Handle case where we lose from the start
      lrg.set_reg(OptoReg::Name(LRG::SPILL_REG));
      lrg._direct_conflict = 1;
    }
    lrg.set_degree(0);          // no neighbors in IFG yet
  }
}

// Set the was-lo-degree bit.
// Conservative coalescing should not change the
// colorability of the graph.  If any live range was of low-degree before
// coalescing, it should Simplify.  This call sets the was-lo-degree bit.
// The bit is checked in Simplify.
void PhaseChaitin::set_was_low() {
#ifdef ASSERT
  for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
    int size = lrgs(i).num_regs();
    uint old_was_lo = lrgs(i)._was_lo;
    lrgs(i)._was_lo = 0;
    if( lrgs(i).lo_degree() ) {
      lrgs(i)._was_lo = 1;      // Trivially of low degree
    } else {                    // Else check the Briggs assertion
      // Briggs' observation is that the lo-degree neighbors of a
      // hi-degree live range will not interfere with the color choices
      // of said hi-degree live range.  The Simplify reverse-stack-coloring
      // order takes care of the details.  Hence you do not have to count
      // low-degree neighbors when determining if this guy colors.
      int briggs_degree = 0;
      IndexSet *s = _ifg->neighbors(i);
      IndexSetIterator elements(s);
      uint lidx;
      while((lidx = elements.next()) != 0) {
        if( !lrgs(lidx).lo_degree() )
          briggs_degree += MAX2(size,lrgs(lidx).num_regs());
      }
      if( briggs_degree < lrgs(i).degrees_of_freedom() )
        lrgs(i)._was_lo = 1;    // Low degree via the Briggs assertion
    }
    assert(old_was_lo <= lrgs(i)._was_lo, "_was_lo may not decrease");
  }
#endif
}

// Compute cost/area ratio, in case we spill.  Build the lo-degree list.
void PhaseChaitin::cache_lrg_info( ) {
  Compile::TracePhase tp("chaitinCacheLRG", &timers[_t_chaitinCacheLRG]);

  for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
    LRG &lrg = lrgs(i);

    // Check for being of low degree: means we can be trivially colored.
    // Low degree, dead or must-spill guys just get to simplify right away
    if( lrg.lo_degree() ||
        !lrg.alive() ||
        lrg._must_spill ) {
      // Split low degree list into those guys that must get a
      // register and those that can go to register or stack.
      // The idea is LRGs that can go register or stack color first when
      // they have a good chance of getting a register.  The register-only
      // lo-degree live ranges always get a register.
      OptoReg::Name hi_reg = lrg.mask().find_last_elem();
      if( OptoReg::is_stack(hi_reg)) { // Can go to stack?
        lrg._next = _lo_stk_degree;
        _lo_stk_degree = i;
      } else {
        lrg._next = _lo_degree;
        _lo_degree = i;
      }
    } else {                    // Else high degree
      lrgs(_hi_degree)._prev = i;
      lrg._next = _hi_degree;
      lrg._prev = 0;
      _hi_degree = i;
    }
  }
}

// Simplify the IFG by removing LRGs of low degree.
void PhaseChaitin::Simplify( ) {
  Compile::TracePhase tp("chaitinSimplify", &timers[_t_chaitinSimplify]);

  while( 1 ) {                  // Repeat till simplified it all
    // May want to explore simplifying lo_degree before _lo_stk_degree.
    // This might result in more spills coloring into registers during
    // Select().
    while( _lo_degree || _lo_stk_degree ) {
      // If possible, pull from the _lo_degree list first
      uint lo;
      if( _lo_degree ) {
        lo = _lo_degree;
        _lo_degree = lrgs(lo)._next;
      } else {
        lo = _lo_stk_degree;
        _lo_stk_degree = lrgs(lo)._next;
      }

      // Put the simplified guy on the simplified list.
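      // (_simplified is a stack threaded through the LRG _next fields;
      // Select() pops it, so LRGs are re-colored in reverse removal order.)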
      lrgs(lo)._next = _simplified;
      _simplified = lo;
      // If this guy is "at risk" then mark his current neighbors
      if (lrgs(lo)._at_risk && !_ifg->neighbors(lo)->is_empty()) {
        IndexSetIterator elements(_ifg->neighbors(lo));
        uint datum;
        while ((datum = elements.next()) != 0) {
          lrgs(datum)._risk_bias = lo;
        }
      }

      // Yank this guy from the IFG.
      IndexSet *adj = _ifg->remove_node(lo);
      if (adj->is_empty()) {
        continue;
      }

      // If any neighbors' degrees fall below their number of
      // allowed registers, then put that neighbor on the low degree
      // list.  Note that 'degree' can only fall and 'numregs' is
      // unchanged by this action.  Thus the two are equal at most once,
      // so LRGs hit the lo-degree worklist at most once.
      IndexSetIterator elements(adj);
      uint neighbor;
      while ((neighbor = elements.next()) != 0) {
        LRG *n = &lrgs(neighbor);
#ifdef ASSERT
        if (VerifyRegisterAllocator) {
          assert( _ifg->effective_degree(neighbor) == n->degree(), "" );
        }
#endif

        // Check for the neighbor just now becoming of low degree, counting
        // registers.  _must_spill live ranges are already on the low degree
        // list.
        if (n->just_lo_degree() && !n->_must_spill) {
          assert(!_ifg->_yanked->test(neighbor), "Cannot move to lo degree twice");
          // Pull from hi-degree list
          uint prev = n->_prev;
          uint next = n->_next;
          if (prev) {
            lrgs(prev)._next = next;
          } else {
            _hi_degree = next;
          }
          lrgs(next)._prev = prev;
          n->_next = _lo_degree;
          _lo_degree = neighbor;
        }
      }
    } // End of while lo-degree/lo_stk_degree worklist not empty

    // Check for got everything: is hi-degree list empty?
    if (!_hi_degree) break;

    // Time to pick a potential spill guy
    uint lo_score = _hi_degree;
    double score = lrgs(lo_score).score();
    double area = lrgs(lo_score)._area;
    double cost = lrgs(lo_score)._cost;
    bool bound = lrgs(lo_score)._is_bound;

    // Find cheapest guy
    debug_only( int lo_no_simplify=0; );
    for (uint i = _hi_degree; i; i = lrgs(i)._next) {
      assert(!_ifg->_yanked->test(i), "");
      // It's just vaguely possible to move hi-degree to lo-degree without
      // going through a just-lo-degree stage: If you remove a double from
      // a float live range its degree will drop by 2 and you can skip the
      // just-lo-degree stage.  It's very rare (shows up after 5000+ methods
      // in -Xcomp of Java2Demo).  So just choose this guy to simplify next.
      if( lrgs(i).lo_degree() ) {
        lo_score = i;
        break;
      }
      debug_only( if( lrgs(i)._was_lo ) lo_no_simplify=i; );
      double iscore = lrgs(i).score();
      double iarea = lrgs(i)._area;
      double icost = lrgs(i)._cost;
      bool ibound = lrgs(i)._is_bound;

      // Compare cost/area of i vs cost/area of lo_score.  Smaller cost/area
      // wins.  Ties happen because all live ranges in question have spilled
      // a few times before and the spill-score adds a huge number which
      // washes out the low order bits.  We are choosing the lesser of 2
      // evils; in this case pick largest area to spill.
      // Ties also happen when live ranges are defined and used only inside
      // one block.  In that case their area is 0 and the score is set to max.
      // In such cases choose the bound live range over the unbound one to
      // free registers, or the one with smaller cost to spill.
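      // The comparison below therefore prefers, in order: a strictly lower
      // score; on a score tie, a larger area when the current candidate has
      // already spilled twice; and on a full tie, a bound live range, or
      // failing that, the smaller spill cost.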
      if ( iscore < score ||
           (iscore == score && iarea > area && lrgs(lo_score)._was_spilled2) ||
           (iscore == score && iarea == area &&
            ( (ibound && !bound) || (ibound == bound && (icost < cost)) )) ) {
        lo_score = i;
        score = iscore;
        area = iarea;
        cost = icost;
        bound = ibound;
      }
    }
    LRG *lo_lrg = &lrgs(lo_score);
    // The live range we choose for spilling is either hi-degree, or very
    // rarely it can be low-degree.  If we choose a hi-degree live range
    // there better not be any lo-degree choices.
    assert( lo_lrg->lo_degree() || !lo_no_simplify, "Live range was lo-degree before coalesce; should simplify" );

    // Pull from hi-degree list
    uint prev = lo_lrg->_prev;
    uint next = lo_lrg->_next;
    if( prev ) lrgs(prev)._next = next;
    else _hi_degree = next;
    lrgs(next)._prev = prev;
    // Jam him on the lo-degree list, despite his high degree.
    // Maybe he'll get a color, and maybe he'll spill.
    // Only Select() will know.
    lrgs(lo_score)._at_risk = true;
    _lo_degree = lo_score;
    lo_lrg->_next = 0;

  } // End of while not simplified everything

}

// Is 'reg' register legal for 'lrg'?
static bool is_legal_reg(LRG &lrg, OptoReg::Name reg, int chunk) {
  if (reg >= chunk && reg < (chunk + RegMask::CHUNK_SIZE) &&
      lrg.mask().Member(OptoReg::add(reg,-chunk))) {
    // RA uses OptoReg which represents the highest element of a register set.
    // For example, vectorX (128bit) on x86 uses [XMM,XMMb,XMMc,XMMd] set
    // in which XMMd is used by RA to represent such vectors.  A double value
    // uses [XMM,XMMb] pairs and XMMb is used by RA for it.
    // The register mask uses the largest bits set of overlapping register sets.
    // On x86 with AVX it uses 8 bits for each XMM register set.
    //
    // The 'lrg' already has a cleared-to-sets register mask (done in Select()
    // before calling choose_color()).  Passing the mask.Member(reg) check above
    // indicates that the size (num_regs) of the 'reg' set is less than or equal
    // to the 'lrg' set size.
    // For set size 1 any register which is a member of the 'lrg' mask is legal.
    if (lrg.num_regs()==1)
      return true;
    // For larger sets only an aligned register with the same set size is legal.
    int mask = lrg.num_regs()-1;
    if ((reg&mask) == mask)
      return true;
  }
  return false;
}

static OptoReg::Name find_first_set(LRG &lrg, RegMask mask, int chunk) {
  int num_regs = lrg.num_regs();
  OptoReg::Name assigned = mask.find_first_set(lrg, num_regs);

  if (lrg.is_scalable()) {
    // a physical register is found
    if (chunk == 0 && OptoReg::is_reg(assigned)) {
      return assigned;
    }

    // find available stack slots for scalable register
    if (lrg._is_vector) {
      num_regs = lrg.scalable_reg_slots();
      // if actual scalable vector register is exactly SlotsPerVecA * 32 bits
      if (num_regs == RegMask::SlotsPerVecA) {
        return assigned;
      }

      // mask has been cleared out by clear_to_sets(SlotsPerVecA) before choose_color, but it
      // does not work for scalable size.  We have to find adjacent scalable_reg_slots() bits
      // instead of SlotsPerVecA bits.
      assigned = mask.find_first_set(lrg, num_regs); // find highest valid reg
      while (OptoReg::is_valid(assigned) && RegMask::can_represent(assigned)) {
        // Verify the found reg has scalable_reg_slots() bits set.
        if (mask.is_valid_reg(assigned, num_regs)) {
          return assigned;
        } else {
          // Remove more for each iteration
          mask.Remove(assigned - num_regs + 1);      // Unmask the lowest reg
          mask.clear_to_sets(RegMask::SlotsPerVecA); // Align by SlotsPerVecA bits
          assigned = mask.find_first_set(lrg, num_regs);
        }
      }
      return OptoReg::Bad; // will cause chunk change, and retry next chunk
    } else if (lrg._is_predicate) {
      assert(num_regs == RegMask::SlotsPerRegVectMask, "scalable predicate register");
      num_regs = lrg.scalable_reg_slots();
      mask.clear_to_sets(num_regs);
      return mask.find_first_set(lrg, num_regs);
    }
  }

  return assigned;
}

// Choose a color using the biasing heuristic
OptoReg::Name PhaseChaitin::bias_color( LRG &lrg, int chunk ) {

  // Check for "at_risk" LRG's
  uint risk_lrg = _lrg_map.find(lrg._risk_bias);
  if (risk_lrg != 0 && !_ifg->neighbors(risk_lrg)->is_empty()) {
    // Walk the colored neighbors of the "at_risk" candidate
    // Choose a color which is both legal and already taken by a neighbor
    // of the "at_risk" candidate in order to improve the chances of the
    // "at_risk" candidate coloring
    IndexSetIterator elements(_ifg->neighbors(risk_lrg));
    uint datum;
    while ((datum = elements.next()) != 0) {
      OptoReg::Name reg = lrgs(datum).reg();
      // If this LRG's register is legal for us, choose it
      if (is_legal_reg(lrg, reg, chunk))
        return reg;
    }
  }

  uint copy_lrg = _lrg_map.find(lrg._copy_bias);
  if (copy_lrg != 0) {
    // If he has a color,
    if(!_ifg->_yanked->test(copy_lrg)) {
      OptoReg::Name reg = lrgs(copy_lrg).reg();
      // And it is legal for you,
      if (is_legal_reg(lrg, reg, chunk))
        return reg;
    } else if( chunk == 0 ) {
      // Choose a color which is legal for him
      RegMask tempmask = lrg.mask();
      tempmask.AND(lrgs(copy_lrg).mask());
      tempmask.clear_to_sets(lrg.num_regs());
      OptoReg::Name reg = find_first_set(lrg, tempmask, chunk);
      if (OptoReg::is_valid(reg))
        return reg;
    }
  }

  // If no bias info exists, just go with the register selection ordering
  if (lrg._is_vector || lrg.num_regs() == 2 || lrg.is_scalable()) {
    // Find an aligned set
    return OptoReg::add(find_first_set(lrg, lrg.mask(), chunk), chunk);
  }

  // CNC - Fun hack.  Alternate 1st and 2nd selection.  Enables post-allocate
  // copy removal to remove many more copies, by preventing a just-assigned
  // register from being repeatedly assigned.
  OptoReg::Name reg = lrg.mask().find_first_elem();
  if( (++_alternate & 1) && OptoReg::is_valid(reg) ) {
    // This 'Remove; find; Insert' idiom is an expensive way to find the
    // SECOND element in the mask.
    lrg.Remove(reg);
    OptoReg::Name reg2 = lrg.mask().find_first_elem();
    lrg.Insert(reg);
    if( OptoReg::is_reg(reg2))
      reg = reg2;
  }
  return OptoReg::add( reg, chunk );
}

// Choose a color in the current chunk
OptoReg::Name PhaseChaitin::choose_color( LRG &lrg, int chunk ) {
  assert( C->in_preserve_stack_slots() == 0 || chunk != 0 || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP-1)), "must not allocate stack0 (inside preserve area)");
  assert(C->out_preserve_stack_slots() == 0 || chunk != 0 || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP+0)), "must not allocate stack0 (inside preserve area)");

  if( lrg.num_regs() == 1 ||    // Common Case
      !lrg._fat_proj )          // Aligned+adjacent pairs ok
    // Use a heuristic to "bias" the color choice
    return bias_color(lrg, chunk);

  assert(!lrg._is_vector, "should be not vector here" );
  assert( lrg.num_regs() >= 2, "dead live ranges do not color" );

  // Fat-proj case or misaligned double argument.
  assert(lrg.compute_mask_size() == lrg.num_regs() ||
         lrg.num_regs() == 2,"fat projs exactly color" );
  assert( !chunk, "always color in 1st chunk" );
  // Return the highest element in the set.
  return lrg.mask().find_last_elem();
}

// Select colors by re-inserting LRGs back into the IFG.  LRGs are re-inserted
// in reverse order of removal.  As long as nothing of hi-degree was yanked,
// everything going back is guaranteed a color.  Select that color.  If some
// hi-degree LRG cannot get a color then we record that we must spill.
uint PhaseChaitin::Select( ) {
  Compile::TracePhase tp("chaitinSelect", &timers[_t_chaitinSelect]);

  uint spill_reg = LRG::SPILL_REG;
  _max_reg = OptoReg::Name(0);  // Past max register used
  while( _simplified ) {
    // Pull next LRG from the simplified list - in reverse order of removal
    uint lidx = _simplified;
    LRG *lrg = &lrgs(lidx);
    _simplified = lrg->_next;

#ifndef PRODUCT
    if (trace_spilling()) {
      ttyLocker ttyl;
      tty->print_cr("L%d selecting degree %d degrees_of_freedom %d", lidx, lrg->degree(),
                    lrg->degrees_of_freedom());
      lrg->dump();
    }
#endif

    // Re-insert into the IFG
    _ifg->re_insert(lidx);
    if( !lrg->alive() ) continue;
    // capture allstackedness flag before mask is hacked
    const int is_allstack = lrg->mask().is_AllStack();

    // Yeah, yeah, yeah, I know, I know.  I can refactor this
    // to avoid the GOTO, although the refactored code will not
    // be much clearer.  We arrive here IFF we have a stack-based
    // live range that cannot color in the current chunk, and it
    // has to move into the next free stack chunk.
    int chunk = 0;              // Current chunk is first chunk
    retry_next_chunk:

    // Remove neighbor colors
    IndexSet *s = _ifg->neighbors(lidx);
    debug_only(RegMask orig_mask = lrg->mask();)

    if (!s->is_empty()) {
      IndexSetIterator elements(s);
      uint neighbor;
      while ((neighbor = elements.next()) != 0) {
        // Note that neighbor might be a spill_reg.  In this case, exclusion
        // of its color will be a no-op, since the spill_reg chunk is in outer
        // space.  Also, if neighbor is in a different chunk, this exclusion
        // will be a no-op.

// Select colors by re-inserting LRGs back into the IFG.  LRGs are re-inserted
// in reverse order of removal.  As long as nothing of hi-degree was yanked,
// everything going back is guaranteed a color.  Select that color.  If some
// hi-degree LRG cannot get a color then we record that we must spill.
uint PhaseChaitin::Select( ) {
  Compile::TracePhase tp("chaitinSelect", &timers[_t_chaitinSelect]);

  uint spill_reg = LRG::SPILL_REG;
  _max_reg = OptoReg::Name(0);  // Past max register used
  while( _simplified ) {
    // Pull next LRG from the simplified list - in reverse order of removal
    uint lidx = _simplified;
    LRG *lrg = &lrgs(lidx);
    _simplified = lrg->_next;

#ifndef PRODUCT
    if (trace_spilling()) {
      ttyLocker ttyl;
      tty->print_cr("L%d selecting degree %d degrees_of_freedom %d", lidx, lrg->degree(),
                    lrg->degrees_of_freedom());
      lrg->dump();
    }
#endif

    // Re-insert into the IFG
    _ifg->re_insert(lidx);
    if( !lrg->alive() ) continue;
    // capture allstackedness flag before mask is hacked
    const int is_allstack = lrg->mask().is_AllStack();

    // Yeah, yeah, yeah, I know, I know.  I can refactor this
    // to avoid the GOTO, although the refactored code will not
    // be much clearer.  We arrive here IFF we have a stack-based
    // live range that cannot color in the current chunk, and it
    // has to move into the next free stack chunk.
    int chunk = 0;              // Current chunk is first chunk
    retry_next_chunk:

    // Remove neighbor colors
    IndexSet *s = _ifg->neighbors(lidx);
    debug_only(RegMask orig_mask = lrg->mask();)

    if (!s->is_empty()) {
      IndexSetIterator elements(s);
      uint neighbor;
      while ((neighbor = elements.next()) != 0) {
        // Note that neighbor might be a spill_reg.  In this case, exclusion
        // of its color will be a no-op, since the spill_reg chunk is in outer
        // space.  Also, if neighbor is in a different chunk, this exclusion
        // will be a no-op.  (Later on, if lrg runs out of possible colors in
        // its chunk, a new chunk of colors may be tried, in which case
        // examination of neighbors is started again, at retry_next_chunk.)
        LRG &nlrg = lrgs(neighbor);
        OptoReg::Name nreg = nlrg.reg();
        // Only subtract masks in the same chunk
        if (nreg >= chunk && nreg < chunk + RegMask::CHUNK_SIZE) {
#ifndef PRODUCT
          uint size = lrg->mask().Size();
          RegMask rm = lrg->mask();
#endif
          lrg->SUBTRACT(nlrg.mask());
#ifndef PRODUCT
          if (trace_spilling() && lrg->mask().Size() != size) {
            ttyLocker ttyl;
            tty->print("L%d ", lidx);
            rm.dump();
            tty->print(" intersected L%d ", neighbor);
            nlrg.mask().dump();
            tty->print(" removed ");
            rm.SUBTRACT(lrg->mask());
            rm.dump();
            tty->print(" leaving ");
            lrg->mask().dump();
            tty->cr();
          }
#endif
        }
      }
    }
    //assert(is_allstack == lrg->mask().is_AllStack(), "nbrs must not change AllStackedness");
    // Aligned pairs need aligned masks
    assert(!lrg->_is_vector || !lrg->_fat_proj, "sanity");
    if (lrg->num_regs() > 1 && !lrg->_fat_proj) {
      lrg->clear_to_sets();
    }

    // Check if a color is available and if so pick the color
    OptoReg::Name reg = choose_color( *lrg, chunk );

    //---------------
    // If we fail to color and the AllStack flag is set, trigger
    // a chunk-rollover event
    if(!OptoReg::is_valid(OptoReg::add(reg,-chunk)) && is_allstack) {
      // Bump register mask up to next stack chunk
      chunk += RegMask::CHUNK_SIZE;
      lrg->Set_All();
      goto retry_next_chunk;
    }

    //---------------
    // Did we get a color?
    else if( OptoReg::is_valid(reg)) {
#ifndef PRODUCT
      RegMask avail_rm = lrg->mask();
#endif

      // Record selected register
      lrg->set_reg(reg);

      if( reg >= _max_reg )     // Compute max register limit
        _max_reg = OptoReg::add(reg,1);
      // Fold reg back into normal space
      reg = OptoReg::add(reg,-chunk);

      // If the live range is not bound, then we actually had some choices
      // to make.  In this case, the mask has more bits in it than the colors
      // chosen.  Restrict the mask to just what was picked.
      int n_regs = lrg->num_regs();
      assert(!lrg->_is_vector || !lrg->_fat_proj, "sanity");
      if (n_regs == 1 || !lrg->_fat_proj) {
        if (Matcher::supports_scalable_vector()) {
          assert(!lrg->_is_vector || n_regs <= RegMask::SlotsPerVecA, "sanity");
        } else {
          assert(!lrg->_is_vector || n_regs <= RegMask::SlotsPerVecZ, "sanity");
        }
        lrg->Clear();           // Clear the mask
        lrg->Insert(reg);       // Set regmask to match selected reg
        // For vectors and pairs, also insert the low bit of the pair
        // We always choose the high bit, then mask the low bits by register size
        if (lrg->is_scalable() && OptoReg::is_stack(lrg->reg())) { // stack
          n_regs = lrg->scalable_reg_slots();
        }
        for (int i = 1; i < n_regs; i++) {
          lrg->Insert(OptoReg::add(reg,-i));
        }
        lrg->set_mask_size(n_regs);
      } else {                  // Else fatproj
        // mask must be equal to fatproj bits, by definition
      }
#ifndef PRODUCT
      if (trace_spilling()) {
        ttyLocker ttyl;
        tty->print("L%d selected ", lidx);
        lrg->mask().dump();
        tty->print(" from ");
        avail_rm.dump();
        tty->cr();
      }
#endif
      // Note that reg is the highest-numbered register in the newly-bound mask.
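      // Illustrative (added commentary, register names invented): a two-slot
      // pair that selected high reg R5 leaves here with mask {R4,R5} and
      // mask_size 2.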
    } // end color available case

    //---------------
    // Live range is live and no colors available
    else {
      assert( lrg->alive(), "" );
      assert( !lrg->_fat_proj || lrg->is_multidef() ||
              lrg->_def->outcnt() > 0, "fat_proj cannot spill");
      assert( !orig_mask.is_AllStack(), "All Stack does not spill" );

      // Assign the special spillreg register
      lrg->set_reg(OptoReg::Name(spill_reg++));
      // Do not empty the regmask; leave mask_size lying around
      // for use during Spilling
#ifndef PRODUCT
      if( trace_spilling() ) {
        ttyLocker ttyl;
        tty->print("L%d spilling with neighbors: ", lidx);
        s->dump();
        debug_only(tty->print(" original mask: "));
        debug_only(orig_mask.dump());
        dump_lrg(lidx);
      }
#endif
    } // end spill case

  }

  return spill_reg-LRG::SPILL_REG;      // Return number of spills
}

// Set the 'spilled_once' or 'spilled_twice' flag on a node.
void PhaseChaitin::set_was_spilled( Node *n ) {
  if( _spilled_once.test_set(n->_idx) )
    _spilled_twice.set(n->_idx);
}

// Convert Ideal spill instructions into proper FramePtr + offset Loads and
// Stores.  Use-def chains are NOT preserved, but Node->LRG->reg maps are.
void PhaseChaitin::fixup_spills() {
  // This function does only cisc spill work.
  if( !UseCISCSpill ) return;

  Compile::TracePhase tp("fixupSpills", &timers[_t_fixupSpills]);

  // Grab the Frame Pointer
  Node *fp = _cfg.get_root_block()->head()->in(1)->in(TypeFunc::FramePtr);

  // For all blocks
  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
    Block* block = _cfg.get_block(i);

    // For all instructions in block
    uint last_inst = block->end_idx();
    for (uint j = 1; j <= last_inst; j++) {
      Node* n = block->get_node(j);

      // Dead instruction???
      assert( n->outcnt() != 0 ||   // Nothing dead after post alloc
              C->top() == n ||      // Or the random TOP node
              n->is_Proj(),         // Or a fat-proj kill node
              "No dead instructions after post-alloc" );

      int inp = n->cisc_operand();
      if( inp != AdlcVMDeps::Not_cisc_spillable ) {
        // Convert operand number to edge index number
        MachNode *mach = n->as_Mach();
        inp = mach->operand_index(inp);
        Node *src = n->in(inp);   // Value to load or store
        LRG &lrg_cisc = lrgs(_lrg_map.find_const(src));
        OptoReg::Name src_reg = lrg_cisc.reg();
        // Doubles record the HIGH register of an adjacent pair.
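        // E.g. (added commentary, register names invented): a two-slot double
        // whose reg() is the high half R7 yields src_reg = R7 + 1 - 2,
        // i.e. R6, the low half of the adjacent pair.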
        src_reg = OptoReg::add(src_reg,1-lrg_cisc.num_regs());
        if( OptoReg::is_stack(src_reg) ) { // If input is on stack
          // This is a CISC Spill, get stack offset and construct new node
#ifndef PRODUCT
          if( TraceCISCSpill ) {
            tty->print("    reg-instr:  ");
            n->dump();
          }
#endif
          int stk_offset = reg2offset(src_reg);
          // Bailout if we might exceed node limit when spilling this instruction
          C->check_node_count(0, "out of nodes fixing spills");
          if (C->failing())  return;
          // Transform node
          MachNode *cisc = mach->cisc_version(stk_offset)->as_Mach();
          cisc->set_req(inp,fp);          // Base register is frame pointer
          if( cisc->oper_input_base() > 1 && mach->oper_input_base() <= 1 ) {
            assert( cisc->oper_input_base() == 2, "Only adding one edge");
            cisc->ins_req(1,src);         // Requires a memory edge
          }
          block->map_node(cisc, j);       // Insert into basic block
          n->subsume_by(cisc, C);         // Correct graph
          ++_used_cisc_instructions;
#ifndef PRODUCT
          if( TraceCISCSpill ) {
            tty->print("    cisc-instr: ");
            cisc->dump();
          }
#endif
        } else {
#ifndef PRODUCT
          if( TraceCISCSpill ) {
            tty->print("    using reg-instr: ");
            n->dump();
          }
#endif
          ++_unused_cisc_instructions;    // input can be on stack
        }
      }

    } // End of for all instructions

  } // End of for all blocks
}
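
// Illustrative sketch (added commentary, mnemonics invented): with
// UseCISCSpill, an instruction such as "addI dst, src" whose 'src' value
// landed on the stack can be rewritten by cisc_version() into the memory
// form "addI dst, [fp + #stk_offset]", folding the reload into the op.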

// Helper to stretch_base_pointer_live_ranges(), below; recursively discover
// the base Node for a given derived Node.  Easy for AddP-related machine
// nodes, but needs to be recursive for derived Phis.
Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derived, uint &maxlrg ) {
  // See if already computed; if so return it
  if( derived_base_map[derived->_idx] )
    return derived_base_map[derived->_idx];

  // See if this happens to be a base.
  // NOTE: we use TypePtr instead of TypeOopPtr because we can have
  // pointers derived from NULL!  These are always along paths that
  // can't happen at run-time but the optimizer cannot deduce it so
  // we have to handle it gracefully.
  assert(!derived->bottom_type()->isa_narrowoop() ||
         derived->bottom_type()->make_ptr()->is_ptr()->offset() == 0, "sanity");
  const TypePtr *tj = derived->bottom_type()->isa_ptr();
  // If it's an OOP with a non-zero offset, then it is derived.
  if (tj == NULL || tj->offset() == 0) {
    derived_base_map[derived->_idx] = derived;
    return derived;
  }
  // Derived is NULL+offset?  Base is NULL!
  if( derived->is_Con() ) {
    Node *base = _matcher.mach_null();
    assert(base != NULL, "sanity");
    if (base->in(0) == NULL) {
      // Initialize it once and make it shared:
      // set control to _root and place it into Start block
      // (where top() node is placed).
      base->init_req(0, _cfg.get_root_node());
      Block *startb = _cfg.get_block_for_node(C->top());
      uint node_pos = startb->find_node(C->top());
      startb->insert_node(base, node_pos);
      _cfg.map_node_to_block(base, startb);
      assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");

      // The loadConP0 might have projection nodes depending on architecture
      // Add the projection nodes to the CFG
      for (DUIterator_Fast imax, i = base->fast_outs(imax); i < imax; i++) {
        Node* use = base->fast_out(i);
        if (use->is_MachProj()) {
          startb->insert_node(use, ++node_pos);
          _cfg.map_node_to_block(use, startb);
          new_lrg(use, maxlrg++);
        }
      }
    }
    if (_lrg_map.live_range_id(base) == 0) {
      new_lrg(base, maxlrg++);
    }
    assert(base->in(0) == _cfg.get_root_node() && _cfg.get_block_for_node(base) == _cfg.get_block_for_node(C->top()), "base NULL should be shared");
    derived_base_map[derived->_idx] = base;
    return base;
  }

  // Check for AddP-related opcodes
  if (!derived->is_Phi()) {
    assert(derived->as_Mach()->ideal_Opcode() == Op_AddP, "but is: %s", derived->Name());
    Node *base = derived->in(AddPNode::Base);
    derived_base_map[derived->_idx] = base;
    return base;
  }

  // Recursively find bases for Phis.
  // First check to see if we can avoid a base Phi here.
  Node *base = find_base_for_derived( derived_base_map, derived->in(1), maxlrg);
  uint i;
  for( i = 2; i < derived->req(); i++ )
    if( base != find_base_for_derived( derived_base_map, derived->in(i), maxlrg))
      break;
  // Went to the end without finding any different bases?
  if( i == derived->req() ) {   // No need for a base Phi here
    derived_base_map[derived->_idx] = base;
    return base;
  }

  // Now we see we need a base-Phi here to merge the bases
  const Type *t = base->bottom_type();
  base = new PhiNode( derived->in(0), t );
  for( i = 1; i < derived->req(); i++ ) {
    base->init_req(i, find_base_for_derived(derived_base_map, derived->in(i), maxlrg));
    t = t->meet(base->in(i)->bottom_type());
  }
  base->as_Phi()->set_type(t);

  // Search the current block for an existing base-Phi
  Block *b = _cfg.get_block_for_node(derived);
  for( i = 1; i <= b->end_idx(); i++ ) { // Search for matching Phi
    Node *phi = b->get_node(i);
    if( !phi->is_Phi() ) {      // Found end of Phis with no match?
      b->insert_node(base, i);  // Must insert created Phi here as base
      _cfg.map_node_to_block(base, b);
      new_lrg(base, maxlrg++);
      break;
    }
    // See if Phi matches.
    uint j;
    for( j = 1; j < base->req(); j++ )
      if( phi->in(j) != base->in(j) &&
          !(phi->in(j)->is_Con() && base->in(j)->is_Con()) ) // allow different NULLs
        break;
    if( j == base->req() ) {    // All inputs match?
      base = phi;               // Then use existing 'phi' and drop 'base'
      break;
    }
  }

  // Cache info for later passes
  derived_base_map[derived->_idx] = base;
  return base;
}
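
// Illustrative example (added commentary): a derived pointer produced by
//   AddP(base, base, #12)   -- an address 12 bytes into an object --
// resolves directly to the AddP's Base input, while a Phi of derived
// pointers gets a matching base Phi built (or an existing one reused).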

// At each Safepoint, insert extra debug edges for each pair of derived value/
// base pointer that is live across the Safepoint for oopmap building.  The
// edge pairs get added in after sfpt->jvms()->oopoff(), but are in the
// required edge set.
bool PhaseChaitin::stretch_base_pointer_live_ranges(ResourceArea *a) {
  int must_recompute_live = false;
  uint maxlrg = _lrg_map.max_lrg_id();
  Node **derived_base_map = (Node**)a->Amalloc(sizeof(Node*)*C->unique());
  memset( derived_base_map, 0, sizeof(Node*)*C->unique() );

  // For all blocks in RPO do...
  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
    Block* block = _cfg.get_block(i);
    // Note use of deep-copy constructor.  I cannot hammer the original
    // liveout bits, because they are needed by the following coalesce pass.
    IndexSet liveout(_live->live(block));

    for (uint j = block->end_idx() + 1; j > 1; j--) {
      Node* n = block->get_node(j - 1);

      // Pre-split compares of loop-phis.  Loop-phis form a cycle we would
      // like to see in the same register.  Compare uses the loop-phi and so
      // extends its live range BUT cannot be part of the cycle.  If this
      // extended live range overlaps with the update of the loop-phi value
      // we need both alive at the same time -- which requires at least 1
      // copy.  But because Intel has only 2-address registers we end up with
      // at least 2 copies, one before the loop-phi update instruction and
      // one after.  Instead we split the input to the compare just after the
      // phi.
      if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CmpI ) {
        Node *phi = n->in(1);
        if( phi->is_Phi() && phi->as_Phi()->region()->is_Loop() ) {
          Block *phi_block = _cfg.get_block_for_node(phi);
          if (_cfg.get_block_for_node(phi_block->pred(2)) == block) {
            const RegMask *mask = C->matcher()->idealreg2spillmask[Op_RegI];
            Node *spill = new MachSpillCopyNode(MachSpillCopyNode::LoopPhiInput, phi, *mask, *mask);
            insert_proj( phi_block, 1, spill, maxlrg++ );
            n->set_req(1, spill);
            must_recompute_live = true;
          }
        }
      }

      // Get value being defined
      uint lidx = _lrg_map.live_range_id(n);
      // Ignore the occasional brand-new live range
      if (lidx && lidx < _lrg_map.max_lrg_id()) {
        // Remove from live-out set
        liveout.remove(lidx);

        // Copies do not define a new value and so do not interfere.
        // Remove the copy's source from the liveout set before interfering.
        uint idx = n->is_Copy();
        if (idx) {
          liveout.remove(_lrg_map.live_range_id(n->in(idx)));
        }
      }

      // Found a safepoint?
      JVMState *jvms = n->jvms();
      if (jvms && !liveout.is_empty()) {
        // Now scan for a live derived pointer
        IndexSetIterator elements(&liveout);
        uint neighbor;
        while ((neighbor = elements.next()) != 0) {
          // Find reaching DEF for base and derived values
          // This works because we are still in SSA during this call.
          Node *derived = lrgs(neighbor)._def;
          const TypePtr *tj = derived->bottom_type()->isa_ptr();
          assert(!derived->bottom_type()->isa_narrowoop() ||
                 derived->bottom_type()->make_ptr()->is_ptr()->offset() == 0, "sanity");
          // If it's an OOP with a non-zero offset, then it is derived.
          if (tj && tj->offset() != 0 && tj->isa_oop_ptr()) {
            Node *base = find_base_for_derived(derived_base_map, derived, maxlrg);
            assert(base->_idx < _lrg_map.size(), "");
            // Add reaching DEFs of derived pointer and base pointer as a
            // pair of inputs
            n->add_req(derived);
            n->add_req(base);
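            // Illustrative (added commentary): from here on the safepoint's
            // inputs at jvms->oopoff() and beyond read derived1, base1,
            // derived2, base2, ...; OopMap building later consumes them
            // pairwise.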
            // See if the base pointer is already live to this point.
            // Since I'm working on the SSA form, live-ness amounts to
            // reaching def's.  So if I find the base's live range then
            // I know the base's def reaches here.
            if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or
                 !liveout.member(_lrg_map.live_range_id(base))) &&        // not live) AND
                 (_lrg_map.live_range_id(base) > 0) &&                    // not a constant
                 _cfg.get_block_for_node(base) != block) {                // base not def'd in blk)
              // Base pointer is not currently live.  Since I stretched
              // the base pointer to here and it crosses basic-block
              // boundaries, the global live info is now incorrect.
              // Recompute live.
              must_recompute_live = true;
            } // End of if base pointer is not live to debug info
          }
        } // End of scan all live data for derived ptrs crossing GC point
      } // End of if found a GC point

      // Make all inputs live
      if (!n->is_Phi()) {       // Phi function uses come from prior block
        for (uint k = 1; k < n->req(); k++) {
          uint lidx = _lrg_map.live_range_id(n->in(k));
          if (lidx < _lrg_map.max_lrg_id()) {
            liveout.insert(lidx);
          }
        }
      }

    } // End of forall instructions in block
    liveout.clear();  // Free the memory used by liveout.

  } // End of forall blocks
  _lrg_map.set_max_lrg_id(maxlrg);

  // If I created a new live range I need to recompute live
  if (maxlrg != _ifg->_maxlrg) {
    must_recompute_live = true;
  }

  return must_recompute_live != 0;
}

// Extend the node to LRG mapping
void PhaseChaitin::add_reference(const Node *node, const Node *old_node) {
  _lrg_map.extend(node->_idx, _lrg_map.live_range_id(old_node));
}

#ifndef PRODUCT
void PhaseChaitin::dump(const Node* n) const {
  uint r = (n->_idx < _lrg_map.size()) ? _lrg_map.find_const(n) : 0;
  tty->print("L%d",r);
  if (r && n->Opcode() != Op_Phi) {
    if( _node_regs ) {          // Got a post-allocation copy of allocation?
      tty->print("[");
      OptoReg::Name second = get_reg_second(n);
      if( OptoReg::is_valid(second) ) {
        if( OptoReg::is_reg(second) )
          tty->print("%s:",Matcher::regName[second]);
        else
          tty->print("%s+%d:",OptoReg::regname(OptoReg::c_frame_pointer), reg2offset_unchecked(second));
      }
      OptoReg::Name first = get_reg_first(n);
      if( OptoReg::is_reg(first) )
        tty->print("%s]",Matcher::regName[first]);
      else
        tty->print("%s+%d]",OptoReg::regname(OptoReg::c_frame_pointer), reg2offset_unchecked(first));
    } else
      n->out_RegMask().dump();
  }
  tty->print("/N%d\t",n->_idx);
  tty->print("%s === ", n->Name());
  uint k;
  for (k = 0; k < n->req(); k++) {
    Node *m = n->in(k);
    if (!m) {
      tty->print("_ ");
    }
    else {
      uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0;
      tty->print("L%d",r);
      // Data MultiNode's can have projections with no real registers.
      // Don't die while dumping them.
      int op = n->Opcode();
      if( r && op != Op_Phi && op != Op_Proj && op != Op_SCMemProj) {
        if( _node_regs ) {
          tty->print("[");
          OptoReg::Name second = get_reg_second(n->in(k));
          if( OptoReg::is_valid(second) ) {
            if( OptoReg::is_reg(second) )
              tty->print("%s:",Matcher::regName[second]);
            else
              tty->print("%s+%d:",OptoReg::regname(OptoReg::c_frame_pointer),
                         reg2offset_unchecked(second));
          }
          OptoReg::Name first = get_reg_first(n->in(k));
          if( OptoReg::is_reg(first) )
            tty->print("%s]",Matcher::regName[first]);
          else
            tty->print("%s+%d]",OptoReg::regname(OptoReg::c_frame_pointer),
                       reg2offset_unchecked(first));
        } else
          n->in_RegMask(k).dump();
      }
      tty->print("/N%d ",m->_idx);
    }
  }
  if( k < n->len() && n->in(k) ) tty->print("| ");
  for( ; k < n->len(); k++ ) {
    Node *m = n->in(k);
    if(!m) {
      break;
    }
    uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0;
    tty->print("L%d",r);
    tty->print("/N%d ",m->_idx);
  }
  if( n->is_Mach() ) n->as_Mach()->dump_spec(tty);
  else n->dump_spec(tty);
  if( _spilled_once.test(n->_idx ) ) {
    tty->print(" Spill_1");
    if( _spilled_twice.test(n->_idx ) )
      tty->print(" Spill_2");
  }
  tty->print("\n");
}

void PhaseChaitin::dump(const Block* b) const {
  b->dump_head(&_cfg);

  // For all instructions
  for( uint j = 0; j < b->number_of_nodes(); j++ )
    dump(b->get_node(j));
  // Print live-out info at end of block
  if( _live ) {
    tty->print("Liveout: ");
    IndexSet *live = _live->live(b);
    IndexSetIterator elements(live);
    tty->print("{");
    uint i;
    while ((i = elements.next()) != 0) {
      tty->print("L%d ", _lrg_map.find_const(i));
    }
    tty->print_cr("}");
  }
  tty->print("\n");
}

void PhaseChaitin::dump() const {
  tty->print( "--- Chaitin -- argsize: %d  framesize: %d ---\n",
              _matcher._new_SP, _framesize );

  // For all blocks
  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
    dump(_cfg.get_block(i));
  }
  // End of per-block dump
  tty->print("\n");

  if (!_ifg) {
    tty->print("(No IFG.)\n");
    return;
  }

  // Dump LRG array
  tty->print("--- Live RanGe Array ---\n");
  for (uint i2 = 1; i2 < _lrg_map.max_lrg_id(); i2++) {
    tty->print("L%d: ",i2);
    if (i2 < _ifg->_maxlrg) {
      lrgs(i2).dump();
    }
    else {
      tty->print_cr("new LRG");
    }
  }
  tty->cr();

  // Dump lo-degree list
  tty->print("Lo degree: ");
  for(uint i3 = _lo_degree; i3; i3 = lrgs(i3)._next )
    tty->print("L%d ",i3);
  tty->cr();

  // Dump lo-stk-degree list
  tty->print("Lo stk degree: ");
  for(uint i4 = _lo_stk_degree; i4; i4 = lrgs(i4)._next )
    tty->print("L%d ",i4);
  tty->cr();

  // Dump hi-degree list
  tty->print("Hi degree: ");
  for(uint i5 = _hi_degree; i5; i5 = lrgs(i5)._next )
    tty->print("L%d ",i5);
  tty->cr();
}

void PhaseChaitin::dump_degree_lists() const {
  // Dump lo-degree list
  tty->print("Lo degree: ");
  for( uint i = _lo_degree; i; i = lrgs(i)._next )
    tty->print("L%d ",i);
  tty->cr();

  // Dump lo-stk-degree list
  tty->print("Lo stk degree: ");
  for(uint i2 = _lo_stk_degree; i2; i2 = lrgs(i2)._next )
    tty->print("L%d ",i2);
  tty->cr();

  // Dump hi-degree list
  tty->print("Hi degree: ");
  for(uint i3 = _hi_degree; i3; i3 = lrgs(i3)._next )
    tty->print("L%d ",i3);
  tty->cr();
}

void PhaseChaitin::dump_simplified() const {
  tty->print("Simplified: ");
  for( uint i = _simplified; i; i = lrgs(i)._next )
    tty->print("L%d ",i);
  tty->cr();
}

static char *print_reg(OptoReg::Name reg, const PhaseChaitin* pc, char* buf) {
  if ((int)reg < 0)
    sprintf(buf, "<OptoReg::%d>", (int)reg);
  else if (OptoReg::is_reg(reg))
    strcpy(buf, Matcher::regName[reg]);
  else
    sprintf(buf, "%s + #%d", OptoReg::regname(OptoReg::c_frame_pointer),
            pc->reg2offset(reg));
  return buf+strlen(buf);
}

// Dump a register name into a buffer.  Be intelligent if we get called
// before allocation is complete.
char *PhaseChaitin::dump_register(const Node* n, char* buf) const {
  if( _node_regs ) {
    // Post allocation, use direct mappings, no LRG info available
    print_reg( get_reg_first(n), this, buf );
  } else {
    uint lidx = _lrg_map.find_const(n); // Grab LRG number
    if( !_ifg ) {
      sprintf(buf, "L%d", lidx);  // No register binding yet
    } else if( !lidx ) {          // Special, not allocated value
      strcpy(buf, "Special");
    } else {
      if (lrgs(lidx)._is_vector) {
        if (lrgs(lidx).mask().is_bound_set(lrgs(lidx).num_regs()))
          print_reg( lrgs(lidx).reg(), this, buf ); // a bound machine register
        else
          sprintf(buf, "L%d", lidx); // No register binding yet
      } else if( (lrgs(lidx).num_regs() == 1)
                   ? lrgs(lidx).mask().is_bound1()
                   : lrgs(lidx).mask().is_bound_pair() ) {
        // Hah!  We have a bound machine register
        print_reg( lrgs(lidx).reg(), this, buf );
      } else {
        sprintf(buf, "L%d", lidx); // No register binding yet
      }
    }
  }
  return buf+strlen(buf);
}
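
// Usage sketch (added commentary; buffer size chosen for the example):
//   char buf[128];
//   dump_register(n, buf);
//   tty->print("%s", buf);
// The returned pointer marks the end of the text written into 'buf'.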

void PhaseChaitin::dump_for_spill_split_recycle() const {
  if( WizardMode && (PrintCompilation || PrintOpto) ) {
    // Display which live ranges need to be split and the allocator's state
    tty->print_cr("Graph-Coloring Iteration %d will split the following live ranges", _trip_cnt);
    for (uint bidx = 1; bidx < _lrg_map.max_lrg_id(); bidx++) {
      if( lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG ) {
        tty->print("L%d: ", bidx);
        lrgs(bidx).dump();
      }
    }
    tty->cr();
    dump();
  }
}

void PhaseChaitin::dump_frame() const {
  const char *fp = OptoReg::regname(OptoReg::c_frame_pointer);
  const TypeTuple *domain = C->tf()->domain_cc();
  const int argcnt = domain->cnt() - TypeFunc::Parms;

  // Incoming arguments in registers dump
  for( int k = 0; k < argcnt; k++ ) {
    OptoReg::Name parmreg = _matcher._parm_regs[k].first();
    if( OptoReg::is_reg(parmreg)) {
      const char *reg_name = OptoReg::regname(parmreg);
      tty->print("#r%3.3d %s", parmreg, reg_name);
      parmreg = _matcher._parm_regs[k].second();
      if( OptoReg::is_reg(parmreg)) {
        tty->print(":%s", OptoReg::regname(parmreg));
      }
      tty->print("   : parm %d: ", k);
      domain->field_at(k + TypeFunc::Parms)->dump();
      tty->cr();
    }
  }

  // Check for un-owned padding above incoming args
  OptoReg::Name reg = _matcher._new_SP;
  if( reg > _matcher._in_arg_limit ) {
    reg = OptoReg::add(reg, -1);
    tty->print_cr("#r%3.3d %s+%2d: pad0, owned by CALLER", reg, fp, reg2offset_unchecked(reg));
  }

  // Incoming argument area dump
  OptoReg::Name begin_in_arg = OptoReg::add(_matcher._old_SP, C->out_preserve_stack_slots());
  while( reg > begin_in_arg ) {
    reg = OptoReg::add(reg, -1);
    tty->print("#r%3.3d %s+%2d: ", reg, fp, reg2offset_unchecked(reg));
    int j;
    for( j = 0; j < argcnt; j++) {
      if( _matcher._parm_regs[j].first() == reg ||
          _matcher._parm_regs[j].second() == reg ) {
        tty->print("parm %d: ", j);
        domain->field_at(j + TypeFunc::Parms)->dump();
        tty->cr();
        break;
      }
    }
    if( j >= argcnt )
      tty->print_cr("HOLE, owned by SELF");
  }

  // Old outgoing preserve area
  while( reg > _matcher._old_SP ) {
    reg = OptoReg::add(reg, -1);
    tty->print_cr("#r%3.3d %s+%2d: old out preserve", reg, fp, reg2offset_unchecked(reg));
  }

  // Old SP
  tty->print_cr("# -- Old %s -- Framesize: %d --", fp,
    reg2offset_unchecked(OptoReg::add(_matcher._old_SP, -1)) - reg2offset_unchecked(_matcher._new_SP) + jintSize);

  // Preserve area dump
  int fixed_slots = C->fixed_slots();
  OptoReg::Name begin_in_preserve = OptoReg::add(_matcher._old_SP, -(int)C->in_preserve_stack_slots());
  OptoReg::Name return_addr = _matcher.return_addr();

  reg = OptoReg::add(reg, -1);
  while (OptoReg::is_stack(reg)) {
    tty->print("#r%3.3d %s+%2d: ", reg, fp, reg2offset_unchecked(reg));
    if (return_addr == reg) {
      tty->print_cr("return address");
    } else if (reg >= begin_in_preserve) {
      // Preserved slots are present on x86
      if (return_addr == OptoReg::add(reg, VMRegImpl::slots_per_word))
        tty->print_cr("saved fp register");
      else if (return_addr == OptoReg::add(reg, 2*VMRegImpl::slots_per_word) &&
               VerifyStackAtCalls)
        tty->print_cr("0xBADB100D   +VerifyStackAtCalls");
      else
        tty->print_cr("in_preserve");
    } else if ((int)OptoReg::reg2stack(reg) < fixed_slots) {
      tty->print_cr("Fixed slot %d", OptoReg::reg2stack(reg));
    } else {
      tty->print_cr("pad2, stack alignment");
    }
    reg = OptoReg::add(reg, -1);
  }

  // Spill area dump
  reg = OptoReg::add(_matcher._new_SP, _framesize );
  while( reg > _matcher._out_arg_limit ) {
    reg = OptoReg::add(reg, -1);
    tty->print_cr("#r%3.3d %s+%2d: spill", reg, fp, reg2offset_unchecked(reg));
  }

  // Outgoing argument area dump
  while( reg > OptoReg::add(_matcher._new_SP, C->out_preserve_stack_slots()) ) {
    reg = OptoReg::add(reg, -1);
    tty->print_cr("#r%3.3d %s+%2d: outgoing argument", reg, fp, reg2offset_unchecked(reg));
  }

  // Outgoing new preserve area
  while( reg > _matcher._new_SP ) {
    reg = OptoReg::add(reg, -1);
    tty->print_cr("#r%3.3d %s+%2d: new out preserve", reg, fp, reg2offset_unchecked(reg));
  }
  tty->print_cr("#");
}
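
// Note (added commentary, assuming the typical frame shape dumped above):
// dump_frame() walks from the highest incoming-argument slot down to the new
// SP, printing incoming args, the old outgoing preserve area, the return
// address / saved fp and other preserved slots, fixed slots, the spill area,
// outgoing args, and finally the new outgoing preserve area.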

void PhaseChaitin::dump_bb(uint pre_order) const {
  tty->print_cr("---dump of B%d---", pre_order);
  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
    Block* block = _cfg.get_block(i);
    if (block->_pre_order == pre_order) {
      dump(block);
    }
  }
}

void PhaseChaitin::dump_lrg(uint lidx, bool defs_only) const {
  tty->print_cr("---dump of L%d---", lidx);

  if (_ifg) {
    if (lidx >= _lrg_map.max_lrg_id()) {
      tty->print("Attempt to print live range index beyond max live range.\n");
      return;
    }
    tty->print("L%d: ", lidx);
    if (lidx < _ifg->_maxlrg) {
      lrgs(lidx).dump();
    } else {
      tty->print_cr("new LRG");
    }
  }
  if( _ifg && lidx < _ifg->_maxlrg) {
    tty->print("Neighbors: %d - ", _ifg->neighbor_cnt(lidx));
    _ifg->neighbors(lidx)->dump();
    tty->cr();
  }
  // For all blocks
  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
    Block* block = _cfg.get_block(i);
    int dump_once = 0;

    // For all instructions
    for( uint j = 0; j < block->number_of_nodes(); j++ ) {
      Node *n = block->get_node(j);
      if (_lrg_map.find_const(n) == lidx) {
        if (!dump_once++) {
          tty->cr();
          block->dump_head(&_cfg);
        }
        dump(n);
        continue;
      }
      if (!defs_only) {
        uint cnt = n->req();
        for( uint k = 1; k < cnt; k++ ) {
          Node *m = n->in(k);
          if (!m) {
            continue;  // be robust in the dumper
          }
          if (_lrg_map.find_const(m) == lidx) {
            if (!dump_once++) {
              tty->cr();
              block->dump_head(&_cfg);
            }
            dump(n);
          }
        }
      }
    }
  } // End of per-block dump
  tty->cr();
}
#endif // not PRODUCT

#ifdef ASSERT
// Verify that base pointers and derived pointers are still sane.
void PhaseChaitin::verify_base_ptrs(ResourceArea* a) const {
  Unique_Node_List worklist(a);
  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
    Block* block = _cfg.get_block(i);
    for (uint j = block->end_idx() + 1; j > 1; j--) {
      Node* n = block->get_node(j-1);
      if (n->is_Phi()) {
        break;
      }
      // Found a safepoint?
      if (n->is_MachSafePoint()) {
        MachSafePointNode* sfpt = n->as_MachSafePoint();
        JVMState* jvms = sfpt->jvms();
        if (jvms != NULL) {
          // Now scan for a live derived pointer
          if (jvms->oopoff() < sfpt->req()) {
            // Check each derived/base pair
            for (uint idx = jvms->oopoff(); idx < sfpt->req(); idx++) {
              Node* check = sfpt->in(idx);
              bool is_derived = ((idx - jvms->oopoff()) & 1) == 0;
              // search upwards through spills and spill phis for AddP
              worklist.clear();
              worklist.push(check);
              uint k = 0;
              while (k < worklist.size()) {
                check = worklist.at(k);
                assert(check, "Bad base or derived pointer");
                // See PhaseChaitin::find_base_for_derived() for all cases.
                int isc = check->is_Copy();
                if (isc) {
                  worklist.push(check->in(isc));
                } else if (check->is_Phi()) {
                  for (uint m = 1; m < check->req(); m++) {
                    worklist.push(check->in(m));
                  }
                } else if (check->is_Con()) {
                  if (is_derived && check->bottom_type()->is_ptr()->offset() != 0) {
                    // Derived is NULL+non-zero offset, base must be NULL.
                    assert(check->bottom_type()->is_ptr()->ptr() == TypePtr::Null, "Bad derived pointer");
                  } else {
                    assert(check->bottom_type()->is_ptr()->offset() == 0, "Bad base pointer");
                    // Base either ConP(NULL) or loadConP
                    if (check->is_Mach()) {
                      assert(check->as_Mach()->ideal_Opcode() == Op_ConP, "Bad base pointer");
                    } else {
                      assert(check->Opcode() == Op_ConP &&
                             check->bottom_type()->is_ptr()->ptr() == TypePtr::Null, "Bad base pointer");
                    }
                  }
                } else if (check->bottom_type()->is_ptr()->offset() == 0) {
                  if (check->is_Proj() || (check->is_Mach() &&
                      (check->as_Mach()->ideal_Opcode() == Op_CreateEx ||
                       check->as_Mach()->ideal_Opcode() == Op_ThreadLocal ||
                       check->as_Mach()->ideal_Opcode() == Op_CMoveP ||
                       check->as_Mach()->ideal_Opcode() == Op_CheckCastPP ||
#ifdef _LP64
                       (UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_CastPP) ||
                       (UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_DecodeN) ||
                       (UseCompressedClassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass) ||
#endif // _LP64
                       check->as_Mach()->ideal_Opcode() == Op_LoadP ||
                       check->as_Mach()->ideal_Opcode() == Op_LoadKlass))) {
                    // Valid nodes
                  } else {
                    check->dump();
                    assert(false, "Bad base or derived pointer");
                  }
                } else {
                  assert(is_derived, "Bad base pointer");
                  assert(check->is_Mach() && check->as_Mach()->ideal_Opcode() == Op_AddP, "Bad derived pointer");
                }
                k++;
                assert(k < 100000, "Derived pointer checking in infinite loop");
              } // End while
            }
          } // End of check for derived pointers
        } // End of check for debug info
      } // End of if found a safepoint
    } // End of forall instructions in block
  } // End of forall blocks
}
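
// Note (added commentary): verify() below runs only in ASSERT (debug) builds
// and only when -XX:+VerifyRegisterAllocator is set; it re-checks the CFG,
// the base/derived invariants above, and optionally the interference graph.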

// Verify that graphs and base pointers are still sane.
void PhaseChaitin::verify(ResourceArea* a, bool verify_ifg) const {
  if (VerifyRegisterAllocator) {
    _cfg.verify();
    verify_base_ptrs(a);
    if (verify_ifg) {
      _ifg->verify(this);
    }
  }
}
#endif // ASSERT

int PhaseChaitin::_final_loads  = 0;
int PhaseChaitin::_final_stores = 0;
int PhaseChaitin::_final_memoves = 0;
int PhaseChaitin::_final_copies = 0;
double PhaseChaitin::_final_load_cost  = 0;
double PhaseChaitin::_final_store_cost = 0;
double PhaseChaitin::_final_memove_cost = 0;
double PhaseChaitin::_final_copy_cost  = 0;
int PhaseChaitin::_conserv_coalesce = 0;
int PhaseChaitin::_conserv_coalesce_pair = 0;
int PhaseChaitin::_conserv_coalesce_trie = 0;
int PhaseChaitin::_conserv_coalesce_quad = 0;
int PhaseChaitin::_post_alloc = 0;
int PhaseChaitin::_lost_opp_pp_coalesce = 0;
int PhaseChaitin::_lost_opp_cflow_coalesce = 0;
int PhaseChaitin::_used_cisc_instructions = 0;
int PhaseChaitin::_unused_cisc_instructions = 0;
int PhaseChaitin::_allocator_attempts = 0;
int PhaseChaitin::_allocator_successes = 0;

#ifndef PRODUCT
uint PhaseChaitin::_high_pressure = 0;
uint PhaseChaitin::_low_pressure = 0;

void PhaseChaitin::print_chaitin_statistics() {
  tty->print_cr("Inserted %d spill loads, %d spill stores, %d mem-mem moves and %d copies.", _final_loads, _final_stores, _final_memoves, _final_copies);
  tty->print_cr("Total load cost= %6.0f, store cost = %6.0f, mem-mem cost = %5.2f, copy cost = %5.0f.", _final_load_cost, _final_store_cost, _final_memove_cost, _final_copy_cost);
  tty->print_cr("Adjusted spill cost = %7.0f.",
                _final_load_cost*4.0 + _final_store_cost*2.0 +
                _final_copy_cost*1.0 + _final_memove_cost*12.0);
  tty->print("Conservatively coalesced %d copies, %d pairs",
             _conserv_coalesce, _conserv_coalesce_pair);
  if( _conserv_coalesce_trie || _conserv_coalesce_quad )
    tty->print(", %d tries, %d quads", _conserv_coalesce_trie, _conserv_coalesce_quad);
  tty->print_cr(", %d post alloc.", _post_alloc);
  if( _lost_opp_pp_coalesce || _lost_opp_cflow_coalesce )
    tty->print_cr("Lost coalesce opportunity, %d private-private, and %d cflow interfered.",
                  _lost_opp_pp_coalesce, _lost_opp_cflow_coalesce );
  if( _used_cisc_instructions || _unused_cisc_instructions )
    tty->print_cr("Used cisc instruction %d, remained in register %d",
                  _used_cisc_instructions, _unused_cisc_instructions);
  if( _allocator_successes != 0 )
    tty->print_cr("Average allocation trips %f", (float)_allocator_attempts/(float)_allocator_successes);
  tty->print_cr("High Pressure Blocks = %d, Low Pressure Blocks = %d", _high_pressure, _low_pressure);
}
#endif // not PRODUCT