/*
 * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/divnode.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/superword.hpp"
#include "opto/vectornode.hpp"

//
//                  S U P E R W O R D   T R A N S F O R M
//=============================================================================

//------------------------------SuperWord---------------------------
SuperWord::SuperWord(PhaseIdealLoop* phase) :
  _phase(phase),
  _igvn(phase->_igvn),
  _arena(phase->C->comp_arena()),
  _packset(arena(), 8, 0, NULL),          // packs for the current block
  _bb_idx(arena(), (int)(1.10 * phase->C->unique()), 0, 0), // node idx to index in bb
  _block(arena(), 8, 0, NULL),            // nodes in current block
  _data_entry(arena(), 8, 0, NULL),       // nodes with all inputs from outside
  _mem_slice_head(arena(), 8, 0, NULL),   // memory slice heads
  _mem_slice_tail(arena(), 8, 0, NULL),   // memory slice tails
  _node_info(arena(), 8, 0, SWNodeInfo::initial), // info needed per node
  _align_to_ref(NULL),                    // memory reference to align vectors to
  _disjoint_ptrs(arena(), 8, 0, OrderedPair::initial), // runtime disambiguated pointer pairs
  _dg(_arena),                            // dependence graph
  _visited(arena()),                      // visited node set
  _post_visited(arena()),                 // post visited node set
  _n_idx_list(arena(), 8),                // scratch list of (node,index) pairs
  _stk(arena(), 8, 0, NULL),              // scratch stack of nodes
  _nlist(arena(), 8, 0, NULL),            // scratch list of nodes
  _lpt(NULL),                             // loop tree node
  _lp(NULL),                              // LoopNode
  _bb(NULL),                              // basic block
  _iv(NULL)                               // induction var
{}

//------------------------------transform_loop---------------------------
void SuperWord::transform_loop(IdealLoopTree* lpt) {
  assert(UseSuperWord, "should be");
  // Do vectors exist on this architecture?
  if (Matcher::vector_width_in_bytes(T_BYTE) < 2) return;

  assert(lpt->_head->is_CountedLoop(), "must be");
  CountedLoopNode *cl = lpt->_head->as_CountedLoop();

  if (!cl->is_valid_counted_loop()) return; // skip malformed counted loop

  if (!cl->is_main_loop()) return; // skip normal, pre, and post loops

  // Check for no control flow in body (other than exit)
  Node *cl_exit = cl->loopexit();
  if (cl_exit->in(0) != lpt->_head) return;

  // Make sure there are no extra control users of the loop backedge
  if (cl->back_control()->outcnt() != 1) {
    return;
  }

  // Check for pre-loop ending with CountedLoopEnd(Bool(Cmp(x,Opaque1(limit))))
  CountedLoopEndNode* pre_end = get_pre_loop_end(cl);
  if (pre_end == NULL) return;
  Node *pre_opaq1 = pre_end->limit();
  if (pre_opaq1->Opcode() != Op_Opaque1) return;

  init(); // initialize data structures

  set_lpt(lpt);
  set_lp(cl);

  // For now, define one block which is the entire loop body
  set_bb(cl);

  assert(_packset.length() == 0, "packset must be empty");
  SLP_extract();
}

//------------------------------SLP_extract---------------------------
// Extract the superword level parallelism
//
// 1) A reverse post-order of nodes in the block is constructed. By scanning
//    this list from first to last, all definitions are visited before their uses.
//
// 2) A point-to-point dependence graph is constructed between memory references.
//    This simplifies the upcoming "independence" checker.
//
// 3) The maximum depth in the node graph from the beginning of the block
//    to each node is computed. This is used to prune the graph search
//    in the independence checker.
//
// 4) For integer types, the necessary bit width is propagated backwards
//    from stores to allow packed operations on byte, char, and short
//    integers. This reverses the promotion to type "int" that javac
//    did for operations like: char c1,c2,c3; c1 = c2 + c3.
//
// 5) One of the memory references is picked to be an aligned vector reference.
//    The pre-loop trip count is adjusted to align this reference in the
//    unrolled body.
//
// 6) The initial set of pack pairs is seeded with memory references.
//
// 7) The set of pack pairs is extended by following use->def and def->use links.
//
// 8) The pairs are combined into vector sized packs.
//
// 9) Reorder the memory slices to co-locate members of the memory packs.
//
// 10) Generate ideal vector nodes for the final set of packs and, where necessary,
//    insert scalar promotion, vector creation from multiple scalars, and
//    extraction of scalar values from vectors.
//
void SuperWord::SLP_extract() {

  // Ready the block

  if (!construct_bb())
    return; // Exit if no interesting nodes or complex graph.

  dependence_graph();

  compute_max_depth();

  compute_vector_element_type();

  // Attempt vectorization

  find_adjacent_refs();

  extend_packlist();

  combine_packs();

  construct_my_pack_map();

  filter_packs();

  schedule();

  output();
}

//------------------------------find_adjacent_refs---------------------------
// Find the adjacent memory references and create pack pairs for them.
// This is the initial set of packs that will then be extended by
// following use->def and def->use links.
// The align positions are assigned relative to the reference "align_to_ref".
void SuperWord::find_adjacent_refs() {
  // Get list of memory operations
  Node_List memops;
  for (int i = 0; i < _block.length(); i++) {
    Node* n = _block.at(i);
    if (n->is_Mem() && !n->is_LoadStore() && in_bb(n) &&
        is_java_primitive(n->as_Mem()->memory_type())) {
      int align = memory_alignment(n->as_Mem(), 0);
      if (align != bottom_align) {
        memops.push(n);
      }
    }
  }

  Node_List align_to_refs;
  int best_iv_adjustment = 0;
  MemNode* best_align_to_mem_ref = NULL;

  while (memops.size() != 0) {
    // Find a memory reference to align to.
    MemNode* mem_ref = find_align_to_ref(memops);
    if (mem_ref == NULL) break;
    align_to_refs.push(mem_ref);
    int iv_adjustment = get_iv_adjustment(mem_ref);

    if (best_align_to_mem_ref == NULL) {
      // Set memory reference which is the best from all memory operations
      // to be used for alignment. The pre-loop trip count is modified to align
      // this reference to a vector-aligned address.
      best_align_to_mem_ref = mem_ref;
      best_iv_adjustment = iv_adjustment;
    }

    SWPointer align_to_ref_p(mem_ref, this);
    // Set alignment relative to "align_to_ref" for all related memory operations.
    for (int i = memops.size() - 1; i >= 0; i--) {
      MemNode* s = memops.at(i)->as_Mem();
      if (isomorphic(s, mem_ref)) {
        SWPointer p2(s, this);
        if (p2.comparable(align_to_ref_p)) {
          int align = memory_alignment(s, iv_adjustment);
          set_alignment(s, align);
        }
      }
    }

    // Create initial pack pairs of memory operations for which
    // alignment is set and vectors will be aligned.
    bool create_pack = true;
    if (memory_alignment(mem_ref, best_iv_adjustment) == 0) {
      if (!Matcher::misaligned_vectors_ok()) {
        int vw = vector_width(mem_ref);
        int vw_best = vector_width(best_align_to_mem_ref);
        if (vw > vw_best) {
          // Do not vectorize a memory access with more elements per vector
          // if unaligned memory accesses are not allowed, because the number
          // of iterations in the pre-loop will not be enough to align it.
          create_pack = false;
        } else {
          SWPointer p2(best_align_to_mem_ref, this);
          if (align_to_ref_p.invar() != p2.invar()) {
            // Do not vectorize memory accesses with different invariants
            // if unaligned memory accesses are not allowed.
            create_pack = false;
          }
        }
      }
    } else {
      if (same_velt_type(mem_ref, best_align_to_mem_ref)) {
        // Can't allow vectorization of unaligned memory accesses with the
        // same type since they could be overlapping accesses to the same array.
        create_pack = false;
      } else {
        // Allow independent (different type) unaligned memory operations
        // if HW supports them.
        if (!Matcher::misaligned_vectors_ok()) {
          create_pack = false;
        } else {
          // Check if packs of the same memory type but
          // with a different alignment were created before.
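          // E.g. (illustrative, not from the original comment): if an earlier
          // iteration already created int packs aligned to one boundary, an int
          // mem_ref whose alignment differs under the current iv adjustment must
          // not form a pack, since two alignments of one type cannot both be
          // satisfied by a single pre-loop trip count.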
          for (uint i = 0; i < align_to_refs.size(); i++) {
            MemNode* mr = align_to_refs.at(i)->as_Mem();
            if (same_velt_type(mr, mem_ref) &&
                memory_alignment(mr, iv_adjustment) != 0)
              create_pack = false;
          }
        }
      }
    }
    if (create_pack) {
      for (uint i = 0; i < memops.size(); i++) {
        Node* s1 = memops.at(i);
        int align = alignment(s1);
        if (align == top_align) continue;
        for (uint j = 0; j < memops.size(); j++) {
          Node* s2 = memops.at(j);
          if (alignment(s2) == top_align) continue;
          if (s1 != s2 && are_adjacent_refs(s1, s2)) {
            if (stmts_can_pack(s1, s2, align)) {
              Node_List* pair = new Node_List();
              pair->push(s1);
              pair->push(s2);
              _packset.append(pair);
            }
          }
        }
      }
    } else { // Don't create unaligned pack
      // First, remove remaining memory ops of the same type from the list.
      for (int i = memops.size() - 1; i >= 0; i--) {
        MemNode* s = memops.at(i)->as_Mem();
        if (same_velt_type(s, mem_ref)) {
          memops.remove(i);
        }
      }

      // Second, remove already constructed packs of the same type.
      for (int i = _packset.length() - 1; i >= 0; i--) {
        Node_List* p = _packset.at(i);
        MemNode* s = p->at(0)->as_Mem();
        if (same_velt_type(s, mem_ref)) {
          remove_pack_at(i);
        }
      }

      // If needed, find the best memory reference for loop alignment again.
      if (same_velt_type(mem_ref, best_align_to_mem_ref)) {
        // Put memory ops from remaining packs back on memops list for
        // the best alignment search.
        uint orig_msize = memops.size();
        for (int i = 0; i < _packset.length(); i++) {
          Node_List* p = _packset.at(i);
          MemNode* s = p->at(0)->as_Mem();
          assert(!same_velt_type(s, mem_ref), "sanity");
          memops.push(s);
        }
        best_align_to_mem_ref = find_align_to_ref(memops);
        if (best_align_to_mem_ref == NULL) break;
        best_iv_adjustment = get_iv_adjustment(best_align_to_mem_ref);
        // Restore list.
        while (memops.size() > orig_msize)
          (void)memops.pop();
      }
    } // unaligned memory accesses

    // Remove used mem nodes.
    for (int i = memops.size() - 1; i >= 0; i--) {
      MemNode* m = memops.at(i)->as_Mem();
      if (alignment(m) != top_align) {
        memops.remove(i);
      }
    }

  } // while (memops.size() != 0)
  set_align_to_ref(best_align_to_mem_ref);

#ifndef PRODUCT
  if (TraceSuperWord) {
    tty->print_cr("\nAfter find_adjacent_refs");
    print_packset();
  }
#endif
}

//------------------------------find_align_to_ref---------------------------
// Find a memory reference to align the loop induction variable to.
// Looks first at stores then at loads, looking for a memory reference
// with the largest number of references similar to it.
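// For illustration (not from the original comment): given stores to int a[i]
// with three comparable references and stores to byte b[i] with two, the int
// store wins on reference count; remaining ties are broken by wider vector
// width, smaller element size, and smaller iv offset, in that order.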
MemNode* SuperWord::find_align_to_ref(Node_List &memops) {
  GrowableArray<int> cmp_ct(arena(), memops.size(), memops.size(), 0);

  // Count number of comparable memory ops
  for (uint i = 0; i < memops.size(); i++) {
    MemNode* s1 = memops.at(i)->as_Mem();
    SWPointer p1(s1, this);
    // Discard if the pre-loop can't align this reference
    if (!ref_is_alignable(p1)) {
      *cmp_ct.adr_at(i) = 0;
      continue;
    }
    for (uint j = i+1; j < memops.size(); j++) {
      MemNode* s2 = memops.at(j)->as_Mem();
      if (isomorphic(s1, s2)) {
        SWPointer p2(s2, this);
        if (p1.comparable(p2)) {
          (*cmp_ct.adr_at(i))++;
          (*cmp_ct.adr_at(j))++;
        }
      }
    }
  }

  // Find Store (or Load) with the greatest number of "comparable" references,
  // biggest vector size, smallest data size and smallest iv offset.
  int max_ct        = 0;
  int max_vw        = 0;
  int max_idx       = -1;
  int min_size      = max_jint;
  int min_iv_offset = max_jint;
  for (uint j = 0; j < memops.size(); j++) {
    MemNode* s = memops.at(j)->as_Mem();
    if (s->is_Store()) {
      int vw = vector_width_in_bytes(s);
      assert(vw > 1, "sanity");
      SWPointer p(s, this);
      if (cmp_ct.at(j) >  max_ct ||
          (cmp_ct.at(j) == max_ct &&
           (vw >  max_vw ||
            (vw == max_vw &&
             (data_size(s) <  min_size ||
              (data_size(s) == min_size &&
               p.offset_in_bytes() < min_iv_offset)))))) {
        max_ct = cmp_ct.at(j);
        max_vw = vw;
        max_idx = j;
        min_size = data_size(s);
        min_iv_offset = p.offset_in_bytes();
      }
    }
  }
  // If no stores, look at loads
  if (max_ct == 0) {
    for (uint j = 0; j < memops.size(); j++) {
      MemNode* s = memops.at(j)->as_Mem();
      if (s->is_Load()) {
        int vw = vector_width_in_bytes(s);
        assert(vw > 1, "sanity");
        SWPointer p(s, this);
        if (cmp_ct.at(j) >  max_ct ||
            (cmp_ct.at(j) == max_ct &&
             (vw >  max_vw ||
              (vw == max_vw &&
               (data_size(s) <  min_size ||
                (data_size(s) == min_size &&
                 p.offset_in_bytes() < min_iv_offset)))))) {
          max_ct = cmp_ct.at(j);
          max_vw = vw;
          max_idx = j;
          min_size = data_size(s);
          min_iv_offset = p.offset_in_bytes();
        }
      }
    }
  }

#ifdef ASSERT
  if (TraceSuperWord && Verbose) {
    tty->print_cr("\nVector memops after find_align_to_ref");
    for (uint i = 0; i < memops.size(); i++) {
      MemNode* s = memops.at(i)->as_Mem();
      s->dump();
    }
  }
#endif

  if (max_ct > 0) {
#ifdef ASSERT
    if (TraceSuperWord) {
      tty->print("\nVector align to node: ");
      memops.at(max_idx)->as_Mem()->dump();
    }
#endif
    return memops.at(max_idx)->as_Mem();
  }
  return NULL;
}

//------------------------------ref_is_alignable---------------------------
// Can the pre-loop align the reference to position zero in the vector?
bool SuperWord::ref_is_alignable(SWPointer& p) {
  if (!p.has_iv()) {
    return true; // no induction variable
  }
  CountedLoopEndNode* pre_end = get_pre_loop_end(lp()->as_CountedLoop());
  assert(pre_end != NULL, "we must have a correct pre-loop");
  assert(pre_end->stride_is_con(), "pre loop stride is constant");
  int preloop_stride = pre_end->stride_con();

  int span = preloop_stride * p.scale_in_bytes();
  int mem_size = p.memory_size();
  int offset   = p.offset_in_bytes();
  // Stride one accesses are alignable if offset is aligned to memory operation size.
  // Offset can be unaligned when UseUnalignedAccesses is used.
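  // E.g. (illustrative numbers): a 4-byte access with span == 4 and
  // offset == 8 is alignable here, since |span| equals the access size
  // and the offset is a multiple of it.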
  if (ABS(span) == mem_size && (ABS(offset) % mem_size) == 0) {
    return true;
  }
  // If the initial offset from start of the object is computable,
  // check if the pre-loop can align the final offset accordingly.
  //
  // In other words: Can we find an i such that the offset
  // after i pre-loop iterations is aligned to vw?
  //   (init_offset + pre_loop) % vw == 0              (1)
  // where
  //   pre_loop = i * span
  // is the number of bytes added to the offset by i pre-loop iterations.
  //
  // For this to hold we need pre_loop to increase init_offset by
  //   pre_loop = vw - (init_offset % vw)
  //
  // This is only possible if pre_loop is divisible by span because each
  // pre-loop iteration increases the initial offset by 'span' bytes:
  //   (vw - (init_offset % vw)) % span == 0
  //
  int vw = vector_width_in_bytes(p.mem());
  assert(vw > 1, "sanity");
  Node* init_nd = pre_end->init_trip();
  if (init_nd->is_Con() && p.invar() == NULL) {
    int init = init_nd->bottom_type()->is_int()->get_con();
    int init_offset = init * p.scale_in_bytes() + offset;
    if (init_offset < 0) { // negative offset from object start?
      return false;        // may happen in dead loop
    }
    if (vw % span == 0) {
      // If vw is a multiple of span, we use formula (1).
      if (span > 0) {
        return (vw - (init_offset % vw)) % span == 0;
      } else {
        assert(span < 0, "nonzero stride * scale");
        return (init_offset % vw) % -span == 0;
      }
    } else if (span % vw == 0) {
      // If span is a multiple of vw, we can simplify formula (1) to:
      //   (init_offset + i * span) % vw == 0
      //     =>
      //   (init_offset % vw) + ((i * span) % vw) == 0
      //     =>
      //   init_offset % vw == 0
      //
      // Because we add a multiple of vw to the initial offset, the final
      // offset is a multiple of vw if and only if init_offset is a multiple.
      //
      return (init_offset % vw) == 0;
    }
  }
  return false;
}

//---------------------------get_iv_adjustment---------------------------
// Calculate the loop's iv adjustment for this memory op.
int SuperWord::get_iv_adjustment(MemNode* mem_ref) {
  SWPointer align_to_ref_p(mem_ref, this);
  int offset   = align_to_ref_p.offset_in_bytes();
  int scale    = align_to_ref_p.scale_in_bytes();
  int elt_size = align_to_ref_p.memory_size();
  int vw       = vector_width_in_bytes(mem_ref);
  assert(vw > 1, "sanity");
  int iv_adjustment;
  if (scale != 0) {
    int stride_sign = (scale * iv_stride()) > 0 ? 1 : -1;
    // At least one iteration is executed in the pre-loop by default. As a
    // result, several iterations may be needed to align memory operations
    // in the main loop even if offset is 0.
    int iv_adjustment_in_bytes = (stride_sign * vw - (offset % vw));
    assert(((ABS(iv_adjustment_in_bytes) % elt_size) == 0),
           err_msg_res("(%d) should be divisible by (%d)", iv_adjustment_in_bytes, elt_size));
    iv_adjustment = iv_adjustment_in_bytes/elt_size;
  } else {
    // This memory op is not dependent on iv (scale == 0)
    iv_adjustment = 0;
  }

#ifndef PRODUCT
  if (TraceSuperWord)
    tty->print_cr("\noffset = %d iv_adjust = %d elt_size = %d scale = %d iv_stride = %d vect_size %d",
                  offset, iv_adjustment, elt_size, scale, iv_stride(), vw);
#endif
  return iv_adjustment;
}

//---------------------------dependence_graph---------------------------
// Construct dependency graph.
// Add dependence edges to load/store nodes for memory dependence
//    A.out()->DependNode.in(1) and DependNode.out()->B.prec(x)
void SuperWord::dependence_graph() {
  // First, assign a dependence node to each memory node
  for (int i = 0; i < _block.length(); i++) {
    Node *n = _block.at(i);
    if (n->is_Mem() || (n->is_Phi() && n->bottom_type() == Type::MEMORY)) {
      _dg.make_node(n);
    }
  }

  // For each memory slice, create the dependences
  for (int i = 0; i < _mem_slice_head.length(); i++) {
    Node* n      = _mem_slice_head.at(i);
    Node* n_tail = _mem_slice_tail.at(i);

    // Get slice in predecessor order (last is first)
    mem_slice_preds(n_tail, n, _nlist);

    // Make the slice dependent on the root
    DepMem* slice = _dg.dep(n);
    _dg.make_edge(_dg.root(), slice);

    // Create a sink for the slice
    DepMem* slice_sink = _dg.make_node(NULL);
    _dg.make_edge(slice_sink, _dg.tail());

    // Now visit each pair of memory ops, creating the edges
    for (int j = _nlist.length() - 1; j >= 0; j--) {
      Node* s1 = _nlist.at(j);

      // If no dependency yet, use slice
      if (_dg.dep(s1)->in_cnt() == 0) {
        _dg.make_edge(slice, s1);
      }
      SWPointer p1(s1->as_Mem(), this);
      bool sink_dependent = true;
      for (int k = j - 1; k >= 0; k--) {
        Node* s2 = _nlist.at(k);
        if (s1->is_Load() && s2->is_Load())
          continue;
        SWPointer p2(s2->as_Mem(), this);

        int cmp = p1.cmp(p2);
        if (SuperWordRTDepCheck &&
            p1.base() != p2.base() && p1.valid() && p2.valid()) {
          // Create a runtime check to disambiguate
          OrderedPair pp(p1.base(), p2.base());
          _disjoint_ptrs.append_if_missing(pp);
        } else if (!SWPointer::not_equal(cmp)) {
          // Possibly same address
          _dg.make_edge(s1, s2);
          sink_dependent = false;
        }
      }
      if (sink_dependent) {
        _dg.make_edge(s1, slice_sink);
      }
    }
#ifndef PRODUCT
    if (TraceSuperWord) {
      tty->print_cr("\nDependence graph for slice: %d", n->_idx);
      for (int q = 0; q < _nlist.length(); q++) {
        _dg.print(_nlist.at(q));
      }
      tty->cr();
    }
#endif
    _nlist.clear();
  }

#ifndef PRODUCT
  if (TraceSuperWord) {
    tty->print_cr("\ndisjoint_ptrs: %s", _disjoint_ptrs.length() > 0 ? "" : "NONE");
    for (int r = 0; r < _disjoint_ptrs.length(); r++) {
      _disjoint_ptrs.at(r).print();
      tty->cr();
    }
    tty->cr();
  }
#endif
}

//---------------------------mem_slice_preds---------------------------
// Return a memory slice (node list) in predecessor order starting at "start"
void SuperWord::mem_slice_preds(Node* start, Node* stop, GrowableArray<Node*> &preds) {
  assert(preds.length() == 0, "start empty");
  Node* n = start;
  Node* prev = NULL;
  while (true) {
    assert(in_bb(n), "must be in block");
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* out = n->fast_out(i);
      if (out->is_Load()) {
        if (in_bb(out)) {
          preds.push(out);
        }
      } else {
        // FIXME
        if (out->is_MergeMem() && !in_bb(out)) {
          // Either unrolling is causing a memory edge not to disappear,
          // or need to run igvn.optimize() again before SLP
        } else if (out->is_Phi() && out->bottom_type() == Type::MEMORY && !in_bb(out)) {
          // Ditto. Not sure what else to check further.
        } else if (out->Opcode() == Op_StoreCM && out->in(MemNode::OopStore) == n) {
          // StoreCM has an input edge used as a precedence edge.
          // Maybe an issue when oop stores are vectorized.
        } else {
          assert(out == prev || prev == NULL, "no branches off of store slice");
        }
      }
    }
    if (n == stop) break;
    preds.push(n);
    prev = n;
    assert(n->is_Mem(), err_msg_res("unexpected node %s", n->Name()));
    n = n->in(MemNode::Memory);
  }
}

//------------------------------stmts_can_pack---------------------------
// Can s1 and s2 be in a pack with s1 immediately preceding s2 and
// s1 aligned at "align"?
bool SuperWord::stmts_can_pack(Node* s1, Node* s2, int align) {

  // Do not use superword for non-primitives
  BasicType bt1 = velt_basic_type(s1);
  BasicType bt2 = velt_basic_type(s2);
  if (!is_java_primitive(bt1) || !is_java_primitive(bt2))
    return false;
  if (Matcher::max_vector_size(bt1) < 2) {
    return false; // No vectors for this type
  }

  if (isomorphic(s1, s2)) {
    if (independent(s1, s2)) {
      if (!exists_at(s1, 0) && !exists_at(s2, 1)) {
        if (!s1->is_Mem() || are_adjacent_refs(s1, s2)) {
          int s1_align = alignment(s1);
          int s2_align = alignment(s2);
          if (s1_align == top_align || s1_align == align) {
            if (s2_align == top_align || s2_align == align + data_size(s1)) {
              return true;
            }
          }
        }
      }
    }
  }
  return false;
}

//------------------------------exists_at---------------------------
// Does s exist in a pack at position pos?
bool SuperWord::exists_at(Node* s, uint pos) {
  for (int i = 0; i < _packset.length(); i++) {
    Node_List* p = _packset.at(i);
    if (p->at(pos) == s) {
      return true;
    }
  }
  return false;
}

//------------------------------are_adjacent_refs---------------------------
// Is s1 immediately before s2 in memory?
bool SuperWord::are_adjacent_refs(Node* s1, Node* s2) {
  if (!s1->is_Mem() || !s2->is_Mem()) return false;
  if (!in_bb(s1)    || !in_bb(s2))    return false;

  // Do not use superword for non-primitives
  if (!is_java_primitive(s1->as_Mem()->memory_type()) ||
      !is_java_primitive(s2->as_Mem()->memory_type())) {
    return false;
  }

  // FIXME - co_locate_pack fails on Stores in different mem-slices, so
  // only pack memops that are in the same alias set until that's fixed.
  if (_phase->C->get_alias_index(s1->as_Mem()->adr_type()) !=
      _phase->C->get_alias_index(s2->as_Mem()->adr_type()))
    return false;
  SWPointer p1(s1->as_Mem(), this);
  SWPointer p2(s2->as_Mem(), this);
  if (p1.base() != p2.base() || !p1.comparable(p2)) return false;
  int diff = p2.offset_in_bytes() - p1.offset_in_bytes();
  return diff == data_size(s1);
}

//------------------------------isomorphic---------------------------
// Are s1 and s2 similar?
bool SuperWord::isomorphic(Node* s1, Node* s2) {
  if (s1->Opcode() != s2->Opcode()) return false;
  if (s1->req() != s2->req()) return false;
  if (s1->in(0) != s2->in(0)) return false;
  if (!same_velt_type(s1, s2)) return false;
  return true;
}

//------------------------------independent---------------------------
// Is there no data path from s1 to s2 or s2 to s1?
bool SuperWord::independent(Node* s1, Node* s2) {
  //  assert(s1->Opcode() == s2->Opcode(), "check isomorphic first");
  int d1 = depth(s1);
  int d2 = depth(s2);
  if (d1 == d2) return s1 != s2;
  Node* deep    = d1 > d2 ? s1 : s2;
  Node* shallow = d1 > d2 ? s2 : s1;

  visited_clear();

  return independent_path(shallow, deep);
}

//------------------------------independent_path------------------------------
// Helper for independent
bool SuperWord::independent_path(Node* shallow, Node* deep, uint dp) {
  if (dp >= 1000) return false; // stop deep recursion
  visited_set(deep);
  int shal_depth = depth(shallow);
  assert(shal_depth <= depth(deep), "must be");
  for (DepPreds preds(deep, _dg); !preds.done(); preds.next()) {
    Node* pred = preds.current();
    if (in_bb(pred) && !visited_test(pred)) {
      if (shallow == pred) {
        return false;
      }
      if (shal_depth < depth(pred) && !independent_path(shallow, pred, dp+1)) {
        return false;
      }
    }
  }
  return true;
}

//------------------------------set_alignment---------------------------
void SuperWord::set_alignment(Node* s1, Node* s2, int align) {
  set_alignment(s1, align);
  if (align == top_align || align == bottom_align) {
    set_alignment(s2, align);
  } else {
    set_alignment(s2, align + data_size(s1));
  }
}

//------------------------------data_size---------------------------
int SuperWord::data_size(Node* s) {
  int bsize = type2aelembytes(velt_basic_type(s));
  assert(bsize != 0, "valid size");
  return bsize;
}

//------------------------------extend_packlist---------------------------
// Extend packset by following use->def and def->use links from pack members.
void SuperWord::extend_packlist() {
  bool changed;
  do {
    changed = false;
    for (int i = 0; i < _packset.length(); i++) {
      Node_List* p = _packset.at(i);
      changed |= follow_use_defs(p);
      changed |= follow_def_uses(p);
    }
  } while (changed);

#ifndef PRODUCT
  if (TraceSuperWord) {
    tty->print_cr("\nAfter extend_packlist");
    print_packset();
  }
#endif
}

//------------------------------follow_use_defs---------------------------
// Extend the packset by visiting operand definitions of nodes in pack p
bool SuperWord::follow_use_defs(Node_List* p) {
  assert(p->size() == 2, "just checking");
  Node* s1 = p->at(0);
  Node* s2 = p->at(1);
  assert(s1->req() == s2->req(), "just checking");
  assert(alignment(s1) + data_size(s1) == alignment(s2), "just checking");

  if (s1->is_Load()) return false;

  int align = alignment(s1);
  bool changed = false;
  int start = s1->is_Store() ? MemNode::ValueIn   : 1;
  int end   = s1->is_Store() ? MemNode::ValueIn+1 : s1->req();
  for (int j = start; j < end; j++) {
    Node* t1 = s1->in(j);
    Node* t2 = s2->in(j);
    if (!in_bb(t1) || !in_bb(t2))
      continue;
    if (stmts_can_pack(t1, t2, align)) {
      if (est_savings(t1, t2) >= 0) {
        Node_List* pair = new Node_List();
        pair->push(t1);
        pair->push(t2);
        _packset.append(pair);
        set_alignment(t1, t2, align);
        changed = true;
      }
    }
  }
  return changed;
}

//------------------------------follow_def_uses---------------------------
// Extend the packset by visiting uses of nodes in pack p
bool SuperWord::follow_def_uses(Node_List* p) {
  bool changed = false;
  Node* s1 = p->at(0);
  Node* s2 = p->at(1);
  assert(p->size() == 2, "just checking");
  assert(s1->req() == s2->req(), "just checking");
  assert(alignment(s1) + data_size(s1) == alignment(s2), "just checking");

  if (s1->is_Store()) return false;

  int align = alignment(s1);
  int savings = -1;
  Node* u1 = NULL;
  Node* u2 = NULL;
  for (DUIterator_Fast imax, i = s1->fast_outs(imax); i < imax; i++) {
    Node* t1 = s1->fast_out(i);
    if (!in_bb(t1)) continue;
    for (DUIterator_Fast jmax, j = s2->fast_outs(jmax); j < jmax; j++) {
      Node* t2 = s2->fast_out(j);
      if (!in_bb(t2)) continue;
      if (!opnd_positions_match(s1, t1, s2, t2))
        continue;
      if (stmts_can_pack(t1, t2, align)) {
        int my_savings = est_savings(t1, t2);
        if (my_savings > savings) {
          savings = my_savings;
          u1 = t1;
          u2 = t2;
        }
      }
    }
  }
  if (savings >= 0) {
    Node_List* pair = new Node_List();
    pair->push(u1);
    pair->push(u2);
    _packset.append(pair);
    set_alignment(u1, u2, align);
    changed = true;
  }
  return changed;
}

//---------------------------opnd_positions_match-------------------------
// Is the use of d1 in u1 at the same operand position as d2 in u2?
bool SuperWord::opnd_positions_match(Node* d1, Node* u1, Node* d2, Node* u2) {
  uint ct = u1->req();
  if (ct != u2->req()) return false;
  uint i1 = 0;
  uint i2 = 0;
  do {
    for (i1++; i1 < ct; i1++) if (u1->in(i1) == d1) break;
    for (i2++; i2 < ct; i2++) if (u2->in(i2) == d2) break;
    if (i1 != i2) {
      if ((i1 == (3-i2)) && (u2->is_Add() || u2->is_Mul())) {
        // Further analysis relies on operands position matching.
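        // E.g. (illustrative): with u1 = d1 + x and u2 = y + d2, d1 sits at
        // input 1 and d2 at input 2; because Add/Mul are commutative, swapping
        // u2's two inputs makes the operand positions line up.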
        u2->swap_edges(i1, i2);
      } else {
        return false;
      }
    }
  } while (i1 < ct);
  return true;
}

//------------------------------est_savings---------------------------
// Estimate the savings from executing s1 and s2 as a pack
int SuperWord::est_savings(Node* s1, Node* s2) {
  int save_in = 2 - 1; // 2 operations per instruction in packed form

  // inputs
  for (uint i = 1; i < s1->req(); i++) {
    Node* x1 = s1->in(i);
    Node* x2 = s2->in(i);
    if (x1 != x2) {
      if (are_adjacent_refs(x1, x2)) {
        save_in += adjacent_profit(x1, x2);
      } else if (!in_packset(x1, x2)) {
        save_in -= pack_cost(2);
      } else {
        save_in += unpack_cost(2);
      }
    }
  }

  // uses of result
  uint ct = 0;
  int save_use = 0;
  for (DUIterator_Fast imax, i = s1->fast_outs(imax); i < imax; i++) {
    Node* s1_use = s1->fast_out(i);
    for (int j = 0; j < _packset.length(); j++) {
      Node_List* p = _packset.at(j);
      if (p->at(0) == s1_use) {
        for (DUIterator_Fast kmax, k = s2->fast_outs(kmax); k < kmax; k++) {
          Node* s2_use = s2->fast_out(k);
          if (p->at(p->size()-1) == s2_use) {
            ct++;
            if (are_adjacent_refs(s1_use, s2_use)) {
              save_use += adjacent_profit(s1_use, s2_use);
            }
          }
        }
      }
    }
  }

  if (ct < s1->outcnt()) save_use += unpack_cost(1);
  if (ct < s2->outcnt()) save_use += unpack_cost(1);

  return MAX2(save_in, save_use);
}

//------------------------------costs---------------------------
int SuperWord::adjacent_profit(Node* s1, Node* s2) { return 2; }
int SuperWord::pack_cost(int ct)   { return ct; }
int SuperWord::unpack_cost(int ct) { return ct; }

//------------------------------combine_packs---------------------------
// Combine packs A and B with A.last == B.first into A.first..,A.last,B.second,..B.last
void SuperWord::combine_packs() {
  bool changed = true;
  // Combine packs regardless of max vector size.
  while (changed) {
    changed = false;
    for (int i = 0; i < _packset.length(); i++) {
      Node_List* p1 = _packset.at(i);
      if (p1 == NULL) continue;
      for (int j = 0; j < _packset.length(); j++) {
        Node_List* p2 = _packset.at(j);
        if (p2 == NULL) continue;
        if (i == j) continue;
        if (p1->at(p1->size()-1) == p2->at(0)) {
          for (uint k = 1; k < p2->size(); k++) {
            p1->push(p2->at(k));
          }
          _packset.at_put(j, NULL);
          changed = true;
        }
      }
    }
  }

  // Split packs which have a size greater than the max vector size.
  for (int i = 0; i < _packset.length(); i++) {
    Node_List* p1 = _packset.at(i);
    if (p1 != NULL) {
      BasicType bt = velt_basic_type(p1->at(0));
      uint max_vlen = Matcher::max_vector_size(bt); // Max elements in vector
      assert(is_power_of_2(max_vlen), "sanity");
      uint psize = p1->size();
      if (!is_power_of_2(psize)) {
        // Skip a pack which can't become a vector.
        // case 1: for (...) { a[i] = i; }     element values are different (i+x)
        // case 2: for (...) { a[i] = b[i+1]; }  can't align both load and store
        _packset.at_put(i, NULL);
        continue;
      }
      if (psize > max_vlen) {
        Node_List* pack = new Node_List();
        for (uint j = 0; j < psize; j++) {
          pack->push(p1->at(j));
          if (pack->size() >= max_vlen) {
            assert(is_power_of_2(pack->size()), "sanity");
            _packset.append(pack);
            pack = new Node_List();
          }
        }
        _packset.at_put(i, NULL);
      }
    }
  }

  // Compress list.
  for (int i = _packset.length() - 1; i >= 0; i--) {
    Node_List* p1 = _packset.at(i);
    if (p1 == NULL) {
      _packset.remove_at(i);
    }
  }

#ifndef PRODUCT
  if (TraceSuperWord) {
    tty->print_cr("\nAfter combine_packs");
    print_packset();
  }
#endif
}

//-----------------------------construct_my_pack_map--------------------------
// Construct the map from nodes to packs. Only valid after the
// point where a node is only in one pack (after combine_packs).
void SuperWord::construct_my_pack_map() {
  Node_List* rslt = NULL;
  for (int i = 0; i < _packset.length(); i++) {
    Node_List* p = _packset.at(i);
    for (uint j = 0; j < p->size(); j++) {
      Node* s = p->at(j);
      assert(my_pack(s) == NULL, "only in one pack");
      set_my_pack(s, p);
    }
  }
}

//------------------------------filter_packs---------------------------
// Remove packs that are not implemented or not profitable.
void SuperWord::filter_packs() {

  // Remove packs that are not implemented
  for (int i = _packset.length() - 1; i >= 0; i--) {
    Node_List* pk = _packset.at(i);
    bool impl = implemented(pk);
    if (!impl) {
#ifndef PRODUCT
      if (TraceSuperWord && Verbose) {
        tty->print_cr("Unimplemented");
        pk->at(0)->dump();
      }
#endif
      remove_pack_at(i);
    }
  }

  // Remove packs that are not profitable
  bool changed;
  do {
    changed = false;
    for (int i = _packset.length() - 1; i >= 0; i--) {
      Node_List* pk = _packset.at(i);
      bool prof = profitable(pk);
      if (!prof) {
#ifndef PRODUCT
        if (TraceSuperWord && Verbose) {
          tty->print_cr("Unprofitable");
          pk->at(0)->dump();
        }
#endif
        remove_pack_at(i);
        changed = true;
      }
    }
  } while (changed);

#ifndef PRODUCT
  if (TraceSuperWord) {
    tty->print_cr("\nAfter filter_packs");
    print_packset();
    tty->cr();
  }
#endif
}

//------------------------------implemented---------------------------
// Can code be generated for pack p?
bool SuperWord::implemented(Node_List* p) {
  Node* p0 = p->at(0);
  return VectorNode::implemented(p0->Opcode(), p->size(), velt_basic_type(p0));
}

//------------------------------same_inputs--------------------------
// For pack p, are all idx operands the same?
static bool same_inputs(Node_List* p, int idx) {
  Node* p0 = p->at(0);
  uint vlen = p->size();
  Node* p0_def = p0->in(idx);
  for (uint i = 1; i < vlen; i++) {
    Node* pi = p->at(i);
    Node* pi_def = pi->in(idx);
    if (p0_def != pi_def)
      return false;
  }
  return true;
}

//------------------------------profitable---------------------------
// For pack p, are all operands and all uses (within the block) vector?
bool SuperWord::profitable(Node_List* p) {
  Node* p0 = p->at(0);
  uint start, end;
  VectorNode::vector_operands(p0, &start, &end);

  // Return false if some inputs are not vectors or vectors with different
  // size or alignment.
  // Also, for now, return false if not scalar promotion case when inputs are
  // the same. Later, implement PackNode and allow differing, non-vector inputs
  // (maybe just the ones from outside the block.)
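  // E.g. (illustrative): in a[i] = b[i] + c, the loop-invariant input c is
  // identical for every pack member, so it counts as the scalar promotion
  // case and does not make the pack unprofitable.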
  for (uint i = start; i < end; i++) {
    if (!is_vector_use(p0, i))
      return false;
  }
  if (VectorNode::is_shift(p0)) {
    // For now, return false if shift count is vector or not scalar promotion
    // case (different shift counts) because it is not supported yet.
    Node* cnt = p0->in(2);
    Node_List* cnt_pk = my_pack(cnt);
    if (cnt_pk != NULL)
      return false;
    if (!same_inputs(p, 2))
      return false;
  }
  if (!p0->is_Store()) {
    // For now, return false if not all uses are vector.
    // Later, implement ExtractNode and allow non-vector uses (maybe
    // just the ones outside the block.)
    for (uint i = 0; i < p->size(); i++) {
      Node* def = p->at(i);
      for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) {
        Node* use = def->fast_out(j);
        for (uint k = 0; k < use->req(); k++) {
          Node* n = use->in(k);
          if (def == n) {
            if (!is_vector_use(use, k)) {
              return false;
            }
          }
        }
      }
    }
  }
  return true;
}

//------------------------------schedule---------------------------
// Adjust the memory graph for the packed operations
void SuperWord::schedule() {

  // Co-locate in the memory graph the members of each memory pack
  for (int i = 0; i < _packset.length(); i++) {
    co_locate_pack(_packset.at(i));
  }
}

//-------------------------------remove_and_insert-------------------
// Remove "current" from its current position in the memory graph and insert
// it after the appropriate insertion point (lip or uip).
void SuperWord::remove_and_insert(MemNode *current, MemNode *prev, MemNode *lip,
                                  Node *uip, Unique_Node_List &sched_before) {
  Node* my_mem = current->in(MemNode::Memory);
  bool sched_up = sched_before.member(current);

  // remove current from its current position in the memory graph
  for (DUIterator i = current->outs(); current->has_out(i); i++) {
    Node* use = current->out(i);
    if (use->is_Mem()) {
      assert(use->in(MemNode::Memory) == current, "must be");
      if (use == prev) { // connect prev to my_mem
        _igvn.replace_input_of(use, MemNode::Memory, my_mem);
        --i; // deleted this edge; rescan position
      } else if (sched_before.member(use)) {
        if (!sched_up) { // Will be moved together with current
          _igvn.replace_input_of(use, MemNode::Memory, uip);
          --i; // deleted this edge; rescan position
        }
      } else {
        if (sched_up) { // Will be moved together with current
          _igvn.replace_input_of(use, MemNode::Memory, lip);
          --i; // deleted this edge; rescan position
        }
      }
    }
  }

  Node *insert_pt = sched_up ? uip : lip;

  // all uses of insert_pt's memory state should use current's instead
  for (DUIterator i = insert_pt->outs(); insert_pt->has_out(i); i++) {
    Node* use = insert_pt->out(i);
    if (use->is_Mem()) {
      assert(use->in(MemNode::Memory) == insert_pt, "must be");
      _igvn.replace_input_of(use, MemNode::Memory, current);
      --i; // deleted this edge; rescan position
    } else if (!sched_up && use->is_Phi() && use->bottom_type() == Type::MEMORY) {
      uint pos; // lip (lower insert point) must be the last one in the memory slice
      for (pos = 1; pos < use->req(); pos++) {
        if (use->in(pos) == insert_pt) break;
      }
      _igvn.replace_input_of(use, pos, current);
      --i;
    }
  }

  // connect current to insert_pt
  _igvn.replace_input_of(current, MemNode::Memory, insert_pt);
}

//------------------------------co_locate_pack----------------------------------
// To schedule a store pack, we need to move any sandwiched memory ops either before
// or after the pack, based upon dependence information:
// (1) If any store in the pack depends on the sandwiched memory op, the
//     sandwiched memory op must be scheduled BEFORE the pack;
// (2) If a sandwiched memory op depends on any store in the pack, the
//     sandwiched memory op must be scheduled AFTER the pack;
// (3) If a sandwiched memory op (say, memA) depends on another sandwiched
//     memory op (say memB), memB must be scheduled before memA. So, if memA is
//     scheduled before the pack, memB must also be scheduled before the pack;
// (4) If there is no dependence restriction for a sandwiched memory op, we simply
//     schedule it AFTER the pack;
// (5) We know there is no dependence cycle, so there is no other case;
// (6) Finally, all memory ops in another single pack should be moved in the same direction.
//
// To schedule a load pack, we use the memory state of either the first or the last load in
// the pack, based on the dependence constraint.
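//
// For illustration (not part of the original comment): given a store pack
// {a[i], a[i+1]} with a load of b[j] sandwiched between its members, rule (1)
// moves the load before the pack if some packed store depends on it;
// otherwise rule (4) moves it after, so the pack's members end up contiguous
// in the memory graph.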
void SuperWord::co_locate_pack(Node_List* pk) {
  if (pk->at(0)->is_Store()) {
    MemNode* first = executed_first(pk)->as_Mem();
    MemNode* last = executed_last(pk)->as_Mem();
    Unique_Node_List schedule_before_pack;
    Unique_Node_List memops;

    MemNode* current = last->in(MemNode::Memory)->as_Mem();
    MemNode* previous = last;
    while (true) {
      assert(in_bb(current), "stay in block");
      memops.push(previous);
      for (DUIterator i = current->outs(); current->has_out(i); i++) {
        Node* use = current->out(i);
        if (use->is_Mem() && use != previous)
          memops.push(use);
      }
      if (current == first) break;
      previous = current;
      current = current->in(MemNode::Memory)->as_Mem();
    }

    // determine which memory operations should be scheduled before the pack
    for (uint i = 1; i < memops.size(); i++) {
      Node *s1 = memops.at(i);
      if (!in_pack(s1, pk) && !schedule_before_pack.member(s1)) {
        for (uint j = 0; j < i; j++) {
          Node *s2 = memops.at(j);
          if (!independent(s1, s2)) {
            if (in_pack(s2, pk) || schedule_before_pack.member(s2)) {
              schedule_before_pack.push(s1); // s1 must be scheduled before
              Node_List* mem_pk = my_pack(s1);
              if (mem_pk != NULL) {
                for (uint ii = 0; ii < mem_pk->size(); ii++) {
                  Node* s = mem_pk->at(ii); // follow partner
                  if (memops.member(s) && !schedule_before_pack.member(s))
                    schedule_before_pack.push(s);
                }
              }
              break;
            }
          }
        }
      }
    }

    Node* upper_insert_pt = first->in(MemNode::Memory);
    // Following code moves loads connected to upper_insert_pt below aliased stores.
    // Collect such loads here and reconnect them back to upper_insert_pt later.
    memops.clear();
    for (DUIterator i = upper_insert_pt->outs(); upper_insert_pt->has_out(i); i++) {
      Node* use = upper_insert_pt->out(i);
      if (use->is_Mem() && !use->is_Store()) {
        memops.push(use);
      }
    }

    MemNode* lower_insert_pt = last;
    previous = last; // previous store in pk
    current = last->in(MemNode::Memory)->as_Mem();

    // start scheduling from "last" to "first"
    while (true) {
      assert(in_bb(current), "stay in block");
      assert(in_pack(previous, pk), "previous stays in pack");
      Node* my_mem = current->in(MemNode::Memory);

      if (in_pack(current, pk)) {
        // Forward users of my memory state (except "previous") to my input memory state
        for (DUIterator i = current->outs(); current->has_out(i); i++) {
          Node* use = current->out(i);
          if (use->is_Mem() && use != previous) {
            assert(use->in(MemNode::Memory) == current, "must be");
            if (schedule_before_pack.member(use)) {
              _igvn.replace_input_of(use, MemNode::Memory, upper_insert_pt);
            } else {
              _igvn.replace_input_of(use, MemNode::Memory, lower_insert_pt);
            }
            --i; // deleted this edge; rescan position
          }
        }
        previous = current;
      } else { // !in_pack(current, pk) ==> a sandwiched store
        remove_and_insert(current, previous, lower_insert_pt, upper_insert_pt, schedule_before_pack);
      }

      if (current == first) break;
      current = my_mem->as_Mem();
    } // end while

    // Reconnect loads back to upper_insert_pt.
    for (uint i = 0; i < memops.size(); i++) {
      Node *ld = memops.at(i);
      if (ld->in(MemNode::Memory) != upper_insert_pt) {
        _igvn.replace_input_of(ld, MemNode::Memory, upper_insert_pt);
      }
    }
  } else if (pk->at(0)->is_Load()) { // load
    // all loads in the pack should have the same memory state. By default,
    // we use the memory state of the last load. However, if any load could
    // not be moved down due to the dependence constraint, we use the memory
    // state of the first load.
    Node* last_mem  = executed_last(pk)->in(MemNode::Memory);
    Node* first_mem = executed_first(pk)->in(MemNode::Memory);
    bool schedule_last = true;
    for (uint i = 0; i < pk->size(); i++) {
      Node* ld = pk->at(i);
      for (Node* current = last_mem; current != ld->in(MemNode::Memory);
           current = current->in(MemNode::Memory)) {
        assert(current != first_mem, "corrupted memory graph");
        if (current->is_Mem() && !independent(current, ld)) {
          schedule_last = false; // a later store depends on this load
          break;
        }
      }
    }

    Node* mem_input = schedule_last ? last_mem : first_mem;
    _igvn.hash_delete(mem_input);
    // Give each load the same memory state
    for (uint i = 0; i < pk->size(); i++) {
      LoadNode* ld = pk->at(i)->as_Load();
      _igvn.replace_input_of(ld, MemNode::Memory, mem_input);
    }
  }
}

//------------------------------output---------------------------
// Convert packs into vector node operations
void SuperWord::output() {
  if (_packset.length() == 0) return;

#ifndef PRODUCT
  if (TraceLoopOpts) {
    tty->print("SuperWord ");
    lpt()->dump_head();
  }
#endif

  // MUST ENSURE main loop's initial value is properly aligned:
  //  (iv_initial_value + min_iv_offset) % vector_width_in_bytes() == 0

  align_initial_loop_index(align_to_ref());

  // Insert extract (unpack) operations for scalar uses
  for (int i = 0; i < _packset.length(); i++) {
    insert_extracts(_packset.at(i));
  }

  Compile* C = _phase->C;
  uint max_vlen_in_bytes = 0;
  for (int i = 0; i < _block.length(); i++) {
    Node* n = _block.at(i);
    Node_List* p = my_pack(n);
    if (p && n == executed_last(p)) {
      uint vlen = p->size();
      uint vlen_in_bytes = 0;
      Node* vn = NULL;
      Node* low_adr = p->at(0);
      Node* first   = executed_first(p);
      int   opc = n->Opcode();
      if (n->is_Load()) {
        Node* ctl = n->in(MemNode::Control);
        Node* mem = first->in(MemNode::Memory);
        SWPointer p1(n->as_Mem(), this);
        // Identify the memory dependency for the new loadVector node by
        // walking up through the memory chain.
        // This is done to give flexibility to the new loadVector node so that
        // it can move above independent storeVector nodes.
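        // E.g. (illustrative): for a[i] = b[i] + 1, the new LoadVector for b
        // can take its memory state from above the StoreVector created for a;
        // the walk below stops at the first StoreVector that may access the
        // same address.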
        while (mem->is_StoreVector()) {
          SWPointer p2(mem->as_Mem(), this);
          int cmp = p1.cmp(p2);
          if (SWPointer::not_equal(cmp) || !SWPointer::comparable(cmp)) {
            mem = mem->in(MemNode::Memory);
          } else {
            break; // dependent memory
          }
        }
        Node* adr = low_adr->in(MemNode::Address);
        const TypePtr* atyp = n->adr_type();
        vn = LoadVectorNode::make(C, opc, ctl, mem, adr, atyp, vlen, velt_basic_type(n), control_dependency(p));
        vlen_in_bytes = vn->as_LoadVector()->memory_size();
      } else if (n->is_Store()) {
        // Promote value to be stored to vector
        Node* val = vector_opd(p, MemNode::ValueIn);
        Node* ctl = n->in(MemNode::Control);
        Node* mem = first->in(MemNode::Memory);
        Node* adr = low_adr->in(MemNode::Address);
        const TypePtr* atyp = n->adr_type();
        vn = StoreVectorNode::make(C, opc, ctl, mem, adr, atyp, val, vlen);
        vlen_in_bytes = vn->as_StoreVector()->memory_size();
      } else if (n->req() == 3) {
        // Promote operands to vector
        Node* in1 = vector_opd(p, 1);
        Node* in2 = vector_opd(p, 2);
        if (VectorNode::is_invariant_vector(in1) && (n->is_Add() || n->is_Mul())) {
          // Move invariant vector input into second position to avoid register spilling.
          Node* tmp = in1;
          in1 = in2;
          in2 = tmp;
        }
        vn = VectorNode::make(C, opc, in1, in2, vlen, velt_basic_type(n));
        vlen_in_bytes = vn->as_Vector()->length_in_bytes();
      } else {
        ShouldNotReachHere();
      }
      assert(vn != NULL, "sanity");
      _igvn.register_new_node_with_optimizer(vn);
      _phase->set_ctrl(vn, _phase->get_ctrl(p->at(0)));
      for (uint j = 0; j < p->size(); j++) {
        Node* pm = p->at(j);
        _igvn.replace_node(pm, vn);
      }
      _igvn._worklist.push(vn);

      if (vlen_in_bytes > max_vlen_in_bytes) {
        max_vlen_in_bytes = vlen_in_bytes;
      }
#ifdef ASSERT
      if (TraceNewVectors) {
        tty->print("new Vector node: ");
        vn->dump();
      }
#endif
    }
  }
  C->set_max_vector_size(max_vlen_in_bytes);
}

//------------------------------vector_opd---------------------------
// Create a vector operand for the nodes in pack p for operand: in(opd_idx)
Node* SuperWord::vector_opd(Node_List* p, int opd_idx) {
  Node* p0 = p->at(0);
  uint vlen = p->size();
  Node* opd = p0->in(opd_idx);

  if (same_inputs(p, opd_idx)) {
    if (opd->is_Vector() || opd->is_LoadVector()) {
      assert(((opd_idx != 2) || !VectorNode::is_shift(p0)), "shift's count can't be vector");
      return opd; // input is matching vector
    }
    if ((opd_idx == 2) && VectorNode::is_shift(p0)) {
      Compile* C = _phase->C;
      Node* cnt = opd;
      // Vector instructions do not mask shift count, do it here.
      juint mask = (p0->bottom_type() == TypeInt::INT) ? (BitsPerInt - 1) : (BitsPerLong - 1);
      const TypeInt* t = opd->find_int_type();
      if (t != NULL && t->is_con()) {
        juint shift = t->get_con();
        if (shift > mask) { // Unsigned cmp
          cnt = ConNode::make(C, TypeInt::make(shift & mask));
        }
      } else {
        if (t == NULL || t->_lo < 0 || t->_hi > (int)mask) {
          cnt = ConNode::make(C, TypeInt::make(mask));
          _igvn.register_new_node_with_optimizer(cnt);
          cnt = new (C) AndINode(opd, cnt);
          _igvn.register_new_node_with_optimizer(cnt);
          _phase->set_ctrl(cnt, _phase->get_ctrl(opd));
        }
        assert(opd->bottom_type()->isa_int(), "int type only");
        // Move non-constant shift count into vector register.
        cnt = VectorNode::shift_count(C, p0, cnt, vlen, velt_basic_type(p0));
      }
      if (cnt != opd) {
        _igvn.register_new_node_with_optimizer(cnt);
        _phase->set_ctrl(cnt, _phase->get_ctrl(opd));
      }
      return cnt;
    }
    assert(!opd->is_StoreVector(), "such vector is not expected here");
    // Convert scalar input to vector with the same number of elements as
    // p0's vector. Use p0's type because the size of the operand's container
    // in the vector should match p0's size regardless of the operand's size.
    const Type* p0_t = velt_type(p0);
    VectorNode* vn = VectorNode::scalar2vector(_phase->C, opd, vlen, p0_t);

    _igvn.register_new_node_with_optimizer(vn);
    _phase->set_ctrl(vn, _phase->get_ctrl(opd));
#ifdef ASSERT
    if (TraceNewVectors) {
      tty->print("new Vector node: ");
      vn->dump();
    }
#endif
    return vn;
  }

  // Insert pack operation
  BasicType bt = velt_basic_type(p0);
  PackNode* pk = PackNode::make(_phase->C, opd, vlen, bt);
  DEBUG_ONLY( const BasicType opd_bt = opd->bottom_type()->basic_type(); )

  for (uint i = 1; i < vlen; i++) {
    Node* pi = p->at(i);
    Node* in = pi->in(opd_idx);
    assert(my_pack(in) == NULL, "Should already have been unpacked");
    assert(opd_bt == in->bottom_type()->basic_type(), "all same type");
    pk->add_opd(in);
  }
  _igvn.register_new_node_with_optimizer(pk);
  _phase->set_ctrl(pk, _phase->get_ctrl(opd));
#ifdef ASSERT
  if (TraceNewVectors) {
    tty->print("new Vector node: ");
    pk->dump();
  }
#endif
  return pk;
}

//------------------------------insert_extracts---------------------------
// If a use of pack p is not a vector use, then replace the
// use with an extract operation.
void SuperWord::insert_extracts(Node_List* p) {
  if (p->at(0)->is_Store()) return;
  assert(_n_idx_list.is_empty(), "empty (node,index) list");

  // Inspect each use of each pack member. For each use that is
  // not a vector use, replace the use with an extract operation.

  for (uint i = 0; i < p->size(); i++) {
    Node* def = p->at(i);
    for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) {
      Node* use = def->fast_out(j);
      for (uint k = 0; k < use->req(); k++) {
        Node* n = use->in(k);
        if (def == n) {
          if (!is_vector_use(use, k)) {
            _n_idx_list.push(use, k);
          }
        }
      }
    }
  }

  while (_n_idx_list.is_nonempty()) {
    Node* use = _n_idx_list.node();
    int   idx = _n_idx_list.index();
    _n_idx_list.pop();
    Node* def = use->in(idx);

    // Insert extract operation
    _igvn.hash_delete(def);
    int def_pos = alignment(def) / data_size(def);

    Node* ex = ExtractNode::make(_phase->C, def, def_pos, velt_basic_type(def));
    _igvn.register_new_node_with_optimizer(ex);
    _phase->set_ctrl(ex, _phase->get_ctrl(def));
    _igvn.replace_input_of(use, idx, ex);
    _igvn._worklist.push(def);

    bb_insert_after(ex, bb_idx(def));
    set_velt_type(ex, velt_type(def));
  }
}

//------------------------------is_vector_use---------------------------
// Is use->in(u_idx) a vector use?
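// E.g. (illustrative): for use pack {u0,u1} and def pack {d0,d1}, the use is
// a vector use only if u0->in(u_idx) == d0 and u1->in(u_idx) == d1 with
// matching alignments, or if every pack member shares one identical scalar
// input (the scalar promotion case).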
bool SuperWord::is_vector_use(Node* use, int u_idx) {
  Node_List* u_pk = my_pack(use);
  if (u_pk == NULL) return false;
  Node* def = use->in(u_idx);
  Node_List* d_pk = my_pack(def);
  if (d_pk == NULL) {
    // check for scalar promotion
    Node* n = u_pk->at(0)->in(u_idx);
    for (uint i = 1; i < u_pk->size(); i++) {
      if (u_pk->at(i)->in(u_idx) != n) return false;
    }
    return true;
  }
  if (u_pk->size() != d_pk->size())
    return false;
  for (uint i = 0; i < u_pk->size(); i++) {
    Node* ui = u_pk->at(i);
    Node* di = d_pk->at(i);
    if (ui->in(u_idx) != di || alignment(ui) != alignment(di))
      return false;
  }
  return true;
}

//------------------------------construct_bb---------------------------
// Construct reverse postorder list of block members
bool SuperWord::construct_bb() {
  Node* entry = bb();

  assert(_stk.length() == 0,            "stk is empty");
  assert(_block.length() == 0,          "block is empty");
  assert(_data_entry.length() == 0,     "data_entry is empty");
  assert(_mem_slice_head.length() == 0, "mem_slice_head is empty");
  assert(_mem_slice_tail.length() == 0, "mem_slice_tail is empty");

  // Find non-control nodes with no inputs from within block,
  // create a temporary map from node _idx to bb_idx for use
  // by the visited and post_visited sets,
  // and count number of nodes in block.
  int bb_ct = 0;
  for (uint i = 0; i < lpt()->_body.size(); i++) {
    Node *n = lpt()->_body.at(i);
    set_bb_idx(n, i); // Create a temporary map
    if (in_bb(n)) {
      if (n->is_LoadStore() || n->is_MergeMem() ||
          (n->is_Proj() && !n->as_Proj()->is_CFG())) {
        // Bailout if the loop has LoadStore, MergeMem or data Proj
        // nodes. Superword optimization does not work with them.
1664 return false; 1665 } 1666 bb_ct++; 1667 if (!n->is_CFG()) { 1668 bool found = false; 1669 for (uint j = 0; j < n->req(); j++) { 1670 Node* def = n->in(j); 1671 if (def && in_bb(def)) { 1672 found = true; 1673 break; 1674 } 1675 } 1676 if (!found) { 1677 assert(n != entry, "can't be entry"); 1678 _data_entry.push(n); 1679 } 1680 } 1681 } 1682 } 1683 1684 // Find memory slices (head and tail) 1685 for (DUIterator_Fast imax, i = lp()->fast_outs(imax); i < imax; i++) { 1686 Node *n = lp()->fast_out(i); 1687 if (in_bb(n) && (n->is_Phi() && n->bottom_type() == Type::MEMORY)) { 1688 Node* n_tail = n->in(LoopNode::LoopBackControl); 1689 if (n_tail != n->in(LoopNode::EntryControl)) { 1690 if (!n_tail->is_Mem()) { 1691 assert(n_tail->is_Mem(), err_msg_res("unexpected node for memory slice: %s", n_tail->Name())); 1692 return false; // Bailout 1693 } 1694 _mem_slice_head.push(n); 1695 _mem_slice_tail.push(n_tail); 1696 } 1697 } 1698 } 1699 1700 // Create an RPO list of nodes in block 1701 1702 visited_clear(); 1703 post_visited_clear(); 1704 1705 // Push all non-control nodes with no inputs from within block, then control entry 1706 for (int j = 0; j < _data_entry.length(); j++) { 1707 Node* n = _data_entry.at(j); 1708 visited_set(n); 1709 _stk.push(n); 1710 } 1711 visited_set(entry); 1712 _stk.push(entry); 1713 1714 // Do a depth first walk over out edges 1715 int rpo_idx = bb_ct - 1; 1716 int size; 1717 while ((size = _stk.length()) > 0) { 1718 Node* n = _stk.top(); // Leave node on stack 1719 if (!visited_test_set(n)) { 1720 // forward arc in graph 1721 } else if (!post_visited_test(n)) { 1722 // cross or back arc 1723 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 1724 Node *use = n->fast_out(i); 1725 if (in_bb(use) && !visited_test(use) && 1726 // Don't go around backedge 1727 (!use->is_Phi() || n == entry)) { 1728 _stk.push(use); 1729 } 1730 } 1731 if (_stk.length() == size) { 1732 // There were no additional uses, post visit node now 1733 _stk.pop(); // Remove node from stack 1734 assert(rpo_idx >= 0, ""); 1735 _block.at_put_grow(rpo_idx, n); 1736 rpo_idx--; 1737 post_visited_set(n); 1738 assert(rpo_idx >= 0 || _stk.is_empty(), ""); 1739 } 1740 } else { 1741 _stk.pop(); // Remove post-visited node from stack 1742 } 1743 } 1744 1745 // Create real map of block indices for nodes 1746 for (int j = 0; j < _block.length(); j++) { 1747 Node* n = _block.at(j); 1748 set_bb_idx(n, j); 1749 } 1750 1751 initialize_bb(); // Ensure extra info is allocated. 1752 1753 #ifndef PRODUCT 1754 if (TraceSuperWord) { 1755 print_bb(); 1756 tty->print_cr("\ndata entry nodes: %s", _data_entry.length() > 0 ? "" : "NONE"); 1757 for (int m = 0; m < _data_entry.length(); m++) { 1758 tty->print("%3d ", m); 1759 _data_entry.at(m)->dump(); 1760 } 1761 tty->print_cr("\nmemory slices: %s", _mem_slice_head.length() > 0 ? 
"" : "NONE"); 1762 for (int m = 0; m < _mem_slice_head.length(); m++) { 1763 tty->print("%3d ", m); _mem_slice_head.at(m)->dump(); 1764 tty->print(" "); _mem_slice_tail.at(m)->dump(); 1765 } 1766 } 1767 #endif 1768 assert(rpo_idx == -1 && bb_ct == _block.length(), "all block members found"); 1769 return (_mem_slice_head.length() > 0) || (_data_entry.length() > 0); 1770 } 1771 1772 //------------------------------initialize_bb--------------------------- 1773 // Initialize per node info 1774 void SuperWord::initialize_bb() { 1775 Node* last = _block.at(_block.length() - 1); 1776 grow_node_info(bb_idx(last)); 1777 } 1778 1779 //------------------------------bb_insert_after--------------------------- 1780 // Insert n into block after pos 1781 void SuperWord::bb_insert_after(Node* n, int pos) { 1782 int n_pos = pos + 1; 1783 // Make room 1784 for (int i = _block.length() - 1; i >= n_pos; i--) { 1785 _block.at_put_grow(i+1, _block.at(i)); 1786 } 1787 for (int j = _node_info.length() - 1; j >= n_pos; j--) { 1788 _node_info.at_put_grow(j+1, _node_info.at(j)); 1789 } 1790 // Set value 1791 _block.at_put_grow(n_pos, n); 1792 _node_info.at_put_grow(n_pos, SWNodeInfo::initial); 1793 // Adjust map from node->_idx to _block index 1794 for (int i = n_pos; i < _block.length(); i++) { 1795 set_bb_idx(_block.at(i), i); 1796 } 1797 } 1798 1799 //------------------------------compute_max_depth--------------------------- 1800 // Compute max depth for expressions from beginning of block 1801 // Use to prune search paths during test for independence. 1802 void SuperWord::compute_max_depth() { 1803 int ct = 0; 1804 bool again; 1805 do { 1806 again = false; 1807 for (int i = 0; i < _block.length(); i++) { 1808 Node* n = _block.at(i); 1809 if (!n->is_Phi()) { 1810 int d_orig = depth(n); 1811 int d_in = 0; 1812 for (DepPreds preds(n, _dg); !preds.done(); preds.next()) { 1813 Node* pred = preds.current(); 1814 if (in_bb(pred)) { 1815 d_in = MAX2(d_in, depth(pred)); 1816 } 1817 } 1818 if (d_in + 1 != d_orig) { 1819 set_depth(n, d_in + 1); 1820 again = true; 1821 } 1822 } 1823 } 1824 ct++; 1825 } while (again); 1826 #ifndef PRODUCT 1827 if (TraceSuperWord && Verbose) 1828 tty->print_cr("compute_max_depth iterated: %d times", ct); 1829 #endif 1830 } 1831 1832 //-------------------------compute_vector_element_type----------------------- 1833 // Compute necessary vector element type for expressions 1834 // This propagates backwards a narrower integer type when the 1835 // upper bits of the value are not needed. 1836 // Example: char a,b,c; a = b + c; 1837 // Normally the type of the add is integer, but for packed character 1838 // operations the type of the add needs to be char. 
1839 void SuperWord::compute_vector_element_type() { 1840 #ifndef PRODUCT 1841 if (TraceSuperWord && Verbose) 1842 tty->print_cr("\ncompute_velt_type:"); 1843 #endif 1844 1845 // Initial type 1846 for (int i = 0; i < _block.length(); i++) { 1847 Node* n = _block.at(i); 1848 set_velt_type(n, container_type(n)); 1849 } 1850 1851 // Propagate integer narrowed type backwards through operations 1852 // that don't depend on higher order bits 1853 for (int i = _block.length() - 1; i >= 0; i--) { 1854 Node* n = _block.at(i); 1855 // Only integer types need be examined 1856 const Type* vtn = velt_type(n); 1857 if (vtn->basic_type() == T_INT) { 1858 uint start, end; 1859 VectorNode::vector_operands(n, &start, &end); 1860 1861 for (uint j = start; j < end; j++) { 1862 Node* in = n->in(j); 1863 // Don't propagate through a memory 1864 if (!in->is_Mem() && in_bb(in) && velt_type(in)->basic_type() == T_INT && 1865 data_size(n) < data_size(in)) { 1866 bool same_type = true; 1867 for (DUIterator_Fast kmax, k = in->fast_outs(kmax); k < kmax; k++) { 1868 Node *use = in->fast_out(k); 1869 if (!in_bb(use) || !same_velt_type(use, n)) { 1870 same_type = false; 1871 break; 1872 } 1873 } 1874 if (same_type) { 1875 // For right shifts of small integer types (bool, byte, char, short) 1876 // we need precise information about sign-ness. Only Load nodes have 1877 // this information because Store nodes are the same for signed and 1878 // unsigned values. And any arithmetic operation after a load may 1879 // expand a value to signed Int so such right shifts can't be used 1880 // because vector elements do not have upper bits of Int. 1881 const Type* vt = vtn; 1882 if (VectorNode::is_shift(in)) { 1883 Node* load = in->in(1); 1884 if (load->is_Load() && in_bb(load) && (velt_type(load)->basic_type() == T_INT)) { 1885 vt = velt_type(load); 1886 } else if (in->Opcode() != Op_LShiftI) { 1887 // Widen type to Int to avoid creation of right shift vector 1888 // (align + data_size(s1) check in stmts_can_pack() will fail). 1889 // Note, left shifts work regardless type. 1890 vt = TypeInt::INT; 1891 } 1892 } 1893 set_velt_type(in, vt); 1894 } 1895 } 1896 } 1897 } 1898 } 1899 #ifndef PRODUCT 1900 if (TraceSuperWord && Verbose) { 1901 for (int i = 0; i < _block.length(); i++) { 1902 Node* n = _block.at(i); 1903 velt_type(n)->dump(); 1904 tty->print("\t"); 1905 n->dump(); 1906 } 1907 } 1908 #endif 1909 } 1910 1911 //------------------------------memory_alignment--------------------------- 1912 // Alignment within a vector memory reference 1913 int SuperWord::memory_alignment(MemNode* s, int iv_adjust) { 1914 SWPointer p(s, this); 1915 if (!p.valid()) { 1916 return bottom_align; 1917 } 1918 int vw = vector_width_in_bytes(s); 1919 if (vw < 2) { 1920 return bottom_align; // No vectors for this type 1921 } 1922 int offset = p.offset_in_bytes(); 1923 offset += iv_adjust*p.memory_size(); 1924 int off_rem = offset % vw; 1925 int off_mod = off_rem >= 0 ? off_rem : off_rem + vw; 1926 return off_mod; 1927 } 1928 1929 //---------------------------container_type--------------------------- 1930 // Smallest type containing range of values 1931 const Type* SuperWord::container_type(Node* n) { 1932 if (n->is_Mem()) { 1933 BasicType bt = n->as_Mem()->memory_type(); 1934 if (n->is_Store() && (bt == T_CHAR)) { 1935 // Use T_SHORT type instead of T_CHAR for stored values because any 1936 // preceding arithmetic operation extends values to signed Int. 
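      // (T_CHAR and T_SHORT are both 16 bits wide, so the vector lane size
      // is unchanged; for a store the written bit pattern is the same for
      // either sign interpretation.)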
1937 bt = T_SHORT; 1938 } 1939 if (n->Opcode() == Op_LoadUB) { 1940 // Adjust type for unsigned byte loads, it is important for right shifts. 1941 // T_BOOLEAN is used because there is no basic type representing type 1942 // TypeInt::UBYTE. Use of T_BOOLEAN for vectors is fine because only 1943 // size (one byte) and sign is important. 1944 bt = T_BOOLEAN; 1945 } 1946 return Type::get_const_basic_type(bt); 1947 } 1948 const Type* t = _igvn.type(n); 1949 if (t->basic_type() == T_INT) { 1950 // A narrow type of arithmetic operations will be determined by 1951 // propagating the type of memory operations. 1952 return TypeInt::INT; 1953 } 1954 return t; 1955 } 1956 1957 bool SuperWord::same_velt_type(Node* n1, Node* n2) { 1958 const Type* vt1 = velt_type(n1); 1959 const Type* vt2 = velt_type(n2); 1960 if (vt1->basic_type() == T_INT && vt2->basic_type() == T_INT) { 1961 // Compare vectors element sizes for integer types. 1962 return data_size(n1) == data_size(n2); 1963 } 1964 return vt1 == vt2; 1965 } 1966 1967 //------------------------------in_packset--------------------------- 1968 // Are s1 and s2 in a pack pair and ordered as s1,s2? 1969 bool SuperWord::in_packset(Node* s1, Node* s2) { 1970 for (int i = 0; i < _packset.length(); i++) { 1971 Node_List* p = _packset.at(i); 1972 assert(p->size() == 2, "must be"); 1973 if (p->at(0) == s1 && p->at(p->size()-1) == s2) { 1974 return true; 1975 } 1976 } 1977 return false; 1978 } 1979 1980 //------------------------------in_pack--------------------------- 1981 // Is s in pack p? 1982 Node_List* SuperWord::in_pack(Node* s, Node_List* p) { 1983 for (uint i = 0; i < p->size(); i++) { 1984 if (p->at(i) == s) { 1985 return p; 1986 } 1987 } 1988 return NULL; 1989 } 1990 1991 //------------------------------remove_pack_at--------------------------- 1992 // Remove the pack at position pos in the packset 1993 void SuperWord::remove_pack_at(int pos) { 1994 Node_List* p = _packset.at(pos); 1995 for (uint i = 0; i < p->size(); i++) { 1996 Node* s = p->at(i); 1997 set_my_pack(s, NULL); 1998 } 1999 _packset.remove_at(pos); 2000 } 2001 2002 //------------------------------executed_first--------------------------- 2003 // Return the node executed first in pack p. Uses the RPO block list 2004 // to determine order. 2005 Node* SuperWord::executed_first(Node_List* p) { 2006 Node* n = p->at(0); 2007 int n_rpo = bb_idx(n); 2008 for (uint i = 1; i < p->size(); i++) { 2009 Node* s = p->at(i); 2010 int s_rpo = bb_idx(s); 2011 if (s_rpo < n_rpo) { 2012 n = s; 2013 n_rpo = s_rpo; 2014 } 2015 } 2016 return n; 2017 } 2018 2019 //------------------------------executed_last--------------------------- 2020 // Return the node executed last in pack p. 
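// Like executed_first, this relies on bb_idx, the RPO position computed by
// construct_bb, as the execution order. Illustrative example: for a pack
// {s1, s2} with bb_idx(s1) == 7 and bb_idx(s2) == 4, executed_first
// returns s2 and executed_last returns s1.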
Node* SuperWord::executed_last(Node_List* p) {
  Node* n = p->at(0);
  int n_rpo = bb_idx(n);
  for (uint i = 1; i < p->size(); i++) {
    Node* s = p->at(i);
    int s_rpo = bb_idx(s);
    if (s_rpo > n_rpo) {
      n = s;
      n_rpo = s_rpo;
    }
  }
  return n;
}

//------------------------------control_dependency---------------------------
LoadNode::ControlDependency SuperWord::control_dependency(Node_List* p) {
  LoadNode::ControlDependency dep = LoadNode::DependsOnlyOnTest;
  for (uint i = 0; i < p->size(); i++) {
    Node* n = p->at(i);
    assert(n->is_Load(), "only meaningful for loads");
    if (!n->depends_only_on_test()) {
      dep = LoadNode::Pinned;
    }
  }
  return dep;
}


//----------------------------align_initial_loop_index---------------------------
// Adjust the pre-loop limit so that in the main loop, a load/store reference
// to align_to_ref will be at position zero in the vector.
//   (iv + k) mod vector_align == 0
void SuperWord::align_initial_loop_index(MemNode* align_to_ref) {
  CountedLoopNode *main_head = lp()->as_CountedLoop();
  assert(main_head->is_main_loop(), "");
  CountedLoopEndNode* pre_end = get_pre_loop_end(main_head);
  assert(pre_end != NULL, "we must have a correct pre-loop");
  Node *pre_opaq1 = pre_end->limit();
  assert(pre_opaq1->Opcode() == Op_Opaque1, "");
  Opaque1Node *pre_opaq = (Opaque1Node*)pre_opaq1;
  Node *lim0 = pre_opaq->in(1);

  // Where we put new limit calculations
  Node *pre_ctrl = pre_end->loopnode()->in(LoopNode::EntryControl);

  // Ensure the original loop limit is available from the
  // pre-loop Opaque1 node.
  Node *orig_limit = pre_opaq->original_loop_limit();
  assert(orig_limit != NULL && _igvn.type(orig_limit) != Type::TOP, "");

  SWPointer align_to_ref_p(align_to_ref, this);
  assert(align_to_ref_p.valid(), "sanity");

  // Given:
  //     lim0  == original pre loop limit
  //     V     == v_align (power of 2)
  //     invar == extra invariant piece of the address expression
  //     e     == offset [ +/- invar ]
  //
  // When reassociating expressions involving '%' the basic rules are:
  //   (a - b) % k == 0  =>  a % k == b % k
  // and:
  //   (a + b) % k == 0  =>  a % k == (k - b) % k
  //
  // For stride > 0 && scale > 0,
  //   Derive the new pre-loop limit "lim" such that the two constraints:
  //     (1) lim = lim0 + N           (where N is some positive integer < V)
  //     (2) (e + lim) % V == 0
  //   are true.
2089 // 2090 // Substituting (1) into (2), 2091 // (e + lim0 + N) % V == 0 2092 // solve for N: 2093 // N = (V - (e + lim0)) % V 2094 // substitute back into (1), so that new limit 2095 // lim = lim0 + (V - (e + lim0)) % V 2096 // 2097 // For stride > 0 && scale < 0 2098 // Constraints: 2099 // lim = lim0 + N 2100 // (e - lim) % V == 0 2101 // Solving for lim: 2102 // (e - lim0 - N) % V == 0 2103 // N = (e - lim0) % V 2104 // lim = lim0 + (e - lim0) % V 2105 // 2106 // For stride < 0 && scale > 0 2107 // Constraints: 2108 // lim = lim0 - N 2109 // (e + lim) % V == 0 2110 // Solving for lim: 2111 // (e + lim0 - N) % V == 0 2112 // N = (e + lim0) % V 2113 // lim = lim0 - (e + lim0) % V 2114 // 2115 // For stride < 0 && scale < 0 2116 // Constraints: 2117 // lim = lim0 - N 2118 // (e - lim) % V == 0 2119 // Solving for lim: 2120 // (e - lim0 + N) % V == 0 2121 // N = (V - (e - lim0)) % V 2122 // lim = lim0 - (V - (e - lim0)) % V 2123 2124 int vw = vector_width_in_bytes(align_to_ref); 2125 int stride = iv_stride(); 2126 int scale = align_to_ref_p.scale_in_bytes(); 2127 int elt_size = align_to_ref_p.memory_size(); 2128 int v_align = vw / elt_size; 2129 assert(v_align > 1, "sanity"); 2130 int offset = align_to_ref_p.offset_in_bytes() / elt_size; 2131 Node *offsn = _igvn.intcon(offset); 2132 2133 Node *e = offsn; 2134 if (align_to_ref_p.invar() != NULL) { 2135 // incorporate any extra invariant piece producing (offset +/- invar) >>> log2(elt) 2136 Node* log2_elt = _igvn.intcon(exact_log2(elt_size)); 2137 Node* aref = new (_phase->C) URShiftINode(align_to_ref_p.invar(), log2_elt); 2138 _igvn.register_new_node_with_optimizer(aref); 2139 _phase->set_ctrl(aref, pre_ctrl); 2140 if (align_to_ref_p.negate_invar()) { 2141 e = new (_phase->C) SubINode(e, aref); 2142 } else { 2143 e = new (_phase->C) AddINode(e, aref); 2144 } 2145 _igvn.register_new_node_with_optimizer(e); 2146 _phase->set_ctrl(e, pre_ctrl); 2147 } 2148 if (vw > ObjectAlignmentInBytes) { 2149 // incorporate base e +/- base && Mask >>> log2(elt) 2150 Node* xbase = new(_phase->C) CastP2XNode(NULL, align_to_ref_p.base()); 2151 _igvn.register_new_node_with_optimizer(xbase); 2152 #ifdef _LP64 2153 xbase = new (_phase->C) ConvL2INode(xbase); 2154 _igvn.register_new_node_with_optimizer(xbase); 2155 #endif 2156 Node* mask = _igvn.intcon(vw-1); 2157 Node* masked_xbase = new (_phase->C) AndINode(xbase, mask); 2158 _igvn.register_new_node_with_optimizer(masked_xbase); 2159 Node* log2_elt = _igvn.intcon(exact_log2(elt_size)); 2160 Node* bref = new (_phase->C) URShiftINode(masked_xbase, log2_elt); 2161 _igvn.register_new_node_with_optimizer(bref); 2162 _phase->set_ctrl(bref, pre_ctrl); 2163 e = new (_phase->C) AddINode(e, bref); 2164 _igvn.register_new_node_with_optimizer(e); 2165 _phase->set_ctrl(e, pre_ctrl); 2166 } 2167 2168 // compute e +/- lim0 2169 if (scale < 0) { 2170 e = new (_phase->C) SubINode(e, lim0); 2171 } else { 2172 e = new (_phase->C) AddINode(e, lim0); 2173 } 2174 _igvn.register_new_node_with_optimizer(e); 2175 _phase->set_ctrl(e, pre_ctrl); 2176 2177 if (stride * scale > 0) { 2178 // compute V - (e +/- lim0) 2179 Node* va = _igvn.intcon(v_align); 2180 e = new (_phase->C) SubINode(va, e); 2181 _igvn.register_new_node_with_optimizer(e); 2182 _phase->set_ctrl(e, pre_ctrl); 2183 } 2184 // compute N = (exp) % V 2185 Node* va_msk = _igvn.intcon(v_align - 1); 2186 Node* N = new (_phase->C) AndINode(e, va_msk); 2187 _igvn.register_new_node_with_optimizer(N); 2188 _phase->set_ctrl(N, pre_ctrl); 2189 2190 // substitute back into (1), so that 
new limit 2191 // lim = lim0 + N 2192 Node* lim; 2193 if (stride < 0) { 2194 lim = new (_phase->C) SubINode(lim0, N); 2195 } else { 2196 lim = new (_phase->C) AddINode(lim0, N); 2197 } 2198 _igvn.register_new_node_with_optimizer(lim); 2199 _phase->set_ctrl(lim, pre_ctrl); 2200 Node* constrained = 2201 (stride > 0) ? (Node*) new (_phase->C) MinINode(lim, orig_limit) 2202 : (Node*) new (_phase->C) MaxINode(lim, orig_limit); 2203 _igvn.register_new_node_with_optimizer(constrained); 2204 _phase->set_ctrl(constrained, pre_ctrl); 2205 _igvn.hash_delete(pre_opaq); 2206 pre_opaq->set_req(1, constrained); 2207 } 2208 2209 //----------------------------get_pre_loop_end--------------------------- 2210 // Find pre loop end from main loop. Returns null if none. 2211 CountedLoopEndNode* SuperWord::get_pre_loop_end(CountedLoopNode* cl) { 2212 // The loop cannot be optimized if the graph shape at 2213 // the loop entry is inappropriate. 2214 if (!PhaseIdealLoop::is_canonical_main_loop_entry(cl)) { 2215 return NULL; 2216 } 2217 2218 Node* p_f = cl->in(LoopNode::EntryControl)->in(0)->in(0); 2219 if (!p_f->is_IfFalse()) return NULL; 2220 if (!p_f->in(0)->is_CountedLoopEnd()) return NULL; 2221 CountedLoopEndNode* pre_end = p_f->in(0)->as_CountedLoopEnd(); 2222 CountedLoopNode* loop_node = pre_end->loopnode(); 2223 if (loop_node == NULL || !loop_node->is_pre_loop()) return NULL; 2224 return pre_end; 2225 } 2226 2227 2228 //------------------------------init--------------------------- 2229 void SuperWord::init() { 2230 _dg.init(); 2231 _packset.clear(); 2232 _disjoint_ptrs.clear(); 2233 _block.clear(); 2234 _data_entry.clear(); 2235 _mem_slice_head.clear(); 2236 _mem_slice_tail.clear(); 2237 _node_info.clear(); 2238 _align_to_ref = NULL; 2239 _lpt = NULL; 2240 _lp = NULL; 2241 _bb = NULL; 2242 _iv = NULL; 2243 } 2244 2245 //------------------------------print_packset--------------------------- 2246 void SuperWord::print_packset() { 2247 #ifndef PRODUCT 2248 tty->print_cr("packset"); 2249 for (int i = 0; i < _packset.length(); i++) { 2250 tty->print_cr("Pack: %d", i); 2251 Node_List* p = _packset.at(i); 2252 print_pack(p); 2253 } 2254 #endif 2255 } 2256 2257 //------------------------------print_pack--------------------------- 2258 void SuperWord::print_pack(Node_List* p) { 2259 for (uint i = 0; i < p->size(); i++) { 2260 print_stmt(p->at(i)); 2261 } 2262 } 2263 2264 //------------------------------print_bb--------------------------- 2265 void SuperWord::print_bb() { 2266 #ifndef PRODUCT 2267 tty->print_cr("\nBlock"); 2268 for (int i = 0; i < _block.length(); i++) { 2269 Node* n = _block.at(i); 2270 tty->print("%d ", i); 2271 if (n) { 2272 n->dump(); 2273 } 2274 } 2275 #endif 2276 } 2277 2278 //------------------------------print_stmt--------------------------- 2279 void SuperWord::print_stmt(Node* s) { 2280 #ifndef PRODUCT 2281 tty->print(" align: %d \t", alignment(s)); 2282 s->dump(); 2283 #endif 2284 } 2285 2286 //------------------------------blank--------------------------- 2287 char* SuperWord::blank(uint depth) { 2288 static char blanks[101]; 2289 assert(depth < 101, "too deep"); 2290 for (uint i = 0; i < depth; i++) blanks[i] = ' '; 2291 blanks[depth] = '\0'; 2292 return blanks; 2293 } 2294 2295 2296 //==============================SWPointer=========================== 2297 2298 //----------------------------SWPointer------------------------ 2299 SWPointer::SWPointer(MemNode* mem, SuperWord* slp) : 2300 _mem(mem), _slp(slp), _base(NULL), _adr(NULL), 2301 _scale(0), _offset(0), _invar(NULL), 
_negate_invar(false) { 2302 2303 Node* adr = mem->in(MemNode::Address); 2304 if (!adr->is_AddP()) { 2305 assert(!valid(), "too complex"); 2306 return; 2307 } 2308 // Match AddP(base, AddP(ptr, k*iv [+ invariant]), constant) 2309 Node* base = adr->in(AddPNode::Base); 2310 // The base address should be loop invariant 2311 if (!invariant(base)) { 2312 assert(!valid(), "base address is loop variant"); 2313 return; 2314 } 2315 //unsafe reference could not be aligned appropriately without runtime checking 2316 if (base == NULL || base->bottom_type() == Type::TOP) { 2317 assert(!valid(), "unsafe access"); 2318 return; 2319 } 2320 for (int i = 0; i < 3; i++) { 2321 if (!scaled_iv_plus_offset(adr->in(AddPNode::Offset))) { 2322 assert(!valid(), "too complex"); 2323 return; 2324 } 2325 adr = adr->in(AddPNode::Address); 2326 if (base == adr || !adr->is_AddP()) { 2327 break; // stop looking at addp's 2328 } 2329 } 2330 _base = base; 2331 _adr = adr; 2332 assert(valid(), "Usable"); 2333 } 2334 2335 // Following is used to create a temporary object during 2336 // the pattern match of an address expression. 2337 SWPointer::SWPointer(SWPointer* p) : 2338 _mem(p->_mem), _slp(p->_slp), _base(NULL), _adr(NULL), 2339 _scale(0), _offset(0), _invar(NULL), _negate_invar(false) {} 2340 2341 //------------------------scaled_iv_plus_offset-------------------- 2342 // Match: k*iv + offset 2343 // where: k is a constant that maybe zero, and 2344 // offset is (k2 [+/- invariant]) where k2 maybe zero and invariant is optional 2345 bool SWPointer::scaled_iv_plus_offset(Node* n) { 2346 if (scaled_iv(n)) { 2347 return true; 2348 } 2349 if (offset_plus_k(n)) { 2350 return true; 2351 } 2352 int opc = n->Opcode(); 2353 if (opc == Op_AddI) { 2354 if (scaled_iv(n->in(1)) && offset_plus_k(n->in(2))) { 2355 return true; 2356 } 2357 if (scaled_iv(n->in(2)) && offset_plus_k(n->in(1))) { 2358 return true; 2359 } 2360 } else if (opc == Op_SubI) { 2361 if (scaled_iv(n->in(1)) && offset_plus_k(n->in(2), true)) { 2362 return true; 2363 } 2364 if (scaled_iv(n->in(2)) && offset_plus_k(n->in(1))) { 2365 _scale *= -1; 2366 return true; 2367 } 2368 } 2369 return false; 2370 } 2371 2372 //----------------------------scaled_iv------------------------ 2373 // Match: k*iv where k is a constant that's not zero 2374 bool SWPointer::scaled_iv(Node* n) { 2375 if (_scale != 0) { 2376 return false; // already found a scale 2377 } 2378 if (n == iv()) { 2379 _scale = 1; 2380 return true; 2381 } 2382 int opc = n->Opcode(); 2383 if (opc == Op_MulI) { 2384 if (n->in(1) == iv() && n->in(2)->is_Con()) { 2385 _scale = n->in(2)->get_int(); 2386 return true; 2387 } else if (n->in(2) == iv() && n->in(1)->is_Con()) { 2388 _scale = n->in(1)->get_int(); 2389 return true; 2390 } 2391 } else if (opc == Op_LShiftI) { 2392 if (n->in(1) == iv() && n->in(2)->is_Con()) { 2393 _scale = 1 << n->in(2)->get_int(); 2394 return true; 2395 } 2396 } else if (opc == Op_ConvI2L) { 2397 if (n->in(1)->Opcode() == Op_CastII && 2398 n->in(1)->as_CastII()->has_range_check()) { 2399 // Skip range check dependent CastII nodes 2400 n = n->in(1); 2401 } 2402 if (scaled_iv_plus_offset(n->in(1))) { 2403 return true; 2404 } 2405 } else if (opc == Op_LShiftL) { 2406 if (!has_iv() && _invar == NULL) { 2407 // Need to preserve the current _offset value, so 2408 // create a temporary object for this expression subtree. 2409 // Hacky, so should re-engineer the address pattern match. 
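      // Illustrative example: for a long address term such as
      // (ConvI2L(AddI(iv, c)) << 3), i.e. an 8-byte element indexed off the
      // iv, the recursive match on in(1) finds scale 1 and offset c; with
      // mult == 1 << 3 this yields _scale == 8 and adds 8*c to _offset.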
      SWPointer tmp(this);
      if (tmp.scaled_iv_plus_offset(n->in(1))) {
        if (tmp._invar == NULL) {
          int mult = 1 << n->in(2)->get_int();
          _scale   = tmp._scale  * mult;
          _offset += tmp._offset * mult;
          return true;
        }
      }
    }
  }
  return false;
}

//----------------------------offset_plus_k------------------------
// Match: offset is (k [+/- invariant])
// where k may be zero and the invariant is optional, but not both at once.
bool SWPointer::offset_plus_k(Node* n, bool negate) {
  int opc = n->Opcode();
  if (opc == Op_ConI) {
    _offset += negate ? -(n->get_int()) : n->get_int();
    return true;
  } else if (opc == Op_ConL) {
    // Okay if value fits into an int
    const TypeLong* t = n->find_long_type();
    if (t->higher_equal(TypeLong::INT)) {
      jlong loff = n->get_long();
      jint  off  = (jint)loff;
      _offset += negate ? -off : off;
      return true;
    }
    return false;
  }
  if (_invar != NULL) return false; // already have an invariant
  if (opc == Op_AddI) {
    if (n->in(2)->is_Con() && invariant(n->in(1))) {
      _negate_invar = negate;
      _invar = n->in(1);
      _offset += negate ? -(n->in(2)->get_int()) : n->in(2)->get_int();
      return true;
    } else if (n->in(1)->is_Con() && invariant(n->in(2))) {
      _offset += negate ? -(n->in(1)->get_int()) : n->in(1)->get_int();
      _negate_invar = negate;
      _invar = n->in(2);
      return true;
    }
  }
  if (opc == Op_SubI) {
    if (n->in(2)->is_Con() && invariant(n->in(1))) {
      _negate_invar = negate;
      _invar = n->in(1);
      _offset += !negate ? -(n->in(2)->get_int()) : n->in(2)->get_int();
      return true;
    } else if (n->in(1)->is_Con() && invariant(n->in(2))) {
      _offset += negate ? -(n->in(1)->get_int()) : n->in(1)->get_int();
      _negate_invar = !negate;
      _invar = n->in(2);
      return true;
    }
  }
  if (invariant(n)) {
    _negate_invar = negate;
    _invar = n;
    return true;
  }
  return false;
}

//----------------------------print------------------------
void SWPointer::print() {
#ifndef PRODUCT
  tty->print("base: %d adr: %d scale: %d offset: %d invar: %c%d\n",
             _base != NULL ? _base->_idx : 0,
             _adr  != NULL ? _adr->_idx  : 0,
             _scale, _offset,
             _negate_invar ? '-' : '+',
             _invar != NULL ? _invar->_idx : 0);
#endif
}

// ========================= OrderedPair =====================

const OrderedPair OrderedPair::initial;

// ========================= SWNodeInfo =====================

const SWNodeInfo SWNodeInfo::initial;


// ============================ DepGraph ===========================

//------------------------------make_node---------------------------
// Make a new dependence graph node for an ideal node.
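// A NULL ideal node is accepted: only non-NULL nodes are entered into the
// _idx -> DepMem map, which lets the graph hold sentinel DepMem entries
// (e.g. a root and tail) that have no underlying ideal node.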
DepMem* DepGraph::make_node(Node* node) {
  DepMem* m = new (_arena) DepMem(node);
  if (node != NULL) {
    assert(_map.at_grow(node->_idx) == NULL, "one init only");
    _map.at_put_grow(node->_idx, m);
  }
  return m;
}

//------------------------------make_edge---------------------------
// Make a new dependence graph edge from dpred -> dsucc
DepEdge* DepGraph::make_edge(DepMem* dpred, DepMem* dsucc) {
  DepEdge* e = new (_arena) DepEdge(dpred, dsucc, dsucc->in_head(), dpred->out_head());
  dpred->set_out_head(e);
  dsucc->set_in_head(e);
  return e;
}

// ========================== DepMem ========================

//------------------------------in_cnt---------------------------
int DepMem::in_cnt() {
  int ct = 0;
  for (DepEdge* e = _in_head; e != NULL; e = e->next_in()) ct++;
  return ct;
}

//------------------------------out_cnt---------------------------
int DepMem::out_cnt() {
  int ct = 0;
  for (DepEdge* e = _out_head; e != NULL; e = e->next_out()) ct++;
  return ct;
}

//------------------------------print-----------------------------
void DepMem::print() {
#ifndef PRODUCT
  tty->print(" DepNode %d (", _node->_idx);
  for (DepEdge* p = _in_head; p != NULL; p = p->next_in()) {
    Node* pred = p->pred()->node();
    tty->print(" %d", pred != NULL ? pred->_idx : 0);
  }
  tty->print(") [");
  for (DepEdge* s = _out_head; s != NULL; s = s->next_out()) {
    Node* succ = s->succ()->node();
    tty->print(" %d", succ != NULL ? succ->_idx : 0);
  }
  tty->print_cr(" ]");
#endif
}

// =========================== DepEdge =========================

//------------------------------print---------------------------
void DepEdge::print() {
#ifndef PRODUCT
  tty->print_cr("DepEdge: %d [ %d ]", _pred->node()->_idx, _succ->node()->_idx);
#endif
}

// =========================== DepPreds =========================
// Iterator over predecessor edges in the dependence graph.

//------------------------------DepPreds---------------------------
DepPreds::DepPreds(Node* n, DepGraph& dg) {
  _n = n;
  _done = false;
  if (_n->is_Store() || _n->is_Load()) {
    _next_idx = MemNode::Address;
    _end_idx  = n->req();
    _dep_next = dg.dep(_n)->in_head();
  } else if (_n->is_Mem()) {
    _next_idx = 0;
    _end_idx  = 0;
    _dep_next = dg.dep(_n)->in_head();
  } else {
    _next_idx = 1;
    _end_idx  = _n->req();
    _dep_next = NULL;
  }
  next();
}

//------------------------------next---------------------------
void DepPreds::next() {
  if (_dep_next != NULL) {
    _current  = _dep_next->pred()->node();
    _dep_next = _dep_next->next_in();
  } else if (_next_idx < _end_idx) {
    _current  = _n->in(_next_idx++);
  } else {
    _done = true;
  }
}

// =========================== DepSuccs =========================
// Iterator over successor edges in the dependence graph.
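//
// Both iterator classes share the same idiom; a typical walk (as used by
// compute_max_depth above) looks like:
//
//   for (DepPreds preds(n, _dg); !preds.done(); preds.next()) {
//     Node* pred = preds.current();
//     ...
//   }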
//------------------------------DepSuccs---------------------------
DepSuccs::DepSuccs(Node* n, DepGraph& dg) {
  _n = n;
  _done = false;
  if (_n->is_Load()) {
    _next_idx = 0;
    _end_idx  = _n->outcnt();
    _dep_next = dg.dep(_n)->out_head();
  } else if (_n->is_Mem() || (_n->is_Phi() && _n->bottom_type() == Type::MEMORY)) {
    _next_idx = 0;
    _end_idx  = 0;
    _dep_next = dg.dep(_n)->out_head();
  } else {
    _next_idx = 0;
    _end_idx  = _n->outcnt();
    _dep_next = NULL;
  }
  next();
}

//-------------------------------next---------------------------
void DepSuccs::next() {
  if (_dep_next != NULL) {
    _current  = _dep_next->succ()->node();
    _dep_next = _dep_next->next_out();
  } else if (_next_idx < _end_idx) {
    _current  = _n->raw_out(_next_idx++);
  } else {
    _done = true;
  }
}