/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/regmask.hpp"
#include "opto/type.hpp"
#include "utilities/copy.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp"
#endif

class RegMask;
// #include "phase.hpp"
class PhaseTransform;
class PhaseGVN;

// Arena we are currently building Nodes in
const uint Node::NotAMachineReg = 0xffff0000;

#ifndef PRODUCT
extern int nodes_created;
#endif

#ifdef ASSERT

//-------------------------- construct_node------------------------------------
// Set a breakpoint here to identify where a particular node index is built.
void Node::verify_construction() {
  _debug_orig = NULL;
  int old_debug_idx = Compile::debug_idx();
  int new_debug_idx = old_debug_idx+1;
  if (new_debug_idx > 0) {
    // Arrange that the lowest five decimal digits of _debug_idx
    // will repeat those of _idx. In case this is somehow pathological,
    // we continue to assign negative numbers (!) consecutively.
    const int mod = 100000;
    int bump = (int)(_idx - new_debug_idx) % mod;
    if (bump < 0)  bump += mod;
    assert(bump >= 0 && bump < mod, "");
    new_debug_idx += bump;
  }
  Compile::set_debug_idx(new_debug_idx);
  set_debug_idx( new_debug_idx );
  assert(Compile::current()->unique() < (INT_MAX - 1), "Node limit exceeded INT_MAX");
  assert(Compile::current()->live_nodes() < Compile::current()->max_node_limit(), "Live Node limit exceeded limit");
  if (BreakAtNode != 0 && (_debug_idx == BreakAtNode || (int)_idx == BreakAtNode)) {
    tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d", _idx, _debug_idx);
    BREAKPOINT;
  }
#if OPTO_DU_ITERATOR_ASSERT
  _last_del = NULL;
  _del_tick = 0;
#endif
  _hash_lock = 0;
}


// #ifdef ASSERT ...
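// Usage note (illustrative): in a debug build, the BreakAtNode check above is
// the hook for trapping the construction of a particular node; running with,
// e.g., -XX:BreakAtNode=42 should hit BREAKPOINT as soon as a node whose _idx
// or _debug_idx equals 42 is built.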

#if OPTO_DU_ITERATOR_ASSERT
void DUIterator_Common::sample(const Node* node) {
  _vdui = VerifyDUIterators;
  _node = node;
  _outcnt = node->_outcnt;
  _del_tick = node->_del_tick;
  _last = NULL;
}

void DUIterator_Common::verify(const Node* node, bool at_end_ok) {
  assert(_node == node, "consistent iterator source");
  assert(_del_tick == node->_del_tick, "no unexpected deletions allowed");
}

void DUIterator_Common::verify_resync() {
  // Ensure that the loop body has just deleted the last guy produced.
  const Node* node = _node;
  // Ensure that at least one copy of the last-seen edge was deleted.
  // Note:  It is OK to delete multiple copies of the last-seen edge.
  // Unfortunately, we have no way to verify that all the deletions delete
  // that same edge.  On this point we must use the Honor System.
  assert(node->_del_tick >= _del_tick+1, "must have deleted an edge");
  assert(node->_last_del == _last, "must have deleted the edge just produced");
  // We liked this deletion, so accept the resulting outcnt and tick.
  _outcnt = node->_outcnt;
  _del_tick = node->_del_tick;
}

void DUIterator_Common::reset(const DUIterator_Common& that) {
  if (this == &that)  return;  // ignore assignment to self
  if (!_vdui) {
    // We need to initialize everything, overwriting garbage values.
    _last = that._last;
    _vdui = that._vdui;
  }
  // Note:  It is legal (though odd) for an iterator over some node x
  // to be reassigned to iterate over another node y.  Some doubly-nested
  // progress loops depend on being able to do this.
  const Node* node = that._node;
  // Re-initialize everything, except _last.
  _node = node;
  _outcnt = node->_outcnt;
  _del_tick = node->_del_tick;
}

void DUIterator::sample(const Node* node) {
  DUIterator_Common::sample(node);      // Initialize the assertion data.
  _refresh_tick = 0;                    // No refreshes have happened, as yet.
}

void DUIterator::verify(const Node* node, bool at_end_ok) {
  DUIterator_Common::verify(node, at_end_ok);
  assert(_idx < node->_outcnt + (uint)at_end_ok, "idx in range");
}

void DUIterator::verify_increment() {
  if (_refresh_tick & 1) {
    // We have refreshed the index during this loop.
    // Fix up _idx to meet asserts.
    if (_idx > _outcnt)  _idx = _outcnt;
  }
  verify(_node, true);
}

void DUIterator::verify_resync() {
  // Note:  We do not assert on _outcnt, because insertions are OK here.
  DUIterator_Common::verify_resync();
  // Make sure we are still in sync, possibly with no more out-edges:
  verify(_node, true);
}

void DUIterator::reset(const DUIterator& that) {
  if (this == &that)  return;  // self assignment is always a no-op
  assert(that._refresh_tick == 0, "assign only the result of Node::outs()");
  assert(that._idx == 0, "assign only the result of Node::outs()");
  assert(_idx == that._idx, "already assigned _idx");
  if (!_vdui) {
    // We need to initialize everything, overwriting garbage values.
    sample(that._node);
  } else {
    DUIterator_Common::reset(that);
    if (_refresh_tick & 1) {
      _refresh_tick++;                  // Clear the "was refreshed" flag.
    }
    assert(_refresh_tick < 2*100000, "DU iteration must converge quickly");
  }
}

void DUIterator::refresh() {
  DUIterator_Common::sample(_node);     // Re-fetch assertion data.
  _refresh_tick |= 1;                   // Set the "was refreshed" flag.
}

void DUIterator::verify_finish() {
  // If the loop has killed the node, do not require it to re-run.
  if (_node->_outcnt == 0)  _refresh_tick &= ~1;
  // If this assert triggers, it means that a loop used refresh_out_pos
  // to re-synch an iteration index, but the loop did not correctly
  // re-run itself, using a "while (progress)" construct.
  // This iterator enforces the rule that you must keep trying the loop
  // until it "runs clean" without any need for refreshing.
  assert(!(_refresh_tick & 1), "the loop must run once with no refreshing");
}


void DUIterator_Fast::verify(const Node* node, bool at_end_ok) {
  DUIterator_Common::verify(node, at_end_ok);
  Node** out = node->_out;
  uint cnt = node->_outcnt;
  assert(cnt == _outcnt, "no insertions allowed");
  assert(_outp >= out && _outp <= out + cnt - !at_end_ok, "outp in range");
  // This last check is carefully designed to work for NO_OUT_ARRAY.
}

void DUIterator_Fast::verify_limit() {
  const Node* node = _node;
  verify(node, true);
  assert(_outp == node->_out + node->_outcnt, "limit still correct");
}

void DUIterator_Fast::verify_resync() {
  const Node* node = _node;
  if (_outp == node->_out + _outcnt) {
    // Note that the limit imax, not the pointer i, gets updated with the
    // exact count of deletions.  (For the pointer it's always "--i".)
    assert(node->_outcnt+node->_del_tick == _outcnt+_del_tick, "no insertions allowed with deletion(s)");
    // This is a limit pointer, with a name like "imax".
    // Fudge the _last field so that the common assert will be happy.
    _last = (Node*) node->_last_del;
    DUIterator_Common::verify_resync();
  } else {
    assert(node->_outcnt < _outcnt, "no insertions allowed with deletion(s)");
    // A normal internal pointer.
    DUIterator_Common::verify_resync();
    // Make sure we are still in sync, possibly with no more out-edges:
    verify(node, true);
  }
}

void DUIterator_Fast::verify_relimit(uint n) {
  const Node* node = _node;
  assert((int)n > 0, "use imax -= n only with a positive count");
  // This must be a limit pointer, with a name like "imax".
  assert(_outp == node->_out + node->_outcnt, "apply -= only to a limit (imax)");
  // The reported number of deletions must match what the node saw.
  assert(node->_del_tick == _del_tick + n, "must have deleted n edges");
  // Fudge the _last field so that the common assert will be happy.
  _last = (Node*) node->_last_del;
  DUIterator_Common::verify_resync();
}

void DUIterator_Fast::reset(const DUIterator_Fast& that) {
  assert(_outp == that._outp, "already assigned _outp");
  DUIterator_Common::reset(that);
}

void DUIterator_Last::verify(const Node* node, bool at_end_ok) {
  // at_end_ok means the _outp is allowed to underflow by 1
  _outp += at_end_ok;
  DUIterator_Fast::verify(node, at_end_ok);  // check _del_tick, etc.
  _outp -= at_end_ok;
  assert(_outp == (node->_out + node->_outcnt) - 1, "pointer must point to end of nodes");
}

void DUIterator_Last::verify_limit() {
  // Do not require the limit address to be resynched.
  //verify(node, true);
  assert(_outp == _node->_out, "limit still correct");
}

void DUIterator_Last::verify_step(uint num_edges) {
  assert((int)num_edges > 0, "need non-zero edge count for loop progress");
  _outcnt   -= num_edges;
  _del_tick += num_edges;
  // Make sure we are still in sync, possibly with no more out-edges:
  const Node* node = _node;
  verify(node, true);
  assert(node->_last_del == _last, "must have deleted the edge just produced");
}

#endif //OPTO_DU_ITERATOR_ASSERT


#endif //ASSERT
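// For reference, the loop shapes that the assertion machinery above polices
// look roughly like this (see the DUIterator comments in node.hpp for the
// authoritative forms):
//
//   for (DUIterator i = x->outs(); x->has_out(i); i++) {
//     Node* y = x->out(i);    // general walk; tolerates deletion plus resync/refresh
//     ...
//   }
//   for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) {
//     Node* y = x->fast_out(i);   // fast walk; no insertions during the loop
//     ...
//   }
//   for (DUIterator_Last imin, i = x->last_outs(imin); i >= imin; --i) {
//     Node* y = x->last_out(i);   // backward walk, used when deleting out-edges
//     ...
//   }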

// This constant used to initialize _out may be any non-null value.
// The value NULL is reserved for the top node only.
#define NO_OUT_ARRAY ((Node**)-1)

// This funny expression handshakes with Node::operator new
// to pull Compile::current out of the new node's _out field,
// and then calls a subroutine which manages most field
// initializations.  The only one which is tricky is the
// _idx field, which is const, and so must be initialized
// by a return value, not an assignment.
//
// (Aren't you thankful that Java finals don't require so many tricks?)
#define IDX_INIT(req) this->Init((req), (Compile*) this->_out)
#ifdef _MSC_VER // the IDX_INIT hack falls foul of warning C4355
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
#ifdef __clang__
#pragma clang diagnostic push
#pragma GCC diagnostic ignored "-Wuninitialized"
#endif

// Out-of-line code from node constructors.
// Executed only when extra debug info. is being passed around.
static void init_node_notes(Compile* C, int idx, Node_Notes* nn) {
  C->set_node_notes_at(idx, nn);
}

// Shared initialization code.
inline int Node::Init(int req, Compile* C) {
  assert(Compile::current() == C, "must use operator new(Compile*)");
  int idx = C->next_unique();

  // Allocate memory for the necessary number of edges.
  if (req > 0) {
    // Allocate space for _in array to have double alignment.
    _in = (Node **) ((char *) (C->node_arena()->Amalloc_D(req * sizeof(void*))));
#ifdef ASSERT
    _in[req-1] = this; // magic cookie for assertion check
#endif
  }
  // If there are default notes floating around, capture them:
  Node_Notes* nn = C->default_node_notes();
  if (nn != NULL)  init_node_notes(C, idx, nn);

  // Note:  At this point, C is dead,
  // and we begin to initialize the new Node.

  _cnt = _max = req;
  _outcnt = _outmax = 0;
  _class_id = Class_Node;
  _flags = 0;
  _out = NO_OUT_ARRAY;
  return idx;
}

//------------------------------Node-------------------------------------------
// Create a Node, with a given number of required edges.
Node::Node(uint req)
  : _idx(IDX_INIT(req))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  assert( req < Compile::current()->max_node_limit() - NodeLimitFudgeFactor, "Input limit exceeded" );
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  if (req == 0) {
    assert( _in == (Node**)this, "Must not pass arg count to 'new'" );
    _in = NULL;
  } else {
    assert( _in[req-1] == this, "Must pass arg count to 'new'" );
    Node** to = _in;
    for(uint i = 0; i < req; i++) {
      to[i] = NULL;
    }
  }
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0)
  : _idx(IDX_INIT(1))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[0] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1)
  : _idx(IDX_INIT(2))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[1] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2)
  : _idx(IDX_INIT(3))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[2] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3)
  : _idx(IDX_INIT(4))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[3] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3, Node *n4)
  : _idx(IDX_INIT(5))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[4] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  assert( is_not_dead(n4), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
  _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
           Node *n4, Node *n5)
  : _idx(IDX_INIT(6))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[5] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  assert( is_not_dead(n4), "can not use dead node");
  assert( is_not_dead(n5), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
  _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
  _in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
           Node *n4, Node *n5, Node *n6)
  : _idx(IDX_INIT(7))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[6] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  assert( is_not_dead(n4), "can not use dead node");
  assert( is_not_dead(n5), "can not use dead node");
  assert( is_not_dead(n6), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
  _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
  _in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this);
  _in[6] = n6; if (n6 != NULL) n6->add_out((Node *)this);
}

#ifdef __clang__
#pragma clang diagnostic pop
#endif

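// Illustrative use of the constructors above: C2 code builds nodes with the
// arena placement new that feeds the IDX_INIT handshake, along the lines of
// the examples quoted in the Ideal() comment further down, e.g.
//
//   Node* shift = phase->transform(new (C) LShiftINode(in(1), phase->intcon(2)));
//
// The chosen constructor then stores each input and adds the reciprocal
// def-use edge via add_out().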
//------------------------------clone------------------------------------------
// Clone a Node.
Node *Node::clone() const {
  Compile* C = Compile::current();
  uint s = size_of();           // Size of inherited Node
  Node *n = (Node*)C->node_arena()->Amalloc_D(size_of() + _max*sizeof(Node*));
  Copy::conjoint_words_to_lower((HeapWord*)this, (HeapWord*)n, s);
  // Set the new input pointer array
  n->_in = (Node**)(((char*)n)+s);
  // Cannot share the old output pointer array, so kill it
  n->_out = NO_OUT_ARRAY;
  // And reset the counters to 0
  n->_outcnt = 0;
  n->_outmax = 0;
  // Unlock this guy, since he is not in any hash table.
  debug_only(n->_hash_lock = 0);
  // Walk the old node's input list to duplicate its edges
  uint i;
  for( i = 0; i < len(); i++ ) {
    Node *x = in(i);
    n->_in[i] = x;
    if (x != NULL) x->add_out(n);
  }
  if (is_macro())
    C->add_macro_node(n);
  if (is_expensive())
    C->add_expensive_node(n);

  if (Opcode() == Op_ShenandoahLoadReferenceBarrier) {
    C->add_shenandoah_barrier(reinterpret_cast<ShenandoahLoadReferenceBarrierNode*>(n));
  }
  // If the cloned node is a range check dependent CastII, add it to the list.
  CastIINode* cast = n->isa_CastII();
  if (cast != NULL && cast->has_range_check()) {
    C->add_range_check_cast(cast);
  }

  n->set_idx(C->next_unique()); // Get new unique index as well
  debug_only( n->verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Do not patch over the debug_idx of a clone, because it makes it
  // impossible to break on the clone's moment of creation.
  //debug_only( n->set_debug_idx( debug_idx() ) );

  C->copy_node_notes_to(n, (Node*) this);

  // MachNode clone
  uint nopnds;
  if (this->is_Mach() && (nopnds = this->as_Mach()->num_opnds()) > 0) {
    MachNode *mach  = n->as_Mach();
    MachNode *mthis = this->as_Mach();
    // Get address of _opnd_array.
    // It should be the same offset since it is the clone of this node.
    MachOper **from = mthis->_opnds;
    MachOper **to = (MachOper **)((size_t)(&mach->_opnds) +
                    pointer_delta((const void*)from,
                                  (const void*)(&mthis->_opnds), 1));
    mach->_opnds = to;
    for ( uint i = 0; i < nopnds; ++i ) {
      to[i] = from[i]->clone(C);
    }
  }
  // cloning CallNode may need to clone JVMState
  if (n->is_Call()) {
    n->as_Call()->clone_jvms(C);
  }
  if (n->is_SafePoint()) {
    n->as_SafePoint()->clone_replaced_nodes();
  }
  return n;                     // Return the clone
}

//---------------------------setup_is_top--------------------------------------
// Call this when changing the top node, to reassert the invariants
// required by Node::is_top.  See Compile::set_cached_top_node.
void Node::setup_is_top() {
  if (this == (Node*)Compile::current()->top()) {
    // This node has just become top.  Kill its out array.
    _outcnt = _outmax = 0;
    _out = NULL;                // marker value for top
    assert(is_top(), "must be top");
  } else {
    if (_out == NULL)  _out = NO_OUT_ARRAY;
    assert(!is_top(), "must not be top");
  }
}


//------------------------------~Node------------------------------------------
// Fancy destructor; eagerly attempt to reclaim Node numberings and storage
extern int reclaim_idx ;
extern int reclaim_in  ;
extern int reclaim_node;
void Node::destruct() {
  // Eagerly reclaim unique Node numberings
  Compile* compile = Compile::current();
  if ((uint)_idx+1 == compile->unique()) {
    compile->set_unique(compile->unique()-1);
#ifdef ASSERT
    reclaim_idx++;
#endif
  }
  // Clear debug info:
  Node_Notes* nn = compile->node_notes_at(_idx);
  if (nn != NULL)  nn->clear();
  // Walk the input array, freeing the corresponding output edges
  _cnt = _max;  // forget req/prec distinction
  uint i;
  for( i = 0; i < _max; i++ ) {
    set_req(i, NULL);
    //assert(def->out(def->outcnt()-1) == (Node *)this,"bad def-use hacking in reclaim");
  }
  assert(outcnt() == 0, "deleting a node must not leave a dangling use");
  // See if the input array was allocated just prior to the object
  int edge_size = _max*sizeof(void*);
  int out_edge_size = _outmax*sizeof(void*);
  char *edge_end = ((char*)_in) + edge_size;
  char *out_array = (char*)(_out == NO_OUT_ARRAY? NULL: _out);
  char *out_edge_end = out_array + out_edge_size;
  int node_size = size_of();

  // Free the output edge array
  if (out_edge_size > 0) {
#ifdef ASSERT
    if( out_edge_end == compile->node_arena()->hwm() )
      reclaim_in += out_edge_size;  // count reclaimed out edges with in edges
#endif
    compile->node_arena()->Afree(out_array, out_edge_size);
  }

  // Free the input edge array and the node itself
  if( edge_end == (char*)this ) {
#ifdef ASSERT
    if( edge_end+node_size == compile->node_arena()->hwm() ) {
      reclaim_in  += edge_size;
      reclaim_node+= node_size;
    }
#else
    // It was; free the input array and object all in one hit
    compile->node_arena()->Afree(_in,edge_size+node_size);
#endif
  } else {

    // Free just the input array
#ifdef ASSERT
    if( edge_end == compile->node_arena()->hwm() )
      reclaim_in += edge_size;
#endif
    compile->node_arena()->Afree(_in,edge_size);

    // Free just the object
#ifdef ASSERT
    if( ((char*)this) + node_size == compile->node_arena()->hwm() )
      reclaim_node+= node_size;
#else
    compile->node_arena()->Afree(this,node_size);
#endif
  }
  if (is_macro()) {
    compile->remove_macro_node(this);
  }
  if (is_expensive()) {
    compile->remove_expensive_node(this);
  }
  if (Opcode() == Op_ShenandoahLoadReferenceBarrier) {
    compile->remove_shenandoah_barrier(reinterpret_cast<ShenandoahLoadReferenceBarrierNode*>(this));
  }
  CastIINode* cast = isa_CastII();
  if (cast != NULL && cast->has_range_check()) {
    compile->remove_range_check_cast(cast);
  }

  if (is_SafePoint()) {
    as_SafePoint()->delete_replaced_nodes();
  }
#ifdef ASSERT
  // We will not actually delete the storage, but we'll make the node unusable.
  *(address*)this = badAddress;  // smash the C++ vtbl, probably
  _in = _out = (Node**) badAddress;
  _max = _cnt = _outmax = _outcnt = 0;
#endif
}

//------------------------------grow-------------------------------------------
// Grow the input array, making space for more edges
void Node::grow( uint len ) {
  Arena* arena = Compile::current()->node_arena();
  uint new_max = _max;
  if( new_max == 0 ) {
    _max = 4;
    _in = (Node**)arena->Amalloc(4*sizeof(Node*));
    Node** to = _in;
    to[0] = NULL;
    to[1] = NULL;
    to[2] = NULL;
    to[3] = NULL;
    return;
  }
  while( new_max <= len ) new_max <<= 1; // Find next power-of-2
  // Trimming to limit allows a uint8 to handle up to 255 edges.
  // Previously I was using only powers-of-2 which peaked at 128 edges.
  //if( new_max >= limit ) new_max = limit-1;
  _in = (Node**)arena->Arealloc(_in, _max*sizeof(Node*), new_max*sizeof(Node*));
  Copy::zero_to_bytes(&_in[_max], (new_max-_max)*sizeof(Node*)); // NULL all new space
  _max = new_max;               // Record new max length
  // This assertion makes sure that Node::_max is wide enough to
  // represent the numerical value of new_max.
  assert(_max == new_max && _max > len, "int width of _max is too small");
}

//-----------------------------out_grow----------------------------------------
// Grow the output array, making space for more edges
void Node::out_grow( uint len ) {
  assert(!is_top(), "cannot grow a top node's out array");
  Arena* arena = Compile::current()->node_arena();
  uint new_max = _outmax;
  if( new_max == 0 ) {
    _outmax = 4;
    _out = (Node **)arena->Amalloc(4*sizeof(Node*));
    return;
  }
  while( new_max <= len ) new_max <<= 1; // Find next power-of-2
  // Trimming to limit allows a uint8 to handle up to 255 edges.
  // Previously I was using only powers-of-2 which peaked at 128 edges.
  //if( new_max >= limit ) new_max = limit-1;
  assert(_out != NULL && _out != NO_OUT_ARRAY, "out must have sensible value");
  _out = (Node**)arena->Arealloc(_out,_outmax*sizeof(Node*),new_max*sizeof(Node*));
  //Copy::zero_to_bytes(&_out[_outmax], (new_max-_outmax)*sizeof(Node*)); // NULL all new space
  _outmax = new_max;            // Record new max length
  // This assertion makes sure that Node::_max is wide enough to
  // represent the numerical value of new_max.
  assert(_outmax == new_max && _outmax > len, "int width of _outmax is too small");
}

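// Illustrative note: both grow() and out_grow() double the array to the next
// power of two that exceeds the requested length; for example, adding a fifth
// edge to a node whose _max is 4 reallocates the _in array to hold 8 entries.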
#ifdef ASSERT
//------------------------------is_dead----------------------------------------
bool Node::is_dead() const {
  // Mach and pinch point nodes may look like dead.
  if( is_top() || is_Mach() || (Opcode() == Op_Node && _outcnt > 0) )
    return false;
  for( uint i = 0; i < _max; i++ )
    if( _in[i] != NULL )
      return false;
  dump();
  return true;
}
#endif


//------------------------------is_unreachable---------------------------------
bool Node::is_unreachable(PhaseIterGVN &igvn) const {
  assert(!is_Mach(), "doesn't work with MachNodes");
  return outcnt() == 0 || igvn.type(this) == Type::TOP || in(0)->is_top();
}

//------------------------------add_req----------------------------------------
// Add a new required input at the end
void Node::add_req( Node *n ) {
  assert( is_not_dead(n), "can not use dead node");

  // Look to see if I can move precedence down one without reallocating
  if( (_cnt >= _max) || (in(_max-1) != NULL) )
    grow( _max+1 );

  // Find a precedence edge to move
  if( in(_cnt) != NULL ) {      // Next precedence edge is busy?
    uint i;
    for( i=_cnt; i<_max; i++ )
      if( in(i) == NULL )       // Find the NULL at end of prec edge list
        break;                  // There must be one, since we grew the array
    _in[i] = in(_cnt);          // Move prec over, making space for req edge
  }
  _in[_cnt++] = n;              // Stuff over old prec edge
  if (n != NULL) n->add_out((Node *)this);
}

//---------------------------add_req_batch-------------------------------------
// Add a new required input at the end
void Node::add_req_batch( Node *n, uint m ) {
  assert( is_not_dead(n), "can not use dead node");
  // check various edge cases
  if ((int)m <= 1) {
    assert((int)m >= 0, "oob");
    if (m != 0)  add_req(n);
    return;
  }

  // Look to see if I can move precedence down one without reallocating
  if( (_cnt+m) > _max || _in[_max-m] )
    grow( _max+m );

  // Find a precedence edge to move
  if( _in[_cnt] != NULL ) {     // Next precedence edge is busy?
    uint i;
    for( i=_cnt; i<_max; i++ )
      if( _in[i] == NULL )      // Find the NULL at end of prec edge list
        break;                  // There must be one, since we grew the array
    // Slide all the precs over by m positions (assume #prec << m).
    Copy::conjoint_words_to_higher((HeapWord*)&_in[_cnt], (HeapWord*)&_in[_cnt+m], ((i-_cnt)*sizeof(Node*)));
  }

  // Stuff over the old prec edges
  for(uint i=0; i<m; i++ ) {
    _in[_cnt++] = n;
  }

  // Insert multiple out edges on the node.
  if (n != NULL && !n->is_top()) {
    for(uint i=0; i<m; i++ ) {
      n->add_out((Node *)this);
    }
  }
}

//------------------------------del_req----------------------------------------
// Delete the required edge and compact the edge array
void Node::del_req( uint idx ) {
  assert( idx < _cnt, "oob");
  assert( !VerifyHashTableKeys || _hash_lock == 0,
          "remove node from hash table before modifying it");
  // First remove corresponding def-use edge
  Node *n = in(idx);
  if (n != NULL) n->del_out((Node *)this);
  _in[idx] = in(--_cnt); // Compact the array
  // Avoid spec violation: Gap in prec edges.
  close_prec_gap_at(_cnt);
}

//------------------------------del_req_ordered--------------------------------
// Delete the required edge and compact the edge array with preserved order
void Node::del_req_ordered( uint idx ) {
  assert( idx < _cnt, "oob");
  assert( !VerifyHashTableKeys || _hash_lock == 0,
          "remove node from hash table before modifying it");
  // First remove corresponding def-use edge
  Node *n = in(idx);
  if (n != NULL) n->del_out((Node *)this);
  if (idx < --_cnt) {    // Not last edge ?
    Copy::conjoint_words_to_lower((HeapWord*)&_in[idx+1], (HeapWord*)&_in[idx], ((_cnt-idx)*sizeof(Node*)));
  }
  // Avoid spec violation: Gap in prec edges.
  close_prec_gap_at(_cnt);
}

//------------------------------ins_req----------------------------------------
// Insert a new required input at the given index
void Node::ins_req( uint idx, Node *n ) {
  assert( is_not_dead(n), "can not use dead node");
  add_req(NULL);                // Make space
  assert( idx < _max, "Must have allocated enough space");
  // Slide over
  if(_cnt-idx-1 > 0) {
    Copy::conjoint_words_to_higher((HeapWord*)&_in[idx], (HeapWord*)&_in[idx+1], ((_cnt-idx-1)*sizeof(Node*)));
  }
  _in[idx] = n;                            // Stuff over old required edge
  if (n != NULL) n->add_out((Node *)this); // Add reciprocal def-use edge
}

//-----------------------------find_edge---------------------------------------
int Node::find_edge(Node* n) {
  for (uint i = 0; i < len(); i++) {
    if (_in[i] == n)  return i;
  }
  return -1;
}

//----------------------------replace_edge-------------------------------------
int Node::replace_edge(Node* old, Node* neww) {
  if (old == neww)  return 0;  // nothing to do
  uint nrep = 0;
  for (uint i = 0; i < len(); i++) {
    if (in(i) == old) {
      if (i < req()) {
        set_req(i, neww);
      } else {
        assert(find_prec_edge(neww) == -1, err_msg("spec violation: duplicated prec edge (node %d -> %d)", _idx, neww->_idx));
        set_prec(i, neww);
      }
      nrep++;
    }
  }
  return nrep;
}

/**
 * Replace input edges in the range pointing to 'old' node.
 */
int Node::replace_edges_in_range(Node* old, Node* neww, int start, int end) {
  if (old == neww)  return 0;  // nothing to do
  uint nrep = 0;
  for (int i = start; i < end; i++) {
    if (in(i) == old) {
      set_req(i, neww);
      nrep++;
    }
  }
  return nrep;
}

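// Typical use (illustrative): after building a replacement node x_prime for x,
// a pass rewires one user y with y->replace_edge(x, x_prime), or only the
// inputs in [start,end) with y->replace_edges_in_range(x, x_prime, start, end).
// Both return the number of edges rewired, which callers can use to check
// that progress was made.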
//-------------------------disconnect_inputs-----------------------------------
// NULL out all inputs to eliminate incoming Def-Use edges.
// Return the number of edges between 'n' and 'this'
int Node::disconnect_inputs(Node *n, Compile* C) {
  int edges_to_n = 0;

  uint cnt = req();
  for( uint i = 0; i < cnt; ++i ) {
    if( in(i) == 0 ) continue;
    if( in(i) == n ) ++edges_to_n;
    set_req(i, NULL);
  }
  // Remove precedence edges if any exist
  // Note: Safepoints may have precedence edges, even during parsing
  if( (req() != len()) && (in(req()) != NULL) ) {
    uint max = len();
    for( uint i = 0; i < max; ++i ) {
      if( in(i) == 0 ) continue;
      if( in(i) == n ) ++edges_to_n;
      set_prec(i, NULL);
    }
  }

  // Node::destruct requires all out edges be deleted first
  // debug_only(destruct();)   // no reuse benefit expected
  if (edges_to_n == 0) {
    C->record_dead_node(_idx);
  }
  return edges_to_n;
}

//-----------------------------uncast---------------------------------------
// %%% Temporary, until we sort out CheckCastPP vs. CastPP.
// Strip away casting.  (It is depth-limited.)
Node* Node::uncast() const {
  // Should be inline:
  //return is_ConstraintCast() ? uncast_helper(this) : (Node*) this;
  if (is_ConstraintCast() || is_CheckCastPP())
    return uncast_helper(this);
  else
    return (Node*) this;
}

// Find an out-edge (use) of the current node whose Opcode matches 'opcode'.
Node* Node::find_out_with(int opcode) {
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    Node* use = fast_out(i);
    if (use->Opcode() == opcode) {
      return use;
    }
  }
  return NULL;
}

//---------------------------uncast_helper-------------------------------------
Node* Node::uncast_helper(const Node* p) {
#ifdef ASSERT
  uint depth_count = 0;
  const Node* orig_p = p;
#endif

  while (true) {
#ifdef ASSERT
    if (depth_count >= K) {
      orig_p->dump(4);
      if (p != orig_p)
        p->dump(1);
    }
    assert(depth_count++ < K, "infinite loop in Node::uncast_helper");
#endif
    if (p == NULL || p->req() != 2) {
      break;
    } else if (p->is_ConstraintCast()) {
      p = p->in(1);
    } else if (p->is_CheckCastPP()) {
      p = p->in(1);
    } else {
      break;
    }
  }
  return (Node*) p;
}

// Return true if the current node has an out that matches opcode.
bool Node::has_out_with(int opcode) {
  return (find_out_with(opcode) != NULL);
}

//------------------------------add_prec---------------------------------------
// Add a new precedence input.  Precedence inputs are unordered, with
// duplicates removed and NULLs packed down at the end.
void Node::add_prec( Node *n ) {
  assert( is_not_dead(n), "can not use dead node");

  // Check for NULL at end
  if( _cnt >= _max || in(_max-1) )
    grow( _max+1 );

  // Find a precedence edge to move
  uint i = _cnt;
  while( in(i) != NULL ) {
    if (in(i) == n) return; // Avoid spec violation: duplicated prec edge.
    i++;
  }
  _in[i] = n;                                // Stuff prec edge over NULL
  if ( n != NULL) n->add_out((Node *)this);  // Add mirror edge

#ifdef ASSERT
  while ((++i)<_max) { assert(_in[i] == NULL, err_msg("spec violation: Gap in prec edges (node %d)", _idx)); }
#endif
}

//------------------------------rm_prec----------------------------------------
// Remove a precedence input.  Precedence inputs are unordered, with
// duplicates removed and NULLs packed down at the end.
void Node::rm_prec( uint j ) {
  assert(j < _max, err_msg("oob: i=%d, _max=%d", j, _max));
  assert(j >= _cnt, "not a precedence edge");
  if (_in[j] == NULL) return;   // Avoid spec violation: Gap in prec edges.
  _in[j]->del_out((Node *)this);
  close_prec_gap_at(j);
}

//------------------------------size_of----------------------------------------
uint Node::size_of() const { return sizeof(*this); }

//------------------------------ideal_reg--------------------------------------
uint Node::ideal_reg() const { return 0; }

//------------------------------jvms-------------------------------------------
JVMState* Node::jvms() const { return NULL; }

#ifdef ASSERT
//------------------------------jvms-------------------------------------------
bool Node::verify_jvms(const JVMState* using_jvms) const {
  for (JVMState* jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) {
    if (jvms == using_jvms)  return true;
  }
  return false;
}

//------------------------------init_NodeProperty------------------------------
void Node::init_NodeProperty() {
  assert(_max_classes <= max_jushort, "too many NodeProperty classes");
  assert(_max_flags <= max_jushort, "too many NodeProperty flags");
}
#endif

//------------------------------format-----------------------------------------
// Print as assembly
void Node::format( PhaseRegAlloc *, outputStream *st ) const {}
//------------------------------emit-------------------------------------------
// Emit bytes starting at parameter 'ptr'.
void Node::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {}
//------------------------------size-------------------------------------------
// Size of instruction in bytes
uint Node::size(PhaseRegAlloc *ra_) const { return 0; }

//------------------------------CFG Construction-------------------------------
// Nodes that end basic blocks, e.g. IfTrue/IfFalse, JumpProjNode, Root,
// Goto and Return.
const Node *Node::is_block_proj() const { return 0; }

// Minimum guaranteed type
const Type *Node::bottom_type() const { return Type::BOTTOM; }


//------------------------------raise_bottom_type------------------------------
// Get the worst-case Type output for this Node.
void Node::raise_bottom_type(const Type* new_type) {
  if (is_Type()) {
    TypeNode *n = this->as_Type();
    if (VerifyAliases) {
      assert(new_type->higher_equal_speculative(n->type()), "new type must refine old type");
    }
    n->set_type(new_type);
  } else if (is_Load()) {
    LoadNode *n = this->as_Load();
    if (VerifyAliases) {
      assert(new_type->higher_equal_speculative(n->type()), "new type must refine old type");
    }
    n->set_type(new_type);
  }
}

//------------------------------Identity---------------------------------------
// Return a node that the given node is equivalent to.
Node *Node::Identity( PhaseTransform * ) {
  return this;                  // Default to no identities
}

//------------------------------Value------------------------------------------
// Compute a new Type for a node using the Type of the inputs.
const Type *Node::Value( PhaseTransform * ) const {
  return bottom_type();         // Default to worst-case Type
}

//------------------------------Ideal------------------------------------------
//
// 'Idealize' the graph rooted at this Node.
//
// In order to be efficient and flexible there are some subtle invariants
// these Ideal calls need to hold.  Running with '+VerifyIterativeGVN' checks
// these invariants, although it is too slow to have on by default.  If you are
// hacking an Ideal call, be sure to test with +VerifyIterativeGVN!
//
// The Ideal call almost arbitrarily reshapes the graph rooted at the 'this'
// pointer.  If ANY change is made, it must return the root of the reshaped
// graph - even if the root is the same Node.  Example: swapping the inputs
// to an AddINode gives the same answer and same root, but you still have to
// return the 'this' pointer instead of NULL.
//
// You cannot return an OLD Node, except for the 'this' pointer.  Use the
// Identity call to return an old Node; basically, if Identity can find
// another Node, have the Ideal call make no change and return NULL.
// Example: AddINode::Ideal must check for add of zero; in this case it
// returns NULL instead of doing any graph reshaping.
//
// You cannot modify any old Nodes except for the 'this' pointer.  Due to
// sharing there may be other users of the old Nodes relying on their current
// semantics.  Modifying them will break the other users.
// Example: when reshaping "(X+3)+4" into "X+7" you must leave the Node for
// "X+3" unchanged in case it is shared.
//
// If you modify the 'this' pointer's inputs, you should use
// 'set_req'.  If you are making a new Node (either as the new root or
// some new internal piece) you may use 'init_req' to set the initial
// value.  You can make a new Node with either 'new' or 'clone'.  In
// either case, def-use info is correctly maintained.
//
// Example: reshape "(X+3)+4" into "X+7":
//    set_req(1, in(1)->in(1));
//    set_req(2, phase->intcon(7));
//    return this;
// Example: reshape "X*4" into "X<<2"
//    return new (C) LShiftINode(in(1), phase->intcon(2));
//
// You must call 'phase->transform(X)' on any new Nodes X you make, except
// for the returned root node.  Example: reshape "X*31" into "(X<<5)-X".
//    Node *shift = phase->transform(new (C) LShiftINode(in(1), phase->intcon(5)));
//    return new (C) AddINode(shift, in(1));
//
// When making a Node for a constant use 'phase->makecon' or 'phase->intcon'.
// These forms are faster than 'phase->transform(new (C) ConNode())' and Do
// The Right Thing with def-use info.
//
// You cannot bury the 'this' Node inside of a graph reshape.  If the reshaped
// graph uses the 'this' Node it must be the root.  If you want a Node with
// the same Opcode as the 'this' pointer use 'clone'.
//
Node *Node::Ideal(PhaseGVN *phase, bool can_reshape) {
  return NULL;                  // Default to being Ideal already
}

// Some nodes have specific Ideal subgraph transformations only if they are
// unique users of specific nodes. Such nodes should be put on the IGVN worklist
// for the transformations to happen.
bool Node::has_special_unique_user() const {
  assert(outcnt() == 1, "match only for unique out");
  Node* n = unique_out();
  int op  = Opcode();
  if( this->is_Store() ) {
    // Condition for back-to-back stores folding.
    return n->Opcode() == op && n->in(MemNode::Memory) == this;
  } else if (this->is_Load() || this->is_DecodeN()) {
    // Condition for removing an unused LoadNode or DecodeNNode from the MemBarAcquire precedence input
    return n->Opcode() == Op_MemBarAcquire;
  } else if( op == Op_AddL ) {
    // Condition for convL2I(addL(x,y)) ==> addI(convL2I(x),convL2I(y))
    return n->Opcode() == Op_ConvL2I && n->in(1) == this;
  } else if( op == Op_SubI || op == Op_SubL ) {
    // Condition for subI(x,subI(y,z)) ==> subI(addI(x,z),y)
    return n->Opcode() == op && n->in(2) == this;
  }
  return false;
};

//--------------------------find_exact_control---------------------------------
// Skip Proj and CatchProj node chains. Check for Null and Top.
Node* Node::find_exact_control(Node* ctrl) {
  if (ctrl == NULL && this->is_Region())
    ctrl = this->as_Region()->is_copy();

  if (ctrl != NULL && ctrl->is_CatchProj()) {
    if (ctrl->as_CatchProj()->_con == CatchProjNode::fall_through_index)
      ctrl = ctrl->in(0);
    if (ctrl != NULL && !ctrl->is_top())
      ctrl = ctrl->in(0);
  }

  if (ctrl != NULL && ctrl->is_Proj())
    ctrl = ctrl->in(0);

  return ctrl;
}

//--------------------------dominates------------------------------------------
// Helper function for MemNode::all_controls_dominate().
// Check if 'this' control node dominates or is equal to 'sub' control node.
// We already know that if any path back to Root or Start reaches 'this',
// then all paths do, so this is a simple search for one example,
// not an exhaustive search for a counterexample.
bool Node::dominates(Node* sub, Node_List &nlist) {
  assert(this->is_CFG(), "expecting control");
  assert(sub != NULL && sub->is_CFG(), "expecting control");

  // detect dead cycle without regions
  int iterations_without_region_limit = DominatorSearchLimit;

  Node* orig_sub = sub;
  Node* dom      = this;
  bool  met_dom  = false;
  nlist.clear();

  // Walk 'sub' backward up the chain to 'dom', watching for regions.
  // After seeing 'dom', continue up to Root or Start.
  // If we hit a region (backward split point), it may be a loop head.
  // Keep going through one of the region's inputs.  If we reach the
  // same region again, go through a different input.  Eventually we
  // will either exit through the loop head, or give up.
  // (If we get confused, break out and return a conservative 'false'.)
  while (sub != NULL) {
    if (sub->is_top())  break; // Conservative answer for dead code.
    if (sub == dom) {
      if (nlist.size() == 0) {
        // No Region nodes except loops were visited before and the EntryControl
        // path was taken for loops: it did not walk in a cycle.
        return true;
      } else if (met_dom) {
        break;          // already met before: walk in a cycle
      } else {
        // Region nodes were visited. Continue walk up to Start or Root
        // to make sure that it did not walk in a cycle.
        met_dom = true; // first time meet
        iterations_without_region_limit = DominatorSearchLimit; // Reset
      }
    }
    if (sub->is_Start() || sub->is_Root()) {
      // Success if we met 'dom' along a path to Start or Root.
      // We assume there are no alternative paths that avoid 'dom'.
      // (This assumption is up to the caller to ensure!)
      return met_dom;
    }
    Node* up = sub->in(0);
    // Normalize simple pass-through regions and projections:
    up = sub->find_exact_control(up);
    // If sub == up, we found a self-loop.  Try to push past it.
    if (sub == up && sub->is_Loop()) {
      // Take loop entry path on the way up to 'dom'.
      up = sub->in(1); // in(LoopNode::EntryControl);
    } else if (sub == up && sub->is_Region() && sub->req() != 3) {
      // Always take in(1) path on the way up to 'dom' for clone regions
      // (with only one input) or regions which merge > 2 paths
      // (usually used to merge fast/slow paths).
      up = sub->in(1);
    } else if (sub == up && sub->is_Region()) {
      // Try both paths for Regions with 2 input paths (it may be a loop head).
      // It could give a conservative 'false' answer when we do not know
      // which of the region's inputs is the entry path.
      iterations_without_region_limit = DominatorSearchLimit; // Reset

      bool region_was_visited_before = false;
      // Was this Region node visited before?
      // If so, we have reached it because we accidentally took a
      // loop-back edge from 'sub' back into the body of the loop,
      // and worked our way up again to the loop header 'sub'.
      // So, take the first unexplored path on the way up to 'dom'.
      for (int j = nlist.size() - 1; j >= 0; j--) {
        intptr_t ni = (intptr_t)nlist.at(j);
        Node* visited = (Node*)(ni & ~1);
        bool  visited_twice_already = ((ni & 1) != 0);
        if (visited == sub) {
          if (visited_twice_already) {
            // Visited 2 paths, but still stuck in loop body.  Give up.
            return false;
          }
          // The Region node was visited before only once.
          // (We will repush with the low bit set, below.)
          nlist.remove(j);
          // We will find a new edge and re-insert.
          region_was_visited_before = true;
          break;
        }
      }

      // Find an incoming edge which has not been seen yet; walk through it.
      assert(up == sub, "");
      uint skip = region_was_visited_before ? 1 : 0;
      for (uint i = 1; i < sub->req(); i++) {
        Node* in = sub->in(i);
        if (in != NULL && !in->is_top() && in != sub) {
          if (skip == 0) {
            up = in;
            break;
          }
          --skip; // skip this nontrivial input
        }
      }

      // Set 0 bit to indicate that both paths were taken.
      nlist.push((Node*)((intptr_t)sub + (region_was_visited_before ? 1 : 0)));
    }

    if (up == sub) {
      break;    // some kind of tight cycle
    }
    if (up == orig_sub && met_dom) {
      // returned back after visiting 'dom'
      break;    // some kind of cycle
    }
    if (--iterations_without_region_limit < 0) {
      break;    // dead cycle
    }
    sub = up;
  }

  // Did not meet Root or Start node in pred. chain.
  // Conservative answer for dead code.
  return false;
}

//------------------------------remove_dead_region-----------------------------
// This control node is dead.  Follow the subgraph below it making everything
// using it dead as well.  This will happen normally via the usual IterGVN
// worklist but this call is more efficient.  Do not update use-def info
// inside the dead region, just at the borders.
static void kill_dead_code( Node *dead, PhaseIterGVN *igvn ) {
  // Con's are a popular node to re-hit in the hash table again.
  if( dead->is_Con() ) return;

  // Can't put ResourceMark here since igvn->_worklist uses the same arena
  // for verify pass with +VerifyOpto and we add/remove elements in it here.
  Node_List nstack(Thread::current()->resource_area());

  Node *top = igvn->C->top();
  nstack.push(dead);
  bool has_irreducible_loop = igvn->C->has_irreducible_loop();

  while (nstack.size() > 0) {
    dead = nstack.pop();
    if (dead->Opcode() == Op_SafePoint) {
      dead->as_SafePoint()->disconnect_from_root(igvn);
    }
    if (dead->outcnt() > 0) {
      // Keep dead node on stack until all uses are processed.
      nstack.push(dead);
      // For all Users of the Dead...    ;-)
      for (DUIterator_Last kmin, k = dead->last_outs(kmin); k >= kmin; ) {
        Node* use = dead->last_out(k);
        igvn->hash_delete(use);       // Yank from hash table prior to mod
        if (use->in(0) == dead) {     // Found another dead node
          assert (!use->is_Con(), "Control for Con node should be Root node.");
          use->set_req(0, top);       // Cut dead edge to prevent processing
          nstack.push(use);           // the dead node again.
        } else if (!has_irreducible_loop && // Backedge could be alive in irreducible loop
                   use->is_Loop() && !use->is_Root() &&       // Don't kill Root (RootNode extends LoopNode)
                   use->in(LoopNode::EntryControl) == dead) { // Dead loop if its entry is dead
          use->set_req(LoopNode::EntryControl, top);          // Cut dead edge to prevent processing
          use->set_req(0, top);       // Cut self edge
          nstack.push(use);
        } else {                      // Else found a not-dead user
          // Dead if all inputs are top or null
          bool dead_use = !use->is_Root(); // Keep empty graph alive
          for (uint j = 1; j < use->req(); j++) {
            Node* in = use->in(j);
            if (in == dead) {         // Turn all dead inputs into TOP
              use->set_req(j, top);
            } else if (in != NULL && !in->is_top()) {
              dead_use = false;
            }
          }
          if (dead_use) {
            if (use->is_Region()) {
              use->set_req(0, top);   // Cut self edge
            }
            nstack.push(use);
          } else {
            igvn->_worklist.push(use);
          }
        }
        // Refresh the iterator, since any number of kills might have happened.
        k = dead->last_outs(kmin);
      }
    } else { // (dead->outcnt() == 0)
      // Done with outputs.
      igvn->hash_delete(dead);
      igvn->_worklist.remove(dead);
      igvn->set_type(dead, Type::TOP);
      if (dead->is_macro()) {
        igvn->C->remove_macro_node(dead);
      }
      if (dead->is_expensive()) {
        igvn->C->remove_expensive_node(dead);
      }
      if (dead->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
        igvn->C->remove_shenandoah_barrier(reinterpret_cast<ShenandoahLoadReferenceBarrierNode*>(dead));
      }
      CastIINode* cast = dead->isa_CastII();
      if (cast != NULL && cast->has_range_check()) {
        igvn->C->remove_range_check_cast(cast);
      }
      igvn->C->record_dead_node(dead->_idx);
      // Kill all inputs to the dead guy
      for (uint i=0; i < dead->req(); i++) {
        Node *n = dead->in(i);      // Get input to dead guy
        if (n != NULL && !n->is_top()) { // Input is valid?
          dead->set_req(i, top);    // Smash input away
          if (n->outcnt() == 0) {   // Input also goes dead?
            if (!n->is_Con())
              nstack.push(n);       // Clear it out as well
          } else if (n->outcnt() == 1 &&
                     n->has_special_unique_user()) {
            igvn->add_users_to_worklist( n );
          } else if (n->outcnt() <= 2 && n->is_Store()) {
            // Push store's uses on worklist to enable folding optimization for
            // store/store and store/load to the same address.
            // The restriction (outcnt() <= 2) is the same as in set_req_X()
            // and remove_globally_dead_node().
            igvn->add_users_to_worklist( n );
          } else if (n->Opcode() == Op_AddP && CallLeafNode::has_only_g1_wb_pre_uses(n)) {
            igvn->add_users_to_worklist(n);
          }
        }
      }
    } // (dead->outcnt() == 0)
  }   // while (nstack.size() > 0) for outputs
  return;
}

//------------------------------remove_dead_region-----------------------------
bool Node::remove_dead_region(PhaseGVN *phase, bool can_reshape) {
  Node *n = in(0);
  if( !n ) return false;
  // Lost control into this guy?  I.e., it became unreachable?
  // Aggressively kill all unreachable code.
  if (can_reshape && n->is_top()) {
    kill_dead_code(this, phase->is_IterGVN());
    return false; // Node is dead.
  }

  if( n->is_Region() && n->as_Region()->is_copy() ) {
    Node *m = n->nonnull_req();
    set_req(0, m);
    return true;
  }
  return false;
}

//------------------------------hash-------------------------------------------
// Hash function over Nodes.
uint Node::hash() const {
  uint sum = 0;
  for( uint i=0; i<_cnt; i++ )      // Add in all inputs
    sum = (sum<<1)-(uintptr_t)in(i);  // Ignore embedded NULLs
  return (sum>>2) + _cnt + Opcode();
}

//------------------------------cmp--------------------------------------------
// Compare special parts of simple Nodes
uint Node::cmp( const Node &n ) const {
  return 1;                     // Must be same
}

//------------------------------rematerialize-----------------------------------
// Should we clone rather than spill this instruction?
bool Node::rematerialize() const {
  if ( is_Mach() )
    return this->as_Mach()->rematerialize();
  else
    return (_flags & Flag_rematerialize) != 0;
}

//------------------------------needs_anti_dependence_check---------------------
// Nodes which use memory without consuming it, hence need antidependences.
bool Node::needs_anti_dependence_check() const {
  if( req() < 2 || (_flags & Flag_needs_anti_dependence_check) == 0 )
    return false;
  else
    return in(1)->bottom_type()->has_memory();
}


// Get an integer constant from a ConNode (or CastIINode).
// Return a default value if there is no apparent constant here.
const TypeInt* Node::find_int_type() const {
  if (this->is_Type()) {
    return this->as_Type()->type()->isa_int();
  } else if (this->is_Con()) {
    assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode");
    return this->bottom_type()->isa_int();
  }
  return NULL;
}

// Get a pointer constant from a ConstNode.
// Returns the constant if it is a pointer ConstNode
intptr_t Node::get_ptr() const {
  assert( Opcode() == Op_ConP, "" );
  return ((ConPNode*)this)->type()->is_ptr()->get_con();
}

// Get a narrow oop constant from a ConNNode.

// Get a narrow oop constant from a ConNNode.
intptr_t Node::get_narrowcon() const {
  assert( Opcode() == Op_ConN, "" );
  return ((ConNNode*)this)->type()->is_narrowoop()->get_con();
}

// Get a long constant from a ConNode.
// Return a default value if there is no apparent constant here.
const TypeLong* Node::find_long_type() const {
  if (this->is_Type()) {
    return this->as_Type()->type()->isa_long();
  } else if (this->is_Con()) {
    assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode");
    return this->bottom_type()->isa_long();
  }
  return NULL;
}


/**
 * Return a ptr type for nodes which should have it.
 */
const TypePtr* Node::get_ptr_type() const {
  const TypePtr* tp = this->bottom_type()->make_ptr();
#ifdef ASSERT
  if (tp == NULL) {
    this->dump(1);
    assert((tp != NULL), "unexpected node type");
  }
#endif
  return tp;
}

// Get a double constant from a ConstNode.
// Returns the constant if it is a double ConstNode
jdouble Node::getd() const {
  assert( Opcode() == Op_ConD, "" );
  return ((ConDNode*)this)->type()->is_double_constant()->getd();
}

// Get a float constant from a ConstNode.
// Returns the constant if it is a float ConstNode
jfloat Node::getf() const {
  assert( Opcode() == Op_ConF, "" );
  return ((ConFNode*)this)->type()->is_float_constant()->getf();
}
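
// Note: unlike find_int_type()/find_long_type(), the get_ptr()/get_narrowcon()/
// getd()/getf() accessors assert on the exact opcode, so callers are expected to
// check first, for example (illustrative only):
//   if (n->Opcode() == Op_ConF) { jfloat f = n->getf(); ... }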

#ifndef PRODUCT

//----------------------------NotANode----------------------------------------
// Used in debugging code to avoid walking across dead or uninitialized edges.
static inline bool NotANode(const Node* n) {
  if (n == NULL)                   return true;
  if (((intptr_t)n & 1) != 0)      return true;  // uninitialized, etc.
  if (*(address*)n == badAddress)  return true;  // kill by Node::destruct
  return false;
}


//------------------------------find------------------------------------------
// Find a neighbor of this Node with the given _idx
// If idx is negative, find its absolute value, following both _in and _out.
static void find_recur(Compile* C, Node* &result, Node *n, int idx, bool only_ctrl,
                       VectorSet* old_space, VectorSet* new_space ) {
  int node_idx = (idx >= 0) ? idx : -idx;
  if (NotANode(n))  return;  // Gracefully handle NULL, -1, 0xabababab, etc.
  // Contained in new_space or old_space?  Check old_arena first since it's mostly empty.
  VectorSet *v = C->old_arena()->contains(n) ? old_space : new_space;
  if( v->test(n->_idx) ) return;
  if( (int)n->_idx == node_idx
      debug_only(|| n->debug_idx() == node_idx) ) {
    if (result != NULL)
      tty->print("find: " INTPTR_FORMAT " and " INTPTR_FORMAT " both have idx==%d\n",
                 (uintptr_t)result, (uintptr_t)n, node_idx);
    result = n;
  }
  v->set(n->_idx);
  for( uint i=0; i<n->len(); i++ ) {
    if( only_ctrl && !(n->is_Region()) && (n->Opcode() != Op_Root) && (i != TypeFunc::Control) ) continue;
    find_recur(C, result, n->in(i), idx, only_ctrl, old_space, new_space );
  }
  // Search along forward edges also:
  if (idx < 0 && !only_ctrl) {
    for( uint j=0; j<n->outcnt(); j++ ) {
      find_recur(C, result, n->raw_out(j), idx, only_ctrl, old_space, new_space );
    }
  }
#ifdef ASSERT
  // Search along debug_orig edges last, checking for cycles
  Node* orig = n->debug_orig();
  if (orig != NULL) {
    do {
      if (NotANode(orig))  break;
      find_recur(C, result, orig, idx, only_ctrl, old_space, new_space );
      orig = orig->debug_orig();
    } while (orig != NULL && orig != n->debug_orig());
  }
#endif //ASSERT
}

// call this from debugger:
Node* find_node(Node* n, int idx) {
  return n->find(idx);
}

//------------------------------find-------------------------------------------
Node* Node::find(int idx) const {
  ResourceArea *area = Thread::current()->resource_area();
  VectorSet old_space(area), new_space(area);
  Node* result = NULL;
  find_recur(Compile::current(), result, (Node*) this, idx, false, &old_space, &new_space );
  return result;
}

//------------------------------find_ctrl--------------------------------------
// Find an ancestor to this node in the control history with given _idx
Node* Node::find_ctrl(int idx) const {
  ResourceArea *area = Thread::current()->resource_area();
  VectorSet old_space(area), new_space(area);
  Node* result = NULL;
  find_recur(Compile::current(), result, (Node*) this, idx, true, &old_space, &new_space );
  return result;
}
#endif
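
// Debugging note (illustrative): find()/find_ctrl() and the free function
// find_node() above are intended to be invoked from a debugger session, e.g.
// from gdb:  call find_node(n, 1234)
// A negative idx searches both input and output edges, as described above.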


#ifndef PRODUCT

// -----------------------------Name-------------------------------------------
extern const char *NodeClassNames[];
const char *Node::Name() const { return NodeClassNames[Opcode()]; }

static bool is_disconnected(const Node* n) {
  for (uint i = 0; i < n->req(); i++) {
    if (n->in(i) != NULL)  return false;
  }
  return true;
}

#ifdef ASSERT
static void dump_orig(Node* orig, outputStream *st) {
  Compile* C = Compile::current();
  if (NotANode(orig)) orig = NULL;
  if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL;
  if (orig == NULL) return;
  st->print(" !orig=");
  Node* fast = orig->debug_orig(); // tortoise & hare algorithm to detect loops
  if (NotANode(fast)) fast = NULL;
  while (orig != NULL) {
    bool discon = is_disconnected(orig);  // if discon, print [123] else 123
    if (discon) st->print("[");
    if (!Compile::current()->node_arena()->contains(orig))
      st->print("o");
    st->print("%d", orig->_idx);
    if (discon) st->print("]");
    orig = orig->debug_orig();
    if (NotANode(orig)) orig = NULL;
    if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL;
    if (orig != NULL) st->print(",");
    if (fast != NULL) {
      // Step fast twice for each single step of orig:
      fast = fast->debug_orig();
      if (NotANode(fast)) fast = NULL;
      if (fast != NULL && fast != orig) {
        fast = fast->debug_orig();
        if (NotANode(fast)) fast = NULL;
      }
      if (fast == orig) {
        st->print("...");
        break;
      }
    }
  }
}

void Node::set_debug_orig(Node* orig) {
  _debug_orig = orig;
  if (BreakAtNode == 0)  return;
  if (NotANode(orig))  orig = NULL;
  int trip = 10;
  while (orig != NULL) {
    if (orig->debug_idx() == BreakAtNode || (int)orig->_idx == BreakAtNode) {
      tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d orig._idx=%d orig._debug_idx=%d",
                    this->_idx, this->debug_idx(), orig->_idx, orig->debug_idx());
      BREAKPOINT;
    }
    orig = orig->debug_orig();
    if (NotANode(orig))  orig = NULL;
    if (trip-- <= 0)  break;
  }
}
#endif //ASSERT

//------------------------------dump------------------------------------------
// Dump a Node
void Node::dump(const char* suffix, outputStream *st) const {
  Compile* C = Compile::current();
  bool is_new = C->node_arena()->contains(this);
  C->_in_dump_cnt++;
  st->print("%c%d\t%s\t=== ", is_new ? ' ' : 'o', _idx, Name());

  // Dump the required and precedence inputs
  dump_req(st);
  dump_prec(st);
  // Dump the outputs
  dump_out(st);

  if (is_disconnected(this)) {
#ifdef ASSERT
    st->print(" [%d]",debug_idx());
    dump_orig(debug_orig(), st);
#endif
    st->cr();
    C->_in_dump_cnt--;
    return;                     // don't process dead nodes
  }

  // Dump node-specific info
  dump_spec(st);
#ifdef ASSERT
  // Dump the non-reset _debug_idx
  if (Verbose && WizardMode) {
    st->print(" [%d]",debug_idx());
  }
#endif

  const Type *t = bottom_type();

  if (t != NULL && (t->isa_instptr() || t->isa_klassptr())) {
    const TypeInstPtr  *toop = t->isa_instptr();
    const TypeKlassPtr *tkls = t->isa_klassptr();
    ciKlass* klass = toop ? toop->klass() : (tkls ? tkls->klass() : NULL );
    if (klass && klass->is_loaded() && klass->is_interface()) {
      st->print(" Interface:");
    } else if (toop) {
      st->print(" Oop:");
    } else if (tkls) {
      st->print(" Klass:");
    }
    t->dump_on(st);
  } else if (t == Type::MEMORY) {
    st->print(" Memory:");
    MemNode::dump_adr_type(this, adr_type(), st);
  } else if (Verbose || WizardMode) {
    st->print(" Type:");
    if (t) {
      t->dump_on(st);
    } else {
      st->print("no type");
    }
  } else if (t->isa_vect() && this->is_MachSpillCopy()) {
    // Dump MachSpillcopy vector type.
    t->dump_on(st);
  }
  if (is_new) {
    debug_only(dump_orig(debug_orig(), st));
    Node_Notes* nn = C->node_notes_at(_idx);
    if (nn != NULL && !nn->is_clear()) {
      if (nn->jvms() != NULL) {
        st->print(" !jvms:");
        nn->jvms()->dump_spec(st);
      }
    }
  }
  if (suffix) st->print("%s", suffix);
  C->_in_dump_cnt--;
}
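
// Dump format note: the single-node dump above prints the node index and Name()
// (prefixed with 'o' when the node does not live in the current node arena),
// then "===", the required inputs, the precedence inputs (after " |"), the
// out-edges inside "[[ ]]", node-specific info from dump_spec(), and finally
// type information when available.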

//------------------------------dump_req--------------------------------------
void Node::dump_req(outputStream *st) const {
  // Dump the required input edges
  for (uint i = 0; i < req(); i++) {    // For all required inputs
    Node* d = in(i);
    if (d == NULL) {
      st->print("_ ");
    } else if (NotANode(d)) {
      st->print("NotANode ");  // uninitialized, sentinel, garbage, etc.
    } else {
      st->print("%c%d ", Compile::current()->node_arena()->contains(d) ? ' ' : 'o', d->_idx);
    }
  }
}


//------------------------------dump_prec-------------------------------------
void Node::dump_prec(outputStream *st) const {
  // Dump the precedence edges
  int any_prec = 0;
  for (uint i = req(); i < len(); i++) {       // For all precedence inputs
    Node* p = in(i);
    if (p != NULL) {
      if (!any_prec++) st->print(" |");
      if (NotANode(p)) { st->print("NotANode "); continue; }
      st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    }
  }
}

//------------------------------dump_out--------------------------------------
void Node::dump_out(outputStream *st) const {
  // Delimit the output edges
  st->print(" [[");
  // Dump the output edges
  for (uint i = 0; i < _outcnt; i++) {    // For all outputs
    Node* u = _out[i];
    if (u == NULL) {
      st->print("_ ");
    } else if (NotANode(u)) {
      st->print("NotANode ");
    } else {
      st->print("%c%d ", Compile::current()->node_arena()->contains(u) ? ' ' : 'o', u->_idx);
    }
  }
  st->print("]] ");
}

//------------------------------dump_nodes-------------------------------------
static void dump_nodes(const Node* start, int d, bool only_ctrl) {
  Node* s = (Node*)start; // remove const
  if (NotANode(s)) return;

  uint depth = (uint)ABS(d);
  int direction = d;
  Compile* C = Compile::current();
  GrowableArray <Node *> nstack(C->live_nodes());

  nstack.append(s);
  int begin = 0;
  int end = 0;
  for(uint i = 0; i < depth; i++) {
    end = nstack.length();
    for(int j = begin; j < end; j++) {
      Node* tp = nstack.at(j);
      uint limit = direction > 0 ? tp->len() : tp->outcnt();
      for(uint k = 0; k < limit; k++) {
        Node* n = direction > 0 ? tp->in(k) : tp->raw_out(k);

        if (NotANode(n))  continue;
        // do not recurse through top or the root (would reach unrelated stuff)
        if (n->is_Root() || n->is_top())  continue;
        if (only_ctrl && !n->is_CFG()) continue;

        bool on_stack = nstack.contains(n);
        if (!on_stack) {
          nstack.append(n);
        }
      }
    }
    begin = end;
  }
  end = nstack.length();
  if (direction > 0) {
    for(int j = end-1; j >= 0; j--) {
      nstack.at(j)->dump();
    }
  } else {
    for(int j = 0; j < end; j++) {
      nstack.at(j)->dump();
    }
  }
}

//------------------------------dump-------------------------------------------
void Node::dump(int d) const {
  dump_nodes(this, d, false);
}

//------------------------------dump_ctrl--------------------------------------
// Dump a Node's control history to depth
void Node::dump_ctrl(int d) const {
  dump_nodes(this, d, true);
}
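
// Usage note (illustrative): for Node::dump(int d) and Node::dump_ctrl(int d),
// the sign of d selects the direction walked by dump_nodes() above -- a positive
// depth follows input edges (defs), a negative depth follows output edges (uses).
// For example, n->dump(3) prints n together with its inputs up to three levels back.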

// VERIFICATION CODE
// For each input edge to a node (ie - for each Use-Def edge), verify that
// there is a corresponding Def-Use edge.
//------------------------------verify_edges-----------------------------------
void Node::verify_edges(Unique_Node_List &visited) {
  uint i, j, idx;
  int  cnt;
  Node *n;

  // Recursive termination test
  if (visited.member(this))  return;
  visited.push(this);

  // Walk over all input edges, checking for correspondence
  for( i = 0; i < len(); i++ ) {
    n = in(i);
    if (n != NULL && !n->is_top()) {
      // Count instances of (Node *)this
      cnt = 0;
      for (idx = 0; idx < n->_outcnt; idx++ ) {
        if (n->_out[idx] == (Node *)this)  cnt++;
      }
      assert( cnt > 0,"Failed to find Def-Use edge." );
      // Check for duplicate edges
      // walk the input array downcounting the input edges to n
      for( j = 0; j < len(); j++ ) {
        if( in(j) == n ) cnt--;
      }
      assert( cnt == 0,"Mismatched edge count.");
    } else if (n == NULL) {
      assert(i >= req() || i == 0 || is_Region() || is_Phi(), "only regions or phis have null data edges");
    } else {
      assert(n->is_top(), "sanity");
      // Nothing to check.
    }
  }
  // Recursive walk over all input edges
  for( i = 0; i < len(); i++ ) {
    n = in(i);
    if( n != NULL )
      in(i)->verify_edges(visited);
  }
}
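
// Summary of the check above: for every non-top input n of this node, the number
// of out-edges from n back to this node must exactly match the number of input
// slots of this node that reference n, i.e. the use-def and def-use edge counts
// agree and there are no duplicate or missing back-edges.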

//------------------------------verify_recur-----------------------------------
static const Node *unique_top = NULL;

void Node::verify_recur(const Node *n, int verify_depth,
                        VectorSet &old_space, VectorSet &new_space) {
  if ( verify_depth == 0 )  return;
  if (verify_depth > 0)  --verify_depth;

  Compile* C = Compile::current();

  // Contained in new_space or old_space?
  VectorSet *v = C->node_arena()->contains(n) ? &new_space : &old_space;
  // Check for visited in the proper space.  Numberings are not unique
  // across spaces so we need a separate VectorSet for each space.
  if( v->test_set(n->_idx) ) return;

  if (n->is_Con() && n->bottom_type() == Type::TOP) {
    if (C->cached_top_node() == NULL)
      C->set_cached_top_node((Node*)n);
    assert(C->cached_top_node() == n, "TOP node must be unique");
  }

  for( uint i = 0; i < n->len(); i++ ) {
    Node *x = n->in(i);
    if (!x || x->is_top()) continue;

    // Verify my input has a def-use edge to me
    if (true /*VerifyDefUse*/) {
      // Count use-def edges from n to x
      int cnt = 0;
      for( uint j = 0; j < n->len(); j++ )
        if( n->in(j) == x )
          cnt++;
      // Count def-use edges from x to n
      uint max = x->_outcnt;
      for( uint k = 0; k < max; k++ )
        if (x->_out[k] == n)
          cnt--;
      assert( cnt == 0, "mismatched def-use edge counts" );
    }

    verify_recur(x, verify_depth, old_space, new_space);
  }

}

//------------------------------verify-----------------------------------------
// Check Def-Use info for my subgraph
void Node::verify() const {
  Compile* C = Compile::current();
  Node* old_top = C->cached_top_node();
  ResourceMark rm;
  ResourceArea *area = Thread::current()->resource_area();
  VectorSet old_space(area), new_space(area);
  verify_recur(this, -1, old_space, new_space);
  C->set_cached_top_node(old_top);
}
#endif


//------------------------------walk-------------------------------------------
// Graph walk, with both pre-order and post-order functions
void Node::walk(NFunc pre, NFunc post, void *env) {
  VectorSet visited(Thread::current()->resource_area()); // Setup for local walk
  walk_(pre, post, env, visited);
}

void Node::walk_(NFunc pre, NFunc post, void *env, VectorSet &visited) {
  if( visited.test_set(_idx) ) return;
  pre(*this,env);               // Call the pre-order walk function
  for( uint i=0; i<_max; i++ )
    if( in(i) )                 // Input exists and is not walked?
      in(i)->walk_(pre,post,env,visited); // Walk it with pre & post functions
  post(*this,env);              // Call the post-order walk function
}

void Node::nop(Node &, void*) {}
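
// Illustrative sketch (the callback name here is hypothetical): walk() takes two
// callbacks matching the NFunc signature of Node::nop above.  Passing Node::nop
// for the side that is not needed gives a plain pre- or post-order traversal of
// the input graph, e.g.
//   static void count_node(Node &n, void *env) { (*(int*)env)++; }
//   int cnt = 0;
//   root->walk(count_node, Node::nop, &cnt);   // pre-order count of reachable defs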

//------------------------------Registers--------------------------------------
// Do we Match on this edge index or not?  Generally false for Control
// and true for everything else.  Weird for calls & returns.
uint Node::match_edge(uint idx) const {
  return idx;                   // True for other than index 0 (control)
}

static RegMask _not_used_at_all;
// Register classes are defined for specific machines
const RegMask &Node::out_RegMask() const {
  ShouldNotCallThis();
  return _not_used_at_all;
}

const RegMask &Node::in_RegMask(uint) const {
  ShouldNotCallThis();
  return _not_used_at_all;
}

//=============================================================================
//-----------------------------------------------------------------------------
void Node_Array::reset( Arena *new_arena ) {
  _a->Afree(_nodes,_max*sizeof(Node*));
  _max   = 0;
  _nodes = NULL;
  _a     = new_arena;
}

//------------------------------clear------------------------------------------
// Clear all entries in _nodes to NULL but keep storage
void Node_Array::clear() {
  Copy::zero_to_bytes( _nodes, _max*sizeof(Node*) );
}

//-----------------------------------------------------------------------------
void Node_Array::grow( uint i ) {
  if( !_max ) {
    _max = 1;
    _nodes = (Node**)_a->Amalloc( _max * sizeof(Node*) );
    _nodes[0] = NULL;
  }
  uint old = _max;
  while( i >= _max ) _max <<= 1;        // Double to fit
  _nodes = (Node**)_a->Arealloc( _nodes, old*sizeof(Node*),_max*sizeof(Node*));
  Copy::zero_to_bytes( &_nodes[old], (_max-old)*sizeof(Node*) );
}

//-----------------------------------------------------------------------------
void Node_Array::insert( uint i, Node *n ) {
  if( _nodes[_max-1] ) grow(_max);      // Get more space if full
  Copy::conjoint_words_to_higher((HeapWord*)&_nodes[i], (HeapWord*)&_nodes[i+1], ((_max-i-1)*sizeof(Node*)));
  _nodes[i] = n;
}

//-----------------------------------------------------------------------------
void Node_Array::remove( uint i ) {
  Copy::conjoint_words_to_lower((HeapWord*)&_nodes[i+1], (HeapWord*)&_nodes[i], ((_max-i-1)*sizeof(Node*)));
  _nodes[_max-1] = NULL;
}

//-----------------------------------------------------------------------------
void Node_Array::sort( C_sort_func_t func) {
  qsort( _nodes, _max, sizeof( Node* ), func );
}

//-----------------------------------------------------------------------------
void Node_Array::dump() const {
#ifndef PRODUCT
  for( uint i = 0; i < _max; i++ ) {
    Node *nn = _nodes[i];
    if( nn != NULL ) {
      tty->print("%5d--> ",i); nn->dump();
    }
  }
#endif
}
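
// Node_Array note: the array is arena-allocated and grow() doubles the capacity
// until the requested index fits, zero-filling the new tail so unused slots read
// as NULL.  insert() and remove() shift the tail with word copies, so both are
// O(_max); insert() grows first whenever the last slot is already occupied.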

//--------------------------is_iteratively_computed------------------------------
// Operation appears to be iteratively computed (such as an induction variable).
// It is possible for this operation to return false for a loop-varying
// value, if it appears (by local graph inspection) to be computed by a simple conditional.
bool Node::is_iteratively_computed() {
  if (ideal_reg()) { // does operation have a result register?
    for (uint i = 1; i < req(); i++) {
      Node* n = in(i);
      if (n != NULL && n->is_Phi()) {
        for (uint j = 1; j < n->req(); j++) {
          if (n->in(j) == this) {
            return true;
          }
        }
      }
    }
  }
  return false;
}

//--------------------------find_similar------------------------------
// Return a node with opcode "opc" and same inputs as "this" if one can
// be found; otherwise return NULL.
Node* Node::find_similar(int opc) {
  if (req() >= 2) {
    Node* def = in(1);
    if (def && def->outcnt() >= 2) {
      for (DUIterator_Fast dmax, i = def->fast_outs(dmax); i < dmax; i++) {
        Node* use = def->fast_out(i);
        if (use->Opcode() == opc &&
            use->req() == req()) {
          uint j;
          for (j = 0; j < use->req(); j++) {
            if (use->in(j) != in(j)) {
              break;
            }
          }
          if (j == use->req()) {
            return use;
          }
        }
      }
    }
  }
  return NULL;
}


//--------------------------unique_ctrl_out------------------------------
// Return the unique control out if only one. Null if none or more than one.
Node* Node::unique_ctrl_out() {
  Node* found = NULL;
  for (uint i = 0; i < outcnt(); i++) {
    Node* use = raw_out(i);
    if (use->is_CFG() && use != this) {
      if (found != NULL) return NULL;
      found = use;
    }
  }
  return found;
}

void Node::ensure_control_or_add_prec(Node* c) {
  if (in(0) == NULL) {
    set_req(0, c);
  } else if (in(0) != c) {
    add_prec(c);
  }
}

//=============================================================================
//------------------------------yank-------------------------------------------
// Find and remove
void Node_List::yank( Node *n ) {
  uint i;
  for( i = 0; i < _cnt; i++ )
    if( _nodes[i] == n )
      break;

  if( i < _cnt )
    _nodes[i] = _nodes[--_cnt];
}

//------------------------------dump-------------------------------------------
void Node_List::dump() const {
#ifndef PRODUCT
  for( uint i = 0; i < _cnt; i++ )
    if( _nodes[i] ) {
      tty->print("%5d--> ",i);
      _nodes[i]->dump();
    }
#endif
}

void Node_List::dump_simple() const {
#ifndef PRODUCT
  for( uint i = 0; i < _cnt; i++ )
    if( _nodes[i] ) {
      tty->print(" %d", _nodes[i]->_idx);
    } else {
      tty->print(" NULL");
    }
#endif
}

//=============================================================================
//------------------------------remove-----------------------------------------
void Unique_Node_List::remove( Node *n ) {
  if( _in_worklist[n->_idx] ) {
    for( uint i = 0; i < size(); i++ )
      if( _nodes[i] == n ) {
        map(i,Node_List::pop());
        _in_worklist >>= n->_idx;
        return;
      }
    ShouldNotReachHere();
  }
}

//-----------------------remove_useless_nodes----------------------------------
// Remove useless nodes from worklist
void Unique_Node_List::remove_useless_nodes(VectorSet &useful) {

  for( uint i = 0; i < size(); ++i ) {
    Node *n = at(i);
    assert( n != NULL, "Did not expect null entries in worklist");
    if( ! useful.test(n->_idx) ) {
      _in_worklist >>= n->_idx;
      map(i,Node_List::pop());
      // Node *replacement = Node_List::pop();
      // if( i != size() ) { // Check if removing last entry
      //   _nodes[i] = replacement;
      // }
      --i;  // Visit popped node
      // If it was last entry, loop terminates since size() was also reduced
    }
  }
}
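
// Unique_Node_List note: membership is tracked in the _in_worklist VectorSet, so
// remove() and remove_useless_nodes() can test membership in O(1).  Removal swaps
// the victim with the last element via map(i, Node_List::pop()), which is why the
// index is revisited with --i in remove_useless_nodes().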

//=============================================================================
void Node_Stack::grow() {
  size_t old_top = pointer_delta(_inode_top,_inodes,sizeof(INode)); // save _top
  size_t old_max = pointer_delta(_inode_max,_inodes,sizeof(INode));
  size_t max = old_max << 1;             // max * 2
  _inodes = REALLOC_ARENA_ARRAY(_a, INode, _inodes, old_max, max);
  _inode_max = _inodes + max;
  _inode_top = _inodes + old_top;        // restore _top
}

// Node_Stack is used to map nodes.
Node* Node_Stack::find(uint idx) const {
  uint sz = size();
  for (uint i=0; i < sz; i++) {
    if (idx == index_at(i) )
      return node_at(i);
  }
  return NULL;
}

//=============================================================================
uint TypeNode::size_of() const { return sizeof(*this); }
#ifndef PRODUCT
void TypeNode::dump_spec(outputStream *st) const {
  if( !Verbose && !WizardMode ) {
    // standard dump does this in Verbose and WizardMode
    st->print(" #"); _type->dump_on(st);
  }
}
#endif
uint TypeNode::hash() const {
  return Node::hash() + _type->hash();
}
uint TypeNode::cmp( const Node &n ) const
{ return !Type::cmp( _type, ((TypeNode&)n)._type ); }
const Type *TypeNode::bottom_type() const { return _type; }
const Type *TypeNode::Value( PhaseTransform * ) const { return _type; }

//------------------------------ideal_reg--------------------------------------
uint TypeNode::ideal_reg() const {
  return _type->ideal_reg();
}
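
// TypeNode note: hash() and cmp() fold the node's _type into value numbering, so
// GVN only treats two TypeNodes as equivalent when Node::cmp() matches their
// inputs and Type::cmp() reports identical types; bottom_type() and Value()
// simply report the stored _type.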