/*
 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_NODE_HPP
#define SHARE_OPTO_NODE_HPP

#include "libadt/vectset.hpp"
#include "opto/compile.hpp"
#include "opto/type.hpp"
#include "utilities/copy.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style


class AbstractLockNode;
class AddNode;
class AddPNode;
class AliasInfo;
class AllocateArrayNode;
class AllocateNode;
class ArrayCopyNode;
class BaseCountedLoopNode;
class BaseCountedLoopEndNode;
class BlackholeNode;
class Block;
class BoolNode;
class BoxLockNode;
class CMoveNode;
class CallDynamicJavaNode;
class CallJavaNode;
class CallLeafNode;
class CallLeafNoFPNode;
class CallNode;
class CallRuntimeNode;
class CallNativeNode;
class CallStaticJavaNode;
class CastFFNode;
class CastDDNode;
class CastVVNode;
class CastIINode;
class CastLLNode;
class CatchNode;
class CatchProjNode;
class CheckCastPPNode;
class ClearArrayNode;
class CmpNode;
class CodeBuffer;
class ConstraintCastNode;
class ConNode;
class CompareAndSwapNode;
class CompareAndExchangeNode;
class CountedLoopNode;
class CountedLoopEndNode;
class DecodeNarrowPtrNode;
class DecodeNNode;
class DecodeNKlassNode;
class EncodeNarrowPtrNode;
class EncodePNode;
class EncodePKlassNode;
class FastLockNode;
class FastUnlockNode;
class HaltNode;
class IfNode;
class IfProjNode;
class IfFalseNode;
class IfTrueNode;
class InitializeNode;
class JVMState;
class JumpNode;
class JumpProjNode;
class LoadNode;
class LoadStoreNode;
class LoadStoreConditionalNode;
class LockNode;
class LongCountedLoopNode;
class LongCountedLoopEndNode;
class LoopNode;
class LShiftNode;
class MachBranchNode;
class MachCallDynamicJavaNode;
class MachCallJavaNode;
class MachCallLeafNode;
class MachCallNode;
class MachCallNativeNode;
class MachCallRuntimeNode;
class MachCallStaticJavaNode;
class MachConstantBaseNode;
class MachConstantNode;
class MachGotoNode;
class MachIfNode;
class MachJumpNode;
class MachNode;
class MachNullCheckNode;
class MachProjNode;
class MachReturnNode;
class MachSafePointNode;
class MachSpillCopyNode;
class MachTempNode;
class MachMergeNode;
class MachMemBarNode;
class Matcher;
class MemBarNode;
class MemBarStoreStoreNode;
class MemNode;
class MergeMemNode;
class MoveNode;
class MulNode;
class MultiNode;
class MultiBranchNode;
class NeverBranchNode;
class Opaque1Node;
class OuterStripMinedLoopNode;
class OuterStripMinedLoopEndNode;
class Node;
class Node_Array;
class Node_List;
class Node_Stack;
class OopMap;
class ParmNode;
class PCTableNode;
class PhaseCCP;
class PhaseGVN;
class PhaseIterGVN;
class PhaseRegAlloc;
class PhaseTransform;
class PhaseValues;
class PhiNode;
class Pipeline;
class ProjNode;
class RangeCheckNode;
class RegMask;
class RegionNode;
class RootNode;
class SafePointNode;
class SafePointScalarObjectNode;
class StartNode;
class State;
class StoreNode;
class SubNode;
class SubTypeCheckNode;
class Type;
class TypeNode;
class UnlockNode;
class VectorNode;
class LoadVectorNode;
class LoadVectorMaskedNode;
class StoreVectorMaskedNode;
class LoadVectorGatherNode;
class StoreVectorNode;
class StoreVectorScatterNode;
class VectorMaskCmpNode;
class VectorUnboxNode;
class VectorSet;
class VectorReinterpretNode;
class ShiftVNode;


#ifndef OPTO_DU_ITERATOR_ASSERT
#ifdef ASSERT
#define OPTO_DU_ITERATOR_ASSERT 1
#else
#define OPTO_DU_ITERATOR_ASSERT 0
#endif
#endif //OPTO_DU_ITERATOR_ASSERT

#if OPTO_DU_ITERATOR_ASSERT
class DUIterator;
class DUIterator_Fast;
class DUIterator_Last;
#else
typedef uint   DUIterator;
typedef Node** DUIterator_Fast;
typedef Node** DUIterator_Last;
#endif

// Node Sentinel
#define NodeSentinel (Node*)-1

// Unknown count frequency
#define COUNT_UNKNOWN (-1.0f)

//------------------------------Node-------------------------------------------
// Nodes define actions in the program.  They create values, which have types.
// They are both vertices in a directed graph and program primitives.  Nodes
// are labeled; the label is the "opcode", the primitive function in the lambda
// calculus sense that gives meaning to the Node.  Node inputs are ordered (so
// that "a-b" is different from "b-a").  The inputs to a Node are the inputs to
// the Node's function.  These inputs also define a Type equation for the Node.
// Solving these Type equations amounts to doing dataflow analysis.
// Control and data are uniformly represented in the graph.  Finally, Nodes
// have a unique dense integer index which is used to index into side arrays
// whenever I have phase-specific information.

class Node {
  friend class VMStructs;

  // Lots of restrictions on cloning Nodes
  NONCOPYABLE(Node);

public:
  friend class Compile;
#if OPTO_DU_ITERATOR_ASSERT
  friend class DUIterator_Common;
  friend class DUIterator;
  friend class DUIterator_Fast;
  friend class DUIterator_Last;
#endif

  // Because Nodes come and go, I define an Arena of Node structures to pull
  // from.  This should allow fast access to node creation & deletion.  This
  // field is a local cache of a value defined in some "program fragment" of
  // which these Nodes are just a part.

  inline void* operator new(size_t x) throw() {
    Compile* C = Compile::current();
    Node* n = (Node*)C->node_arena()->AmallocWords(x);
    return (void*)n;
  }

  // Delete is a NOP
  void operator delete( void *ptr ) {}
  // Fancy destructor; eagerly attempt to reclaim Node numberings and storage
  void destruct(PhaseValues* phase);

  // Create a new Node.  Required is the number of inputs required for
  // semantic correctness.
  Node( uint required );

  // Create a new Node with given input edges.
  // This version requires use of the "edge-count" new.
  // E.g.  new (C,3) FooNode( C, NULL, left, right );
  Node( Node *n0 );
  Node( Node *n0, Node *n1 );
  Node( Node *n0, Node *n1, Node *n2 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4, Node *n5 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3,
        Node *n4, Node *n5, Node *n6 );

  // Clone an inherited Node given only the base Node type.
  Node* clone() const;

  // Clone a Node, immediately supplying one or two new edges.
  // The first and second arguments, if non-null, replace in(1) and in(2),
  // respectively.
  Node* clone_with_data_edge(Node* in1, Node* in2 = NULL) const {
    Node* nn = clone();
    if (in1 != NULL)  nn->set_req(1, in1);
    if (in2 != NULL)  nn->set_req(2, in2);
    return nn;
  }

private:
  // Shared setup for the above constructors.
  // Handles all interactions with Compile::current.
  // Puts initial values in all Node fields except _idx.
  // Returns the initial value for _idx, which cannot
  // be initialized by assignment.
  inline int Init(int req);

//----------------- input edge handling
protected:
  friend class PhaseCFG;        // Access to address of _in array elements
  Node **_in;                   // Array of use-def references to Nodes
  Node **_out;                  // Array of def-use references to Nodes

  // Input edges are split into two categories.  Required edges are required
  // for semantic correctness; order is important and NULLs are allowed.
  // Precedence edges are used to help determine execution order and are
  // added, e.g., for scheduling purposes.  They are unordered and not
  // duplicated; they have no embedded NULLs.  Edges from 0 to _cnt-1
  // are required, from _cnt to _max-1 are precedence edges.
  node_idx_t _cnt;              // Total number of required Node inputs.

  node_idx_t _max;              // Actual length of input array.

  // Output edges are an unordered list of def-use edges which exactly
  // correspond to required input edges which point from other nodes
  // to this one.  Thus the count of the output edges is the number of
  // users of this node.
  node_idx_t _outcnt;           // Total number of Node outputs.

  node_idx_t _outmax;           // Actual length of output array.

  // Grow the actual input array to the next larger power-of-2 bigger than len.
  void grow( uint len );
  // Grow the output array to the next larger power-of-2 bigger than len.
  void out_grow( uint len );
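
  // For illustration (not actual code): with _cnt == 3 and _max == 6, the
  // _in array is laid out as
  //   _in[0.._2]  required inputs (in(0) is usually control; NULLs allowed)
  //   _in[3.._5]  precedence edges (packed at the front; once a NULL slot is
  //               seen, all following prec slots are NULL too)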

public:
  // Each Node is assigned a unique small/dense number.  This number is used
  // to index into auxiliary arrays of data and bit vectors.
  // The field _idx is declared constant to defend against inadvertent assignments,
  // since it is used by clients as a naked field.  However, the field's value can be
  // changed using the set_idx() method.
  //
  // The PhaseRenumberLive phase renumbers nodes based on liveness information.
  // Therefore, it updates the value of the _idx field.  The parse-time _idx is
  // preserved in _parse_idx.
  const node_idx_t _idx;
  DEBUG_ONLY(const node_idx_t _parse_idx;)
  // IGV node identifier.  Two nodes, possibly in different compilation phases,
  // have the same IGV identifier if (and only if) they are the very same node
  // (same memory address) or one is "derived" from the other (by e.g.
  // renumbering or matching).  This identifier makes it possible to follow the
  // entire lifetime of a node in IGV even if its C2 identifier (_idx) changes.
  NOT_PRODUCT(node_idx_t _igv_idx;)

  // Get the (read-only) number of input edges
  uint req() const { return _cnt; }
  uint len() const { return _max; }
  // Get the (read-only) number of output edges
  uint outcnt() const { return _outcnt; }

#if OPTO_DU_ITERATOR_ASSERT
  // Iterate over the out-edges of this node.  Deletions are illegal.
  inline DUIterator outs() const;
  // Use this when the out array might have changed to suppress asserts.
  inline DUIterator& refresh_out_pos(DUIterator& i) const;
  // Does the node have an out at this position?  (Used for iteration.)
  inline bool has_out(DUIterator& i) const;
  inline Node* out(DUIterator& i) const;
  // Iterate over the out-edges of this node.  All changes are illegal.
  inline DUIterator_Fast fast_outs(DUIterator_Fast& max) const;
  inline Node* fast_out(DUIterator_Fast& i) const;
  // Iterate over the out-edges of this node, deleting one at a time.
  inline DUIterator_Last last_outs(DUIterator_Last& min) const;
  inline Node* last_out(DUIterator_Last& i) const;
  // The inline bodies of all these methods are after the iterator definitions.
#else
  // Iterate over the out-edges of this node.  Deletions are illegal.
  // This iteration uses integral indexes, to decouple from array reallocations.
  DUIterator outs() const  { return 0; }
  // Use this when the out array might have changed to suppress asserts.
  DUIterator refresh_out_pos(DUIterator i) const { return i; }

  // Reference to the i'th output Node.  Error if out of bounds.
  Node* out(DUIterator i) const { assert(i < _outcnt, "oob"); return _out[i]; }
  // Does the node have an out at this position?  (Used for iteration.)
  bool has_out(DUIterator i) const { return i < _outcnt; }

  // Iterate over the out-edges of this node.  All changes are illegal.
  // This iteration uses a pointer internal to the out array.
  DUIterator_Fast fast_outs(DUIterator_Fast& max) const {
    Node** out = _out;
    // Assign a limit pointer to the reference argument:
    max = out + (ptrdiff_t)_outcnt;
    // Return the base pointer:
    return out;
  }
  Node* fast_out(DUIterator_Fast i) const  { return *i; }
  // Iterate over the out-edges of this node, deleting one at a time.
  // This iteration uses a pointer internal to the out array.
  DUIterator_Last last_outs(DUIterator_Last& min) const {
    Node** out = _out;
    // Assign a limit pointer to the reference argument:
    min = out;
    // Return the pointer to the start of the iteration:
    return out + (ptrdiff_t)_outcnt - 1;
  }
  Node* last_out(DUIterator_Last i) const  { return *i; }
#endif

  // Reference to the i'th input Node.  Error if out of bounds.
  Node* in(uint i) const { assert(i < _max, "oob: i=%d, _max=%d", i, _max); return _in[i]; }
  // Reference to the i'th input Node.  NULL if out of bounds.
  Node* lookup(uint i) const { return ((i < _max) ? _in[i] : NULL); }
  // Reference to the i'th output Node.  Error if out of bounds.
  // Use this accessor sparingly.  We are trying to use iterators instead.
  Node* raw_out(uint i) const { assert(i < _outcnt, "oob"); return _out[i]; }
  // Return the unique out edge.
  Node* unique_out() const { assert(_outcnt == 1, "not unique"); return _out[0]; }
  // Delete out edge at position 'i' by moving last out edge to position 'i'
  void raw_del_out(uint i) {
    assert(i < _outcnt, "oob");
    assert(_outcnt > 0, "oob");
#if OPTO_DU_ITERATOR_ASSERT
    // Record that a change happened here.
    debug_only(_last_del = _out[i]; ++_del_tick);
#endif
    _out[i] = _out[--_outcnt];
    // Smash the old edge so it can't be used accidentally.
    debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
  }

#ifdef ASSERT
  bool is_dead() const;
#define is_not_dead(n) ((n) == NULL || !VerifyIterativeGVN || !((n)->is_dead()))
  bool is_reachable_from_root() const;
#endif
  // Check whether node has become unreachable
  bool is_unreachable(PhaseIterGVN &igvn) const;

  // Set a required input edge, also updates corresponding output edge
  void add_req( Node *n ); // Append a NEW required input
  void add_req( Node *n0, Node *n1 ) {
    add_req(n0); add_req(n1); }
  void add_req( Node *n0, Node *n1, Node *n2 ) {
    add_req(n0); add_req(n1); add_req(n2); }
  void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n).
  void del_req( uint idx ); // Delete required edge & compact
  void del_req_ordered( uint idx ); // Delete required edge & compact with preserved order
  void ins_req( uint i, Node *n ); // Insert a NEW required input
  void set_req( uint i, Node *n ) {
    assert( is_not_dead(n), "can not use dead node");
    assert( i < _cnt, "oob: i=%d, _cnt=%d", i, _cnt);
    assert( !VerifyHashTableKeys || _hash_lock == 0,
            "remove node from hash table before modifying it");
    Node** p = &_in[i];    // cache this._in, across the del_out call
    if (*p != NULL)  (*p)->del_out((Node *)this);
    (*p) = n;
    if (n != NULL)      n->add_out((Node *)this);
    Compile::current()->record_modified_node(this);
  }
  // Light version of set_req() to init inputs after node creation.
  void init_req( uint i, Node *n ) {
    assert( i == 0 && this == n ||
            is_not_dead(n), "can not use dead node");
    assert( i < _cnt, "oob");
    assert( !VerifyHashTableKeys || _hash_lock == 0,
            "remove node from hash table before modifying it");
    assert( _in[i] == NULL, "sanity");
    _in[i] = n;
    if (n != NULL)      n->add_out((Node *)this);
    Compile::current()->record_modified_node(this);
  }
  // Find first occurrence of n among my edges:
  int find_edge(Node* n);
  int find_prec_edge(Node* n) {
    for (uint i = req(); i < len(); i++) {
      if (_in[i] == n) return i;
      if (_in[i] == NULL) {
        DEBUG_ONLY( while ((++i) < len()) assert(_in[i] == NULL, "Gap in prec edges!"); )
        break;
      }
    }
    return -1;
  }
  int replace_edge(Node* old, Node* neww, PhaseGVN* gvn = NULL);
  int replace_edges_in_range(Node* old, Node* neww, int start, int end, PhaseGVN* gvn);
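
  // For illustration (sketch): rewiring an input keeps the def-use info
  // consistent in both directions:
  //   use->set_req(1, new_def);  // removes 'use' from old in(1)'s out list,
  //                              // then adds it to new_def's out list
  // init_req() is the cheap variant for freshly created nodes whose input
  // slots are still NULL.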
  // NULL out all inputs to eliminate incoming Def-Use edges.
  void disconnect_inputs(Compile* C);

  // Quickly, return true if and only if I am Compile::current()->top().
  bool is_top() const {
    assert((this == (Node*) Compile::current()->top()) == (_out == NULL), "");
    return (_out == NULL);
  }
  // Reaffirm invariants for is_top.  (Only from Compile::set_cached_top_node.)
  void setup_is_top();

  // Strip away casting.  (It is depth-limited.)
  Node* uncast(bool keep_deps = false) const;
  // Return whether two Nodes are equivalent, after stripping casting.
  bool eqv_uncast(const Node* n, bool keep_deps = false) const {
    return (this->uncast(keep_deps) == n->uncast(keep_deps));
  }

  // Find an out of the current node that matches the opcode.
  Node* find_out_with(int opcode);
  // Return true if the current node has an out that matches the opcode.
  bool has_out_with(int opcode);
  // Return true if the current node has an out that matches any of the opcodes.
  bool has_out_with(int opcode1, int opcode2, int opcode3, int opcode4);

private:
  static Node* uncast_helper(const Node* n, bool keep_deps);

  // Add an output edge to the end of the list
  void add_out( Node *n ) {
    if (is_top())  return;
    if( _outcnt == _outmax ) out_grow(_outcnt);
    _out[_outcnt++] = n;
  }
  // Delete an output edge
  void del_out( Node *n ) {
    if (is_top())  return;
    Node** outp = &_out[_outcnt];
    // Find and remove n
    do {
      assert(outp > _out, "Missing Def-Use edge");
    } while (*--outp != n);
    *outp = _out[--_outcnt];
    // Smash the old edge so it can't be used accidentally.
    debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
    // Record that a change happened here.
#if OPTO_DU_ITERATOR_ASSERT
    debug_only(_last_del = n; ++_del_tick);
#endif
  }
  // Close gap after removing edge.
  void close_prec_gap_at(uint gap) {
    assert(_cnt <= gap && gap < _max, "no valid prec edge");
    uint i = gap;
    Node *last = NULL;
    for (; i < _max-1; ++i) {
      Node *next = _in[i+1];
      if (next == NULL) break;
      last = next;
    }
    _in[gap] = last;  // Move last slot to empty one.
    _in[i] = NULL;    // NULL out last slot.
  }

public:
  // Globally replace this node by a given new node, updating all uses.
  void replace_by(Node* new_node);
  // Globally replace this node by a given new node, updating all uses
  // and cutting input edges of old node.
  void subsume_by(Node* new_node, Compile* c) {
    replace_by(new_node);
    disconnect_inputs(c);
  }
  void set_req_X(uint i, Node *n, PhaseIterGVN *igvn);
  void set_req_X(uint i, Node *n, PhaseGVN *gvn);
  // Find the one non-null required input.  RegionNode only
  Node *nonnull_req() const;
  // Add or remove precedence edges
  void add_prec( Node *n );
  void rm_prec( uint i );
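
  // For illustration (sketch): a precedence edge constrains scheduling
  // without changing semantics; since precedence edges are extra inputs,
  //   n->add_prec(m);   // forces m to be scheduled before n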
  // Note: prec(i) will not necessarily point to n if edge already exists.
  void set_prec( uint i, Node *n ) {
    assert(i < _max, "oob: i=%d, _max=%d", i, _max);
    assert(is_not_dead(n), "can not use dead node");
    assert(i >= _cnt, "not a precedence edge");
    // Avoid spec violation: duplicated prec edge.
    if (_in[i] == n) return;
    if (n == NULL || find_prec_edge(n) != -1) {
      rm_prec(i);
      return;
    }
    if (_in[i] != NULL) _in[i]->del_out((Node *)this);
    _in[i] = n;
    n->add_out((Node *)this);
  }

  // Set this node's index, used by cisc_version to replace current node
  void set_idx(uint new_idx) {
    const node_idx_t* ref = &_idx;
    *(node_idx_t*)ref = new_idx;
  }
  // Swap input edge order.  (Edge indexes i1 and i2 are usually 1 and 2.)
  void swap_edges(uint i1, uint i2) {
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    // Def-Use info is unchanged
    Node* n1 = in(i1);
    Node* n2 = in(i2);
    _in[i1] = n2;
    _in[i2] = n1;
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "edge swap must preserve hash code");
  }

  // Iterators over input Nodes for a Node X are written as:
  // for( i = 0; i < X.req(); i++ ) ... X[i] ...
  // NOTE: Required edges can contain embedded NULL pointers.

//----------------- Other Node Properties

  // Generate class IDs for (some) ideal nodes so that it is possible to determine
  // the type of a node using a non-virtual method call (the method is_<Node>() below).
  //
  // A class ID of an ideal node is a set of bits.  In a class ID, a single bit determines
  // the type of the node the ID represents; another subset of an ID's bits are reserved
  // for the superclasses of the node represented by the ID.
  //
  // By design, if A is a supertype of B, A.is_B() returns true and B.is_A()
  // returns false.  A.is_A() returns true.
  //
  // If two classes, A and B, have the same superclass, a different bit of A's class id
  // is reserved for A's type than for B's type.  That bit is specified by the third
  // parameter in the macro DEFINE_CLASS_ID.
  //
  // By convention, classes with deeper hierarchy are declared first.  Moreover,
  // classes with the same hierarchy depth are sorted by usage frequency.
  //
  // The query method masks the bits to cut off bits of subclasses and then compares
  // the result with the class id (see the macro DEFINE_CLASS_QUERY below).
  //
  //  Class_MachCall=30, ClassMask_MachCall=31
  // 12               8               4               0
  //  0   0   0   0   0   0   0   0   1   1   1   1   0
  //                                  |   |   |   |
  //                                  |   |   |   Bit_Mach=2
  //                                  |   |   Bit_MachReturn=4
  //                                  |   Bit_MachSafePoint=8
  //                                  Bit_MachCall=16
  //
  //  Class_CountedLoop=56, ClassMask_CountedLoop=63
  // 12               8               4               0
  //  0   0   0   0   0   0   0   1   1   1   0   0   0
  //                              |   |   |
  //                              |   |   Bit_Region=8
  //                              |   Bit_Loop=16
  //                              Bit_CountedLoop=32

#define DEFINE_CLASS_ID(cl, supcl, subn) \
  Bit_##cl = (Class_##supcl == 0) ? 1 << subn : (Bit_##supcl) << (1 + subn), \
  Class_##cl = Class_##supcl + Bit_##cl, \
  ClassMask_##cl = ((Bit_##cl << 1) - 1),
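
  // For example, DEFINE_CLASS_ID(Call, SafePoint, 0) below expands to
  //   Bit_Call       = Bit_SafePoint << (1 + 0)    // == 4
  //   Class_Call     = Class_SafePoint + Bit_Call  // == 3 + 4 == 7
  //   ClassMask_Call = (Bit_Call << 1) - 1         // == 7
  // so is_Call() reduces to the mask test (_class_id & 7) == 7.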
  // This enum is used only for C2 ideal and mach nodes with is_<node>() methods
  // so that its values fit into 32 bits.
  enum NodeClasses {
    Bit_Node       = 0x00000000,
    Class_Node     = 0x00000000,
    ClassMask_Node = 0xFFFFFFFF,

    DEFINE_CLASS_ID(Multi, Node, 0)
      DEFINE_CLASS_ID(SafePoint, Multi, 0)
        DEFINE_CLASS_ID(Call,      SafePoint, 0)
          DEFINE_CLASS_ID(CallJava,         Call, 0)
            DEFINE_CLASS_ID(CallStaticJava,   CallJava, 0)
            DEFINE_CLASS_ID(CallDynamicJava,  CallJava, 1)
          DEFINE_CLASS_ID(CallRuntime,      Call, 1)
            DEFINE_CLASS_ID(CallLeaf,         CallRuntime, 0)
              DEFINE_CLASS_ID(CallLeafNoFP,     CallLeaf, 0)
          DEFINE_CLASS_ID(Allocate,         Call, 2)
            DEFINE_CLASS_ID(AllocateArray,    Allocate, 0)
          DEFINE_CLASS_ID(AbstractLock,     Call, 3)
            DEFINE_CLASS_ID(Lock,             AbstractLock, 0)
            DEFINE_CLASS_ID(Unlock,           AbstractLock, 1)
          DEFINE_CLASS_ID(ArrayCopy,        Call, 4)
          DEFINE_CLASS_ID(CallNative,       Call, 5)
      DEFINE_CLASS_ID(MultiBranch, Multi, 1)
        DEFINE_CLASS_ID(PCTable,     MultiBranch, 0)
          DEFINE_CLASS_ID(Catch,       PCTable, 0)
          DEFINE_CLASS_ID(Jump,        PCTable, 1)
        DEFINE_CLASS_ID(If,          MultiBranch, 1)
          DEFINE_CLASS_ID(BaseCountedLoopEnd,     If, 0)
            DEFINE_CLASS_ID(CountedLoopEnd,       BaseCountedLoopEnd, 0)
            DEFINE_CLASS_ID(LongCountedLoopEnd,   BaseCountedLoopEnd, 1)
          DEFINE_CLASS_ID(RangeCheck,             If, 1)
          DEFINE_CLASS_ID(OuterStripMinedLoopEnd, If, 2)
        DEFINE_CLASS_ID(NeverBranch, MultiBranch, 2)
      DEFINE_CLASS_ID(Start,  Multi, 2)
      DEFINE_CLASS_ID(MemBar, Multi, 3)
        DEFINE_CLASS_ID(Initialize,       MemBar, 0)
        DEFINE_CLASS_ID(MemBarStoreStore, MemBar, 1)

    DEFINE_CLASS_ID(Mach, Node, 1)
      DEFINE_CLASS_ID(MachReturn, Mach, 0)
        DEFINE_CLASS_ID(MachSafePoint, MachReturn, 0)
          DEFINE_CLASS_ID(MachCall, MachSafePoint, 0)
            DEFINE_CLASS_ID(MachCallJava,         MachCall, 0)
              DEFINE_CLASS_ID(MachCallStaticJava,   MachCallJava, 0)
              DEFINE_CLASS_ID(MachCallDynamicJava,  MachCallJava, 1)
            DEFINE_CLASS_ID(MachCallRuntime,      MachCall, 1)
              DEFINE_CLASS_ID(MachCallLeaf,         MachCallRuntime, 0)
            DEFINE_CLASS_ID(MachCallNative,       MachCall, 2)
      DEFINE_CLASS_ID(MachBranch, Mach, 1)
        DEFINE_CLASS_ID(MachIf,        MachBranch, 0)
        DEFINE_CLASS_ID(MachGoto,      MachBranch, 1)
        DEFINE_CLASS_ID(MachNullCheck, MachBranch, 2)
      DEFINE_CLASS_ID(MachSpillCopy,    Mach, 2)
      DEFINE_CLASS_ID(MachTemp,         Mach, 3)
      DEFINE_CLASS_ID(MachConstantBase, Mach, 4)
      DEFINE_CLASS_ID(MachConstant,     Mach, 5)
        DEFINE_CLASS_ID(MachJump,       MachConstant, 0)
      DEFINE_CLASS_ID(MachMerge,        Mach, 6)
      DEFINE_CLASS_ID(MachMemBar,       Mach, 7)

    DEFINE_CLASS_ID(Type, Node, 2)
      DEFINE_CLASS_ID(Phi,            Type, 0)
      DEFINE_CLASS_ID(ConstraintCast, Type, 1)
        DEFINE_CLASS_ID(CastII,      ConstraintCast, 0)
        DEFINE_CLASS_ID(CheckCastPP, ConstraintCast, 1)
        DEFINE_CLASS_ID(CastLL,      ConstraintCast, 2)
        DEFINE_CLASS_ID(CastFF,      ConstraintCast, 3)
        DEFINE_CLASS_ID(CastDD,      ConstraintCast, 4)
        DEFINE_CLASS_ID(CastVV,      ConstraintCast, 5)
      DEFINE_CLASS_ID(CMove,                Type, 3)
      DEFINE_CLASS_ID(SafePointScalarObject, Type, 4)
      DEFINE_CLASS_ID(DecodeNarrowPtr,      Type, 5)
        DEFINE_CLASS_ID(DecodeN,      DecodeNarrowPtr, 0)
        DEFINE_CLASS_ID(DecodeNKlass, DecodeNarrowPtr, 1)
      DEFINE_CLASS_ID(EncodeNarrowPtr,      Type, 6)
        DEFINE_CLASS_ID(EncodeP,      EncodeNarrowPtr, 0)
        DEFINE_CLASS_ID(EncodePKlass, EncodeNarrowPtr, 1)
      DEFINE_CLASS_ID(Vector,               Type, 7)
        DEFINE_CLASS_ID(VectorMaskCmp,     Vector, 0)
        DEFINE_CLASS_ID(VectorUnbox,       Vector, 1)
        DEFINE_CLASS_ID(VectorReinterpret, Vector, 2)
        DEFINE_CLASS_ID(ShiftV,            Vector, 3)

    DEFINE_CLASS_ID(Proj, Node, 3)
      DEFINE_CLASS_ID(CatchProj, Proj, 0)
      DEFINE_CLASS_ID(JumpProj,  Proj, 1)
      DEFINE_CLASS_ID(IfProj,    Proj, 2)
        DEFINE_CLASS_ID(IfTrue,  IfProj, 0)
        DEFINE_CLASS_ID(IfFalse, IfProj, 1)
      DEFINE_CLASS_ID(Parm,      Proj, 4)
      DEFINE_CLASS_ID(MachProj,  Proj, 5)

    DEFINE_CLASS_ID(Mem, Node, 4)
      DEFINE_CLASS_ID(Load, Mem, 0)
        DEFINE_CLASS_ID(LoadVector, Load, 0)
          DEFINE_CLASS_ID(LoadVectorGather, LoadVector, 0)
          DEFINE_CLASS_ID(LoadVectorMasked, LoadVector, 1)
      DEFINE_CLASS_ID(Store, Mem, 1)
        DEFINE_CLASS_ID(StoreVector, Store, 0)
          DEFINE_CLASS_ID(StoreVectorScatter, StoreVector, 0)
          DEFINE_CLASS_ID(StoreVectorMasked,  StoreVector, 1)
      DEFINE_CLASS_ID(LoadStore, Mem, 2)
        DEFINE_CLASS_ID(LoadStoreConditional, LoadStore, 0)
          DEFINE_CLASS_ID(CompareAndSwap, LoadStoreConditional, 0)
        DEFINE_CLASS_ID(CompareAndExchangeNode, LoadStore, 1)

    DEFINE_CLASS_ID(Region, Node, 5)
      DEFINE_CLASS_ID(Loop, Region, 0)
        DEFINE_CLASS_ID(Root,            Loop, 0)
        DEFINE_CLASS_ID(BaseCountedLoop, Loop, 1)
          DEFINE_CLASS_ID(CountedLoop,     BaseCountedLoop, 0)
          DEFINE_CLASS_ID(LongCountedLoop, BaseCountedLoop, 1)
        DEFINE_CLASS_ID(OuterStripMinedLoop, Loop, 2)

    DEFINE_CLASS_ID(Sub, Node, 6)
      DEFINE_CLASS_ID(Cmp, Sub, 0)
        DEFINE_CLASS_ID(FastLock,     Cmp, 0)
        DEFINE_CLASS_ID(FastUnlock,   Cmp, 1)
        DEFINE_CLASS_ID(SubTypeCheck, Cmp, 2)

    DEFINE_CLASS_ID(MergeMem,   Node, 7)
    DEFINE_CLASS_ID(Bool,       Node, 8)
    DEFINE_CLASS_ID(AddP,       Node, 9)
    DEFINE_CLASS_ID(BoxLock,    Node, 10)
    DEFINE_CLASS_ID(Add,        Node, 11)
    DEFINE_CLASS_ID(Mul,        Node, 12)
    DEFINE_CLASS_ID(ClearArray, Node, 14)
    DEFINE_CLASS_ID(Halt,       Node, 15)
    DEFINE_CLASS_ID(Opaque1,    Node, 16)
    DEFINE_CLASS_ID(Move,       Node, 17)
    DEFINE_CLASS_ID(LShift,     Node, 18)

    _max_classes = ClassMask_Move
  };
#undef DEFINE_CLASS_ID

  // Flags are sorted by usage frequency.
  enum NodeFlags {
    Flag_is_Copy                     = 1 << 0, // should be first bit to avoid shift
    Flag_rematerialize               = 1 << 1,
    Flag_needs_anti_dependence_check = 1 << 2,
    Flag_is_macro                    = 1 << 3,
    Flag_is_Con                      = 1 << 4,
    Flag_is_cisc_alternate           = 1 << 5,
    Flag_is_dead_loop_safe           = 1 << 6,
    Flag_may_be_short_branch         = 1 << 7,
    Flag_avoid_back_to_back_before   = 1 << 8,
    Flag_avoid_back_to_back_after    = 1 << 9,
    Flag_has_call                    = 1 << 10,
    Flag_is_reduction                = 1 << 11,
    Flag_is_scheduled                = 1 << 12,
    Flag_is_expensive                = 1 << 13,
    Flag_is_predicated_vector        = 1 << 14,
    Flag_for_post_loop_opts_igvn     = 1 << 15,
    Flag_is_removed_by_peephole      = 1 << 16,
    _last_flag                       = Flag_is_removed_by_peephole
  };

  class PD;

private:
  juint _class_id;
  juint _flags;

  static juint max_flags();

protected:
  // These methods should be called from constructors only.
  void init_class_id(juint c) {
    _class_id = c; // cast out const
  }
  void init_flags(uint fl) {
    assert(fl <= max_flags(), "invalid node flag");
    _flags |= fl;
  }
  void clear_flag(uint fl) {
    assert(fl <= max_flags(), "invalid node flag");
    _flags &= ~fl;
  }

public:
  const juint class_id() const { return _class_id; }

  const juint flags() const { return _flags; }

  void add_flag(juint fl) { init_flags(fl); }

  void remove_flag(juint fl) { clear_flag(fl); }

  // Return a dense integer opcode number
  virtual int Opcode() const;

  // Virtual inherited Node size
  virtual uint size_of() const;

  // Other interesting Node properties
#define DEFINE_CLASS_QUERY(type)                             \
  bool is_##type() const {                                   \
    return ((_class_id & ClassMask_##type) == Class_##type); \
  }                                                          \
  type##Node *as_##type() const {                            \
    assert(is_##type(), "invalid node class: %s", Name());   \
    return (type##Node*)this;                                \
  }                                                          \
  type##Node* isa_##type() const {                           \
    return (is_##type()) ? as_##type() : NULL;               \
  }

  DEFINE_CLASS_QUERY(AbstractLock)
  DEFINE_CLASS_QUERY(Add)
  DEFINE_CLASS_QUERY(AddP)
  DEFINE_CLASS_QUERY(Allocate)
  DEFINE_CLASS_QUERY(AllocateArray)
  DEFINE_CLASS_QUERY(ArrayCopy)
  DEFINE_CLASS_QUERY(BaseCountedLoop)
  DEFINE_CLASS_QUERY(BaseCountedLoopEnd)
  DEFINE_CLASS_QUERY(Bool)
  DEFINE_CLASS_QUERY(BoxLock)
  DEFINE_CLASS_QUERY(Call)
  DEFINE_CLASS_QUERY(CallNative)
  DEFINE_CLASS_QUERY(CallDynamicJava)
  DEFINE_CLASS_QUERY(CallJava)
  DEFINE_CLASS_QUERY(CallLeaf)
  DEFINE_CLASS_QUERY(CallLeafNoFP)
  DEFINE_CLASS_QUERY(CallRuntime)
  DEFINE_CLASS_QUERY(CallStaticJava)
  DEFINE_CLASS_QUERY(Catch)
  DEFINE_CLASS_QUERY(CatchProj)
  DEFINE_CLASS_QUERY(CheckCastPP)
  DEFINE_CLASS_QUERY(CastII)
  DEFINE_CLASS_QUERY(CastLL)
  DEFINE_CLASS_QUERY(ConstraintCast)
  DEFINE_CLASS_QUERY(ClearArray)
  DEFINE_CLASS_QUERY(CMove)
  DEFINE_CLASS_QUERY(Cmp)
  DEFINE_CLASS_QUERY(CountedLoop)
  DEFINE_CLASS_QUERY(CountedLoopEnd)
  DEFINE_CLASS_QUERY(DecodeNarrowPtr)
  DEFINE_CLASS_QUERY(DecodeN)
  DEFINE_CLASS_QUERY(DecodeNKlass)
  DEFINE_CLASS_QUERY(EncodeNarrowPtr)
  DEFINE_CLASS_QUERY(EncodeP)
  DEFINE_CLASS_QUERY(EncodePKlass)
  DEFINE_CLASS_QUERY(FastLock)
  DEFINE_CLASS_QUERY(FastUnlock)
  DEFINE_CLASS_QUERY(Halt)
  DEFINE_CLASS_QUERY(If)
  DEFINE_CLASS_QUERY(RangeCheck)
  DEFINE_CLASS_QUERY(IfProj)
  DEFINE_CLASS_QUERY(IfFalse)
  DEFINE_CLASS_QUERY(IfTrue)
  DEFINE_CLASS_QUERY(Initialize)
  DEFINE_CLASS_QUERY(Jump)
  DEFINE_CLASS_QUERY(JumpProj)
  DEFINE_CLASS_QUERY(LongCountedLoop)
  DEFINE_CLASS_QUERY(LongCountedLoopEnd)
  DEFINE_CLASS_QUERY(Load)
  DEFINE_CLASS_QUERY(LoadStore)
  DEFINE_CLASS_QUERY(LoadStoreConditional)
  DEFINE_CLASS_QUERY(Lock)
  DEFINE_CLASS_QUERY(Loop)
  DEFINE_CLASS_QUERY(LShift)
  DEFINE_CLASS_QUERY(Mach)
  DEFINE_CLASS_QUERY(MachBranch)
  DEFINE_CLASS_QUERY(MachCall)
  DEFINE_CLASS_QUERY(MachCallNative)
  DEFINE_CLASS_QUERY(MachCallDynamicJava)
  DEFINE_CLASS_QUERY(MachCallJava)
  DEFINE_CLASS_QUERY(MachCallLeaf)
  DEFINE_CLASS_QUERY(MachCallRuntime)
  DEFINE_CLASS_QUERY(MachCallStaticJava)
  DEFINE_CLASS_QUERY(MachConstantBase)
  DEFINE_CLASS_QUERY(MachConstant)
  DEFINE_CLASS_QUERY(MachGoto)
  DEFINE_CLASS_QUERY(MachIf)
  DEFINE_CLASS_QUERY(MachJump)
  DEFINE_CLASS_QUERY(MachNullCheck)
  DEFINE_CLASS_QUERY(MachProj)
  DEFINE_CLASS_QUERY(MachReturn)
  DEFINE_CLASS_QUERY(MachSafePoint)
  DEFINE_CLASS_QUERY(MachSpillCopy)
  DEFINE_CLASS_QUERY(MachTemp)
  DEFINE_CLASS_QUERY(MachMemBar)
  DEFINE_CLASS_QUERY(MachMerge)
  DEFINE_CLASS_QUERY(Mem)
  DEFINE_CLASS_QUERY(MemBar)
  DEFINE_CLASS_QUERY(MemBarStoreStore)
  DEFINE_CLASS_QUERY(MergeMem)
  DEFINE_CLASS_QUERY(Move)
  DEFINE_CLASS_QUERY(Mul)
  DEFINE_CLASS_QUERY(Multi)
  DEFINE_CLASS_QUERY(MultiBranch)
  DEFINE_CLASS_QUERY(Opaque1)
  DEFINE_CLASS_QUERY(OuterStripMinedLoop)
  DEFINE_CLASS_QUERY(OuterStripMinedLoopEnd)
  DEFINE_CLASS_QUERY(Parm)
  DEFINE_CLASS_QUERY(PCTable)
  DEFINE_CLASS_QUERY(Phi)
  DEFINE_CLASS_QUERY(Proj)
  DEFINE_CLASS_QUERY(Region)
  DEFINE_CLASS_QUERY(Root)
  DEFINE_CLASS_QUERY(SafePoint)
  DEFINE_CLASS_QUERY(SafePointScalarObject)
  DEFINE_CLASS_QUERY(Start)
  DEFINE_CLASS_QUERY(Store)
  DEFINE_CLASS_QUERY(Sub)
  DEFINE_CLASS_QUERY(SubTypeCheck)
  DEFINE_CLASS_QUERY(Type)
  DEFINE_CLASS_QUERY(Vector)
  DEFINE_CLASS_QUERY(VectorMaskCmp)
  DEFINE_CLASS_QUERY(VectorUnbox)
  DEFINE_CLASS_QUERY(VectorReinterpret)
  DEFINE_CLASS_QUERY(LoadVector)
  DEFINE_CLASS_QUERY(LoadVectorGather)
  DEFINE_CLASS_QUERY(StoreVector)
  DEFINE_CLASS_QUERY(StoreVectorScatter)
  DEFINE_CLASS_QUERY(ShiftV)
  DEFINE_CLASS_QUERY(Unlock)

#undef DEFINE_CLASS_QUERY

  // duplicate of is_MachSpillCopy()
  bool is_SpillCopy () const {
    return ((_class_id & ClassMask_MachSpillCopy) == Class_MachSpillCopy);
  }

  bool is_Con () const { return (_flags & Flag_is_Con) != 0; }
  // A data node which is safe to leave in a dead loop during IGVN optimization.
  bool is_dead_loop_safe() const;

  // is_Copy() returns copied edge index (0 or 1)
  uint is_Copy() const { return (_flags & Flag_is_Copy); }

  virtual bool is_CFG() const { return false; }

  // If this node is control-dependent on a test, can it be
  // rerouted to a dominating equivalent test?  This is usually
  // true of non-CFG nodes, but can be false for operations which
  // depend for their correct sequencing on more than one test.
  // (In that case, hoisting to a dominating test may silently
  // skip some other important test.)
  virtual bool depends_only_on_test() const { assert(!is_CFG(), ""); return true; };

  // When building basic blocks, I need to have a notion of block beginning
  // Nodes, next block selector Nodes (block enders), and next block
  // projections.  These calls need to work on their machine equivalents.  The
  // Ideal beginning Nodes are RootNode, RegionNode and StartNode.
  bool is_block_start() const {
    if ( is_Region() )
      return this == (const Node*)in(0);
    else
      return is_Start();
  }

  // The Ideal control projection Nodes are IfTrue/IfFalse, JumpProjNode, Root,
  // Goto and Return.  This call also returns the block ending Node.
  virtual const Node *is_block_proj() const;

  // The node is a "macro" node which needs to be expanded before matching
  bool is_macro() const { return (_flags & Flag_is_macro) != 0; }
  // The node is expensive: the best control is set during loop opts
  bool is_expensive() const { return (_flags & Flag_is_expensive) != 0 && in(0) != NULL; }

  // An arithmetic node which accumulates data in a loop.
  // It must have the loop's phi as input and provide a def to the phi.
  bool is_reduction() const { return (_flags & Flag_is_reduction) != 0; }

  bool is_predicated_vector() const { return (_flags & Flag_is_predicated_vector) != 0; }

  // Used in lcm to mark nodes that have been scheduled
  bool is_scheduled() const { return (_flags & Flag_is_scheduled) != 0; }

  bool for_post_loop_opts_igvn() const { return (_flags & Flag_for_post_loop_opts_igvn) != 0; }

//----------------- Optimization

  // Get the worst-case Type output for this Node.
  virtual const class Type *bottom_type() const;

  // If we find a better type for a node, try to record it permanently.
  // Return true if this node actually changed.
  // Be sure to do the hash_delete game in the "rehash" variant.
  void raise_bottom_type(const Type* new_type);

  // Get the address type with which this node uses and/or defs memory,
  // or NULL if none.  The address type is conservatively wide.
  // Returns non-null for calls, membars, loads, stores, etc.
  // Returns TypePtr::BOTTOM if the node touches memory "broadly".
  virtual const class TypePtr *adr_type() const { return NULL; }

  // Return an existing node which computes the same function as this node.
  // The optimistic combined algorithm requires this to return a Node which
  // is a small number of steps away (e.g., one of my inputs).
  virtual Node* Identity(PhaseGVN* phase);

  // Return the set of values this Node can take on at runtime.
  virtual const Type* Value(PhaseGVN* phase) const;

  // Return a node which is more "ideal" than the current node.
  // The invariants on this call are subtle.  If in doubt, read the
  // treatise in node.cpp above the default implementation AND TEST WITH
  // +VerifyIterativeGVN!
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Some nodes have specific Ideal subgraph transformations only if they are
  // unique users of specific nodes.  Such nodes should be put on IGVN worklist
  // for the transformations to happen.
  bool has_special_unique_user() const;

  // Skip Proj and CatchProj nodes chains.  Check for Null and Top.
  Node* find_exact_control(Node* ctrl);

  // Check if 'this' node dominates or is equal to 'sub'.
  bool dominates(Node* sub, Node_List &nlist);

protected:
  bool remove_dead_region(PhaseGVN *phase, bool can_reshape);
public:

  // See if there is valid pipeline info
  static  const Pipeline *pipeline_class();
  virtual const Pipeline *pipeline() const;

  // Compute the latency from the def to this instruction of the ith input node
  uint latency(uint i);

  // Hash & compare functions, for pessimistic value numbering

  // If the hash function returns the special sentinel value NO_HASH,
  // the node is guaranteed never to compare equal to any other node.
  // If we accidentally generate a hash with value NO_HASH the node
  // won't go into the table and we'll lose a little optimization.
  static const uint NO_HASH = 0;
  virtual uint hash() const;
  virtual bool cmp( const Node &n ) const;
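
  // How the GVN phases use the hooks above (a sketch, not actual code):
  //   const Type* t = n->Value(phase);              // dataflow: narrow n's type
  //   Node* same    = n->Identity(phase);           // forward to an equivalent node
  //   Node* better  = n->Ideal(phase, can_reshape); // local rewrite of the subgraph
  // while hash() and cmp() let the value-numbering hash table share nodes
  // that compute the same function.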
  // Operation appears to be iteratively computed (such as an induction variable)
  // It is possible for this operation to return false for a loop-varying
  // value, if it appears (by local graph inspection) to be computed by a simple conditional.
  bool is_iteratively_computed();

  // Determine if a node is a counted loop induction variable.
  // NOTE: The method is defined in "loopnode.cpp".
  bool is_cloop_ind_var() const;

  // Return a node with opcode "opc" and same inputs as "this" if one can
  // be found; otherwise return NULL.
  Node* find_similar(int opc);

  // Return the unique control out if only one.  Null if none or more than one.
  Node* unique_ctrl_out_or_null() const;
  // Return the unique control out.  Asserts if none or more than one control out.
  Node* unique_ctrl_out() const;

  // Set control or add control as precedence edge
  void ensure_control_or_add_prec(Node* c);

//----------------- Code Generation

  // Ideal register class for Matching.  Zero means unmatched instruction
  // (these are cloned instead of converted to machine nodes).
  virtual uint ideal_reg() const;

  static const uint NotAMachineReg;   // must be > max. machine register

  // Do we Match on this edge index or not?  Generally false for Control
  // and true for everything else.  Weird for calls & returns.
  virtual uint match_edge(uint idx) const;

  // Register class output is returned in
  virtual const RegMask &out_RegMask() const;
  // Register class input is expected in
  virtual const RegMask &in_RegMask(uint) const;
  // Should we clone rather than spill this instruction?
  bool rematerialize() const;

  // Return JVM State Object if this Node carries debug info, or NULL otherwise
  virtual JVMState* jvms() const;

  // Print as assembly
  virtual void format( PhaseRegAlloc *, outputStream* st = tty ) const;
  // Emit bytes starting at parameter 'ptr'
  // Bump 'ptr' by the number of output bytes
  virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
  // Size of instruction in bytes
  virtual uint size(PhaseRegAlloc *ra_) const;

  // Convenience function to extract an integer constant from a node.
  // If it is not an integer constant (either Con, CastII, or Mach),
  // return value_if_unknown.
  jint find_int_con(jint value_if_unknown) const {
    const TypeInt* t = find_int_type();
    return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
  }
  // Return the constant, knowing it is an integer constant already
  jint get_int() const {
    const TypeInt* t = find_int_type();
    guarantee(t != NULL, "must be con");
    return t->get_con();
  }
  // Here's where the work is done.  Can produce non-constant int types too.
  const TypeInt* find_int_type() const;
  const TypeInteger* find_integer_type(BasicType bt) const;
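
  // Example (a sketch with hypothetical node names): read a shift count
  // that may or may not be a compile-time constant, defaulting to -1:
  //   jint shift = shl->in(2)->find_int_con(-1);
  //   if (shift >= 0) { /* constant-shift fast path */ }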
  // Same thing for long (and intptr_t, via type.hpp):
  jlong get_long() const {
    const TypeLong* t = find_long_type();
    guarantee(t != NULL, "must be con");
    return t->get_con();
  }
  jlong find_long_con(jint value_if_unknown) const {
    const TypeLong* t = find_long_type();
    return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
  }
  const TypeLong* find_long_type() const;

  jlong get_integer_as_long(BasicType bt) const {
    const TypeInteger* t = find_integer_type(bt);
    guarantee(t != NULL && t->is_con(), "must be con");
    return t->get_con_as_long(bt);
  }
  jlong find_integer_as_long(BasicType bt, jlong value_if_unknown) const {
    const TypeInteger* t = find_integer_type(bt);
    if (t == NULL || !t->is_con())  return value_if_unknown;
    return t->get_con_as_long(bt);
  }
  const TypePtr* get_ptr_type() const;

  // These guys are called by code generated by ADLC:
  intptr_t get_ptr() const;
  intptr_t get_narrowcon() const;
  jdouble getd() const;
  jfloat getf() const;

  // Nodes which are pinned into basic blocks
  virtual bool pinned() const { return false; }

  // Nodes which use memory without consuming it, hence need antidependences
  // More specifically, needs_anti_dependence_check returns true iff the node
  // (a) does a load, and (b) does not perform a store (except perhaps to a
  // stack slot or some other unaliased location).
  bool needs_anti_dependence_check() const;

  // Return which operand this instruction may cisc-spill.  In other words,
  // return operand position that can convert from reg to memory access
  virtual int cisc_operand() const { return AdlcVMDeps::Not_cisc_spillable; }
  bool is_cisc_alternate() const { return (_flags & Flag_is_cisc_alternate) != 0; }

  // Whether this is a memory-writing machine node.
  bool is_memory_writer() const { return is_Mach() && bottom_type()->has_memory(); }

//----------------- Printing, etc
#ifndef PRODUCT
 private:
  int _indent;

 public:
  void set_indent(int indent) { _indent = indent; }

 private:
  static bool add_to_worklist(Node* n, Node_List* worklist, Arena* old_arena, VectorSet* old_space, VectorSet* new_space);
 public:
  Node* find(int idx, bool only_ctrl = false); // Search the graph for the given idx.
  Node* find_ctrl(int idx);                    // Search control ancestors for the given idx.
  void dump() const { dump("\n"); }            // Print this node.
  void dump(const char* suffix, bool mark = false, outputStream *st = tty) const; // Print this node.
  void dump(int depth) const;                  // Print this node, recursively to depth d
  void dump_ctrl(int depth) const;             // Print control nodes, to depth d
  void dump_comp() const;                      // Print this node in compact representation.
  // Print this node in compact representation.
  void dump_comp(const char* suffix, outputStream *st = tty) const;
  virtual void dump_req(outputStream *st = tty) const;  // Print required-edge info
  virtual void dump_prec(outputStream *st = tty) const; // Print precedence-edge info
  virtual void dump_out(outputStream *st = tty) const;  // Print the output edge info
  virtual void dump_spec(outputStream *st) const {};    // Print per-node info
  // Print compact per-node info
  virtual void dump_compact_spec(outputStream *st) const { dump_spec(st); }
  void dump_related() const;            // Print related nodes (depends on node at hand).
  // Print related nodes up to given depths for input and output nodes.
  void dump_related(uint d_in, uint d_out) const;
  void dump_related_compact() const;    // Print related nodes in compact representation.
  // Collect related nodes.
  virtual void related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const;
  // Collect nodes starting from this node, explicitly including/excluding control and data links.
  void collect_nodes(GrowableArray<Node*> *ns, int d, bool ctrl, bool data) const;

  // Node collectors, to be used in implementations of Node::rel().
  // Collect the entire data input graph.  Include control inputs if requested.
  void collect_nodes_in_all_data(GrowableArray<Node*> *ns, bool ctrl) const;
  // Collect the entire control input graph.  Include data inputs if requested.
  void collect_nodes_in_all_ctrl(GrowableArray<Node*> *ns, bool data) const;
  // Collect the entire output graph until hitting and including control nodes.
  void collect_nodes_out_all_ctrl_boundary(GrowableArray<Node*> *ns) const;

  void verify_edges(Unique_Node_List &visited); // Verify bi-directional edges
  static void verify(int verify_depth, VectorSet& visited, Node_List& worklist);

  // This call defines a class-unique string used to identify class instances
  virtual const char *Name() const;

  void dump_format(PhaseRegAlloc *ra) const; // debug access to MachNode::format(...)
  // RegMask Print Functions
  void dump_in_regmask(int idx) { in_RegMask(idx).dump(); }
  void dump_out_regmask() { out_RegMask().dump(); }
  static bool in_dump() { return Compile::current()->_in_dump_cnt > 0; }
  void fast_dump() const {
    tty->print("%4d: %-17s", _idx, Name());
    for (uint i = 0; i < len(); i++)
      if (in(i))
        tty->print(" %4d", in(i)->_idx);
      else
        tty->print(" NULL");
    tty->print("\n");
  }
#endif
#ifdef ASSERT
  void verify_construction();
  bool verify_jvms(const JVMState* jvms) const;

  Node* _debug_orig;                   // Original version of this, if any.
  Node*  debug_orig() const            { return _debug_orig; }
  void   set_debug_orig(Node* orig);   // _debug_orig = orig
  void   dump_orig(outputStream *st, bool print_key = true) const;

  int  _debug_idx;                     // Unique value assigned to every node.
  int   debug_idx() const              { return _debug_idx; }
  void  set_debug_idx( int debug_idx ) { _debug_idx = debug_idx; }

  int        _hash_lock;               // Barrier to modifications of nodes in the hash table
  void  enter_hash_lock() { ++_hash_lock; assert(_hash_lock < 99, "in too many hash tables?"); }
  void   exit_hash_lock() { --_hash_lock; assert(_hash_lock >= 0, "mispaired hash locks"); }

  static void init_NodeProperty();

#if OPTO_DU_ITERATOR_ASSERT
  const Node* _last_del;               // The last deleted node.
  uint        _del_tick;               // Bumped when a deletion happens.
#endif
#endif
};

inline bool not_a_node(const Node* n) {
  if (n == NULL)                   return true;
  if (((intptr_t)n & 1) != 0)      return true;  // uninitialized, etc.
  if (*(address*)n == badAddress)  return true;  // kill by Node::destruct
  return false;
}

//-----------------------------------------------------------------------------
// Iterators over DU info, and associated Node functions.

#if OPTO_DU_ITERATOR_ASSERT

// Common code for assertion checking on DU iterators.
class DUIterator_Common {
#ifdef ASSERT
 protected:
  bool        _vdui;       // cached value of VerifyDUIterators
  const Node* _node;       // the node containing the _out array
  uint        _outcnt;     // cached node->_outcnt
  uint        _del_tick;   // cached node->_del_tick
  Node*       _last;       // last value produced by the iterator

  void sample(const Node* node);  // used by c'tor to set up for verifies
  void verify(const Node* node, bool at_end_ok = false);
  void verify_resync();
  void reset(const DUIterator_Common& that);

// The VDUI_ONLY macro protects code conditionalized on VerifyDUIterators
  #define I_VDUI_ONLY(i,x) { if ((i)._vdui) { x; } }
#else
  #define I_VDUI_ONLY(i,x) { }
#endif //ASSERT
};

#define VDUI_ONLY(x) I_VDUI_ONLY(*this, x)

// Default DU iterator.  Allows appends onto the out array.
// Allows deletion from the out array only at the current point.
// Usage:
//  for (DUIterator i = x->outs(); x->has_out(i); i++) {
//    Node* y = x->out(i);
//    ...
//  }
// Compiles in product mode to an unsigned integer index, which indexes
// into a repeatedly reloaded base pointer of x->_out.  The loop predicate
// also reloads x->_outcnt.  If you delete, you must perform "--i" just
// before continuing the loop.  You must delete only the last-produced
// edge.  You must delete only a single copy of the last-produced edge,
// or else you must delete all copies at once (the first time the edge
// is produced by the iterator).
class DUIterator : public DUIterator_Common {
  friend class Node;

  // This is the index which provides the product-mode behavior.
  // Whatever the product-mode version of the system does to the
  // DUI index is done to this index.  All other fields in
  // this class are used only for assertion checking.
  uint _idx;

  #ifdef ASSERT
  uint _refresh_tick;            // Records the refresh activity.

  void sample(const Node* node); // Initialize _refresh_tick etc.
  void verify(const Node* node, bool at_end_ok = false);
  void verify_increment();       // Verify an increment operation.
  void verify_resync();          // Verify that we can back up over a deletion.
  void verify_finish();          // Verify that the loop terminated properly.
  void refresh();                // Resample verification info.
  void reset(const DUIterator& that);  // Resample after assignment.
  #endif

  DUIterator(const Node* node, int dummy_to_avoid_conversion)
    { _idx = 0;                         debug_only(sample(node)); }

 public:
  // initialize to garbage; clear _vdui to disable asserts
  DUIterator()
    { /*initialize to garbage*/         debug_only(_vdui = false); }

  DUIterator(const DUIterator& that)
    { _idx = that._idx;                 debug_only(_vdui = false; reset(that)); }

  void operator++(int dummy_to_specify_postfix_op)
    { _idx++;                           VDUI_ONLY(verify_increment()); }

  void operator--()
    { VDUI_ONLY(verify_resync());       --_idx; }

  ~DUIterator()
    { VDUI_ONLY(verify_finish()); }

  void operator=(const DUIterator& that)
    { _idx = that._idx;                 debug_only(reset(that)); }
};

DUIterator Node::outs() const
  { return DUIterator(this, 0); }
DUIterator& Node::refresh_out_pos(DUIterator& i) const
  { I_VDUI_ONLY(i, i.refresh());        return i; }
bool Node::has_out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this,true));return i._idx < _outcnt; }
Node* Node::out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this));     return debug_only(i._last=) _out[i._idx]; }


// Faster DU iterator.  Disallows insertions into the out array.
// Allows deletion from the out array only at the current point.
// Usage:
//  for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) {
//    Node* y = x->fast_out(i);
//    ...
//  }
// Compiles in product mode to raw Node** pointer arithmetic, with
// no reloading of pointers from the original node x.  If you delete,
// you must perform "--i; --imax" just before continuing the loop.
// If you delete multiple copies of the same edge, you must decrement
// imax, but not i, multiple times: "--i, imax -= num_edges".
class DUIterator_Fast : public DUIterator_Common {
  friend class Node;
  friend class DUIterator_Last;

  // This is the pointer which provides the product-mode behavior.
  // Whatever the product-mode version of the system does to the
  // DUI pointer is done to this pointer.  All other fields in
  // this class are used only for assertion checking.
  Node** _outp;

  #ifdef ASSERT
  void verify(const Node* node, bool at_end_ok = false);
  void verify_limit();
  void verify_resync();
  void verify_relimit(uint n);
  void reset(const DUIterator_Fast& that);
  #endif

  // Note:  offset must be signed, since -1 is sometimes passed
  DUIterator_Fast(const Node* node, ptrdiff_t offset)
    { _outp = node->_out + offset;      debug_only(sample(node)); }

 public:
  // initialize to garbage; clear _vdui to disable asserts
  DUIterator_Fast()
    { /*initialize to garbage*/         debug_only(_vdui = false); }

  DUIterator_Fast(const DUIterator_Fast& that)
    { _outp = that._outp;               debug_only(_vdui = false; reset(that)); }

  void operator++(int dummy_to_specify_postfix_op)
    { _outp++;                          VDUI_ONLY(verify(_node, true)); }

  void operator--()
    { VDUI_ONLY(verify_resync());       --_outp; }

  void operator-=(uint n)   // applied to the limit only
    { _outp -= n;                       VDUI_ONLY(verify_relimit(n)); }

  bool operator<(DUIterator_Fast& limit) {
    I_VDUI_ONLY(*this, this->verify(_node, true));
    I_VDUI_ONLY(limit, limit.verify_limit());
    return _outp < limit._outp;
  }

  void operator=(const DUIterator_Fast& that)
    { _outp = that._outp;               debug_only(reset(that)); }
};

DUIterator_Fast Node::fast_outs(DUIterator_Fast& imax) const {
  // Assign a limit pointer to the reference argument:
  imax = DUIterator_Fast(this, (ptrdiff_t)_outcnt);
  // Return the base pointer:
  return DUIterator_Fast(this, 0);
}
Node* Node::fast_out(DUIterator_Fast& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return debug_only(i._last=) *i._outp;
}


// Faster DU iterator.  Requires each successive edge to be removed.
// Does not allow insertion of any edges.
// Usage:
//  for (DUIterator_Last imin, i = x->last_outs(imin); i >= imin; i -= num_edges) {
//    Node* y = x->last_out(i);
//    ...
//  }
// Compiles in product mode to raw Node** pointer arithmetic, with
// no reloading of pointers from the original node x.
class DUIterator_Last : private DUIterator_Fast {
  friend class Node;

#ifdef ASSERT
  void verify(const Node* node, bool at_end_ok = false);
  void verify_limit();
  void verify_step(uint num_edges);
#endif

  // Note: offset must be signed, since -1 is sometimes passed
  DUIterator_Last(const Node* node, ptrdiff_t offset)
    : DUIterator_Fast(node, offset) { }

  void operator++(int dummy_to_specify_postfix_op) {} // do not use
  void operator<(int)                              {} // do not use

 public:
  // initialize to garbage
  DUIterator_Last() { }

  DUIterator_Last(const DUIterator_Last& that) = default;

  void operator--()
    { _outp--;  VDUI_ONLY(verify_step(1)); }

  void operator-=(uint n)
    { _outp -= n;  VDUI_ONLY(verify_step(n)); }

  bool operator>=(DUIterator_Last& limit) {
    I_VDUI_ONLY(*this, this->verify(_node, true));
    I_VDUI_ONLY(limit, limit.verify_limit());
    return _outp >= limit._outp;
  }

  DUIterator_Last& operator=(const DUIterator_Last& that) = default;
};

DUIterator_Last Node::last_outs(DUIterator_Last& imin) const {
  // Assign a limit pointer to the reference argument:
  imin = DUIterator_Last(this, 0);
  // Return the initial pointer:
  return DUIterator_Last(this, (ptrdiff_t)_outcnt - 1);
}
Node* Node::last_out(DUIterator_Last& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return debug_only(i._last=) *i._outp;
}

#endif //OPTO_DU_ITERATOR_ASSERT

#undef I_VDUI_ONLY
#undef VDUI_ONLY

// An Iterator that truly follows the iterator pattern.  Doesn't
// support deletion but could be made to.
// Usage:
//  for (SimpleDUIterator i(n); i.has_next(); i.next()) {
//    Node* m = i.get();
//    ...
//  }
class SimpleDUIterator : public StackObj {
 private:
  Node* node;
  DUIterator_Fast i;
  DUIterator_Fast imax;
 public:
  SimpleDUIterator(Node* n): node(n), i(n->fast_outs(imax)) {}
  bool has_next() { return i < imax; }
  void next() { i++; }
  Node* get() { return node->fast_out(i); }
};
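
// For instance, a minimal sketch that gathers all users of a node "n" onto
// a work list with the simple iterator (Node_List is defined just below):
//
//  Node_List users;
//  for (SimpleDUIterator it(n); it.has_next(); it.next()) {
//    users.push(it.get());
//  }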

//-----------------------------------------------------------------------------
// Map dense integer indices to Nodes.  Uses classic doubling-array trick.
// Abstractly provides an infinite array of Node*'s, initialized to NULL.
// Note that the constructor just zeros things, and since I use Arena
// allocation I do not need a destructor to reclaim storage.
class Node_Array : public ResourceObj {
  friend class VMStructs;
 protected:
  Arena* _a;  // Arena to allocate in
  uint   _max;
  Node** _nodes;
  void grow(uint i);  // Grow the array to fit index i
 public:
  Node_Array(Arena* a, uint max = OptoNodeListSize) : _a(a), _max(max) {
    _nodes = NEW_ARENA_ARRAY(a, Node*, max);
    clear();
  }

  Node_Array(Node_Array* na) : _a(na->_a), _max(na->_max), _nodes(na->_nodes) {}
  Node* operator[](uint i) const  // Lookup, or NULL for not mapped
    { return (i < _max) ? _nodes[i] : (Node*)NULL; }
  Node* at(uint i) const { assert(i < _max, "oob"); return _nodes[i]; }
  Node** adr() { return _nodes; }
  // Extend the mapping: index i maps to Node* n.
  void map(uint i, Node* n) { if (i >= _max) grow(i); _nodes[i] = n; }
  void insert(uint i, Node* n);
  void remove(uint i);  // Remove, preserving order
  // Clear all entries in _nodes to NULL but keep storage
  void clear() {
    Copy::zero_to_bytes(_nodes, _max * sizeof(Node*));
  }

  uint Size() const { return _max; }
  void dump() const;
};

class Node_List : public Node_Array {
  friend class VMStructs;
  uint _cnt;
 public:
  Node_List(uint max = OptoNodeListSize) : Node_Array(Thread::current()->resource_area(), max), _cnt(0) {}
  Node_List(Arena* a, uint max = OptoNodeListSize) : Node_Array(a, max), _cnt(0) {}
  bool contains(const Node* n) const {
    for (uint e = 0; e < size(); e++) {
      if (at(e) == n) return true;
    }
    return false;
  }
  void insert(uint i, Node* n) { Node_Array::insert(i, n); _cnt++; }
  void remove(uint i) { Node_Array::remove(i); _cnt--; }
  void push(Node* b) { map(_cnt++, b); }
  void yank(Node* n);  // Find and remove
  Node* pop() { return _nodes[--_cnt]; }
  void clear() { _cnt = 0; Node_Array::clear(); }  // retain storage
  void copy(const Node_List& from) {
    if (from._max > _max) {
      grow(from._max);
    }
    _cnt = from._cnt;
    Copy::conjoint_words_to_higher((HeapWord*)&from._nodes[0], (HeapWord*)&_nodes[0], from._max * sizeof(Node*));
  }

  uint size() const { return _cnt; }
  void dump() const;
  void dump_simple() const;
};

//------------------------------Unique_Node_List-------------------------------
class Unique_Node_List : public Node_List {
  friend class VMStructs;
  VectorSet _in_worklist;
  uint _clock_index;  // Index in list where to pop from next
 public:
  Unique_Node_List() : Node_List(), _clock_index(0) {}
  Unique_Node_List(Arena* a) : Node_List(a), _in_worklist(a), _clock_index(0) {}

  void remove(Node* n);
  bool member(Node* n) { return _in_worklist.test(n->_idx) != 0; }
  VectorSet& member_set() { return _in_worklist; }

  void push(Node* b) {
    if (!_in_worklist.test_set(b->_idx))
      Node_List::push(b);
  }
  Node* pop() {
    if (_clock_index >= size()) _clock_index = 0;
    Node* b = at(_clock_index);
    map(_clock_index, Node_List::pop());
    if (size() != 0) _clock_index++;  // Once the list drains, restart from 0
    _in_worklist.remove(b->_idx);
    return b;
  }
  Node* remove(uint i) {
    Node* b = Node_List::at(i);
    _in_worklist.remove(b->_idx);
    map(i, Node_List::pop());
    return b;
  }
  void yank(Node* n) {
    _in_worklist.remove(n->_idx);
    Node_List::yank(n);
  }
  void clear() {
    _in_worklist.clear();  // Discards storage but grows automatically
    Node_List::clear();
    _clock_index = 0;
  }

  // Used after parsing to remove useless nodes before Iterative GVN
  void remove_useless_nodes(VectorSet& useful);

  bool contains(const Node* n) const {
    fatal("use faster member() instead");
    return false;
  }

#ifndef PRODUCT
  void print_set() const { _in_worklist.print(); }
#endif
};
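
// For example, a minimal sketch of the usual worklist idiom: collect every
// node reachable from "root" (an arbitrary Node*) through input edges.
// Duplicate pushes are filtered by the _in_worklist set, so each node is
// enqueued and visited exactly once:
//
//  Unique_Node_List worklist;
//  worklist.push(root);
//  for (uint i = 0; i < worklist.size(); i++) {  // note: size() grows
//    Node* n = worklist.at(i);
//    for (uint j = 0; j < n->req(); j++) {
//      Node* in = n->in(j);
//      if (in != NULL) worklist.push(in);  // no-op if already a member
//    }
//  }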

// Inline definition of Compile::record_for_igvn must be deferred to this point.
inline void Compile::record_for_igvn(Node* n) {
  _for_igvn->push(n);
}

//------------------------------Node_Stack-------------------------------------
class Node_Stack {
  friend class VMStructs;
 protected:
  struct INode {
    Node* node;  // Processed node
    uint  indx;  // Index of next node's child
  };
  INode* _inode_top;  // tos, stack grows up
  INode* _inode_max;  // End of _inodes == _inodes + _max
  INode* _inodes;     // Array storage for the stack
  Arena* _a;          // Arena to allocate in
  void grow();
 public:
  Node_Stack(int size) {
    size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
    _a = Thread::current()->resource_area();
    _inodes = NEW_ARENA_ARRAY(_a, INode, max);
    _inode_max = _inodes + max;
    _inode_top = _inodes - 1;  // stack is empty
  }

  Node_Stack(Arena* a, int size) : _a(a) {
    size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
    _inodes = NEW_ARENA_ARRAY(_a, INode, max);
    _inode_max = _inodes + max;
    _inode_top = _inodes - 1;  // stack is empty
  }

  void pop() {
    assert(_inode_top >= _inodes, "node stack underflow");
    --_inode_top;
  }
  void push(Node* n, uint i) {
    ++_inode_top;
    if (_inode_top >= _inode_max) grow();
    INode* top = _inode_top;  // optimization
    top->node = n;
    top->indx = i;
  }
  Node* node() const {
    return _inode_top->node;
  }
  Node* node_at(uint i) const {
    assert(_inodes + i <= _inode_top, "in range");
    return _inodes[i].node;
  }
  uint index() const {
    return _inode_top->indx;
  }
  uint index_at(uint i) const {
    assert(_inodes + i <= _inode_top, "in range");
    return _inodes[i].indx;
  }
  void set_node(Node* n) {
    _inode_top->node = n;
  }
  void set_index(uint i) {
    _inode_top->indx = i;
  }
  uint size_max() const { return (uint)pointer_delta(_inode_max, _inodes, sizeof(INode)); }   // Max size
  uint size() const { return (uint)pointer_delta((_inode_top + 1), _inodes, sizeof(INode)); } // Current size
  bool is_nonempty() const { return (_inode_top >= _inodes); }
  bool is_empty() const { return (_inode_top < _inodes); }
  void clear() { _inode_top = _inodes - 1; }  // retain storage

  // Node_Stack also doubles as a node map: find(idx) returns the stacked
  // node whose _idx equals idx, or NULL if none is present.
  Node* find(uint idx) const;
};
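
// For example, a minimal sketch of an iterative depth-first walk over the
// input graph starting at an arbitrary Node* "root"; each stack entry pairs
// a node with the index of its next unvisited input, and a VectorSet keeps
// the walk from revisiting nodes (the graph may contain cycles):
//
//  VectorSet visited;
//  Node_Stack stack(16);
//  stack.push(root, 0);
//  visited.set(root->_idx);
//  while (stack.is_nonempty()) {
//    Node* n   = stack.node();
//    uint  idx = stack.index();
//    if (idx < n->req()) {
//      stack.set_index(idx + 1);  // remember where to resume in n
//      Node* in = n->in(idx);
//      if (in != NULL && !visited.test_set(in->_idx)) {
//        stack.push(in, 0);
//      }
//    } else {
//      stack.pop();               // all inputs of n have been visited
//    }
//  }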

//-----------------------------Node_Notes--------------------------------------
// Debugging or profiling annotations loosely and sparsely associated
// with some nodes.  See Compile::node_notes_at for the accessor.
class Node_Notes {
  friend class VMStructs;
  JVMState* _jvms;

 public:
  Node_Notes(JVMState* jvms = NULL) {
    _jvms = jvms;
  }

  JVMState* jvms()            { return _jvms; }
  void  set_jvms(JVMState* x) { _jvms = x; }

  // True if there is nothing here.
  bool is_clear() {
    return (_jvms == NULL);
  }

  // Make there be nothing here.
  void clear() {
    _jvms = NULL;
  }

  // Make a new, clean node notes.
  static Node_Notes* make(Compile* C) {
    Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
    nn->clear();
    return nn;
  }

  Node_Notes* clone(Compile* C) {
    Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
    (*nn) = (*this);
    return nn;
  }

  // Absorb any information from source.
  bool update_from(Node_Notes* source) {
    bool changed = false;
    if (source != NULL) {
      if (source->jvms() != NULL) {
        set_jvms(source->jvms());
        changed = true;
      }
    }
    return changed;
  }
};

// Inlined accessors for Compile::node_notes that require the preceding class:
inline Node_Notes*
Compile::locate_node_notes(GrowableArray<Node_Notes*>* arr,
                           int idx, bool can_grow) {
  assert(idx >= 0, "oob");
  int block_idx = (idx >> _log2_node_notes_block_size);
  int grow_by = (block_idx - (arr == NULL ? 0 : arr->length()));
  if (grow_by >= 0) {
    if (!can_grow) return NULL;
    grow_node_notes(arr, grow_by + 1);
  }
  if (arr == NULL) return NULL;
  // (Every element of arr is a sub-array of length _node_notes_block_size.)
  return arr->at(block_idx) + (idx & (_node_notes_block_size - 1));
}

inline bool
Compile::set_node_notes_at(int idx, Node_Notes* value) {
  if (value == NULL || value->is_clear())
    return false;  // nothing to write => write nothing
  Node_Notes* loc = locate_node_notes(_node_note_array, idx, true);
  assert(loc != NULL, "");
  return loc->update_from(value);
}


//------------------------------TypeNode---------------------------------------
// Node with a Type constant.
class TypeNode : public Node {
 protected:
  virtual uint hash() const;     // Check the type
  virtual bool cmp(const Node& n) const;
  virtual uint size_of() const;  // Size is bigger
  const Type* const _type;
 public:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;  // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; }
  TypeNode(const Type* t, uint required) : Node(required), _type(t) {
    init_class_id(Class_Type);
  }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual const Type* bottom_type() const;
  virtual       uint  ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream* st) const;
  virtual void dump_compact_spec(outputStream* st) const;
#endif
};

#include "opto/opcodes.hpp"

// Op_IL(op) defines an inline helper Op_<op>(BasicType bt) which selects
// between the int and long variants of an opcode.  E.g. Op_IL(Add) defines
// Op_Add(bt), where Op_Add(T_INT) == Op_AddI and Op_Add(T_LONG) == Op_AddL.
#define Op_IL(op) \
  inline int Op_ ## op(BasicType bt) { \
    assert(bt == T_INT || bt == T_LONG, "only for int or longs"); \
    if (bt == T_INT) { \
      return Op_ ## op ## I; \
    } \
    return Op_ ## op ## L; \
  }

Op_IL(Add)
Op_IL(Sub)
Op_IL(Mul)
Op_IL(URShift)
Op_IL(LShift)
Op_IL(Xor)
Op_IL(Cmp)

inline int Op_ConIL(BasicType bt) {
  assert(bt == T_INT || bt == T_LONG, "only for int or longs");
  if (bt == T_INT) {
    return Op_ConI;
  }
  return Op_ConL;
}

inline int Op_Cmp_unsigned(BasicType bt) {
  assert(bt == T_INT || bt == T_LONG, "only for int or longs");
  if (bt == T_INT) {
    return Op_CmpU;
  }
  return Op_CmpUL;
}

inline int Op_Cast(BasicType bt) {
  assert(bt == T_INT || bt == T_LONG, "only for int or longs");
  if (bt == T_INT) {
    return Op_CastII;
  }
  return Op_CastLL;
}

#endif // SHARE_OPTO_NODE_HPP