1 /* 2 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_OPTO_NODE_HPP 26 #define SHARE_OPTO_NODE_HPP 27 28 #include "libadt/vectset.hpp" 29 #include "opto/compile.hpp" 30 #include "opto/type.hpp" 31 #include "utilities/copy.hpp" 32 33 // Portions of code courtesy of Clifford Click 34 35 // Optimization - Graph Style 36 37 38 class AbstractLockNode; 39 class AddNode; 40 class AddPNode; 41 class AliasInfo; 42 class AllocateArrayNode; 43 class AllocateNode; 44 class ArrayCopyNode; 45 class BaseCountedLoopNode; 46 class BaseCountedLoopEndNode; 47 class BlackholeNode; 48 class Block; 49 class BoolNode; 50 class BoxLockNode; 51 class CMoveNode; 52 class CallDynamicJavaNode; 53 class CallJavaNode; 54 class CallLeafNode; 55 class CallLeafNoFPNode; 56 class CallNode; 57 class CallRuntimeNode; 58 class CallStaticJavaNode; 59 class CastFFNode; 60 class CastDDNode; 61 class CastVVNode; 62 class CastIINode; 63 class CastLLNode; 64 class CatchNode; 65 class CatchProjNode; 66 class CheckCastPPNode; 67 class ClearArrayNode; 68 class CmpNode; 69 class CodeBuffer; 70 class ConstraintCastNode; 71 class ConNode; 72 class ConINode; 73 class ConvertNode; 74 class CompareAndSwapNode; 75 class CompareAndExchangeNode; 76 class CountedLoopNode; 77 class CountedLoopEndNode; 78 class DecodeNarrowPtrNode; 79 class DecodeNNode; 80 class DecodeNKlassNode; 81 class EncodeNarrowPtrNode; 82 class EncodePNode; 83 class EncodePKlassNode; 84 class FastLockNode; 85 class FastUnlockNode; 86 class HaltNode; 87 class IfNode; 88 class IfProjNode; 89 class IfFalseNode; 90 class IfTrueNode; 91 class InitializeNode; 92 class JVMState; 93 class JumpNode; 94 class JumpProjNode; 95 class LoadNode; 96 class LoadStoreNode; 97 class LoadStoreConditionalNode; 98 class LockNode; 99 class LongCountedLoopNode; 100 class LongCountedLoopEndNode; 101 class LoopNode; 102 class LShiftNode; 103 class MachBranchNode; 104 class MachCallDynamicJavaNode; 105 class MachCallJavaNode; 106 class MachCallLeafNode; 107 class MachCallNode; 108 class MachCallRuntimeNode; 109 class MachCallStaticJavaNode; 110 class MachConstantBaseNode; 111 class MachConstantNode; 112 class MachGotoNode; 113 class MachIfNode; 114 class MachJumpNode; 115 class MachNode; 116 class MachNullCheckNode; 117 class MachProjNode; 118 class MachReturnNode; 119 class MachSafePointNode; 120 class MachSpillCopyNode; 121 class MachTempNode; 122 class MachMergeNode; 123 class MachMemBarNode; 124 class Matcher; 125 class MemBarNode; 126 class MemBarStoreStoreNode; 127 class MemNode; 128 class MergeMemNode; 129 class MoveNode; 130 class MulNode; 131 class MultiNode; 132 class MultiBranchNode; 133 class NegNode; 134 class NegVNode; 135 class NeverBranchNode; 136 class Opaque1Node; 137 class OuterStripMinedLoopNode; 138 class OuterStripMinedLoopEndNode; 139 class Node; 140 class Node_Array; 141 class Node_List; 142 class Node_Stack; 143 class OopMap; 144 class ParmNode; 145 class ParsePredicateNode; 146 class PCTableNode; 147 class PhaseCCP; 148 class PhaseGVN; 149 class PhaseIterGVN; 150 class PhaseRegAlloc; 151 class PhaseTransform; 152 class PhaseValues; 153 class PhiNode; 154 class Pipeline; 155 class PopulateIndexNode; 156 class ProjNode; 157 class RangeCheckNode; 158 class ReductionNode; 159 class RegMask; 160 class RegionNode; 161 class RootNode; 162 class SafePointNode; 163 class SafePointScalarObjectNode; 164 class SafePointScalarMergeNode; 165 class StartNode; 166 class State; 167 class StoreNode; 168 class SubNode; 169 class SubTypeCheckNode; 170 class Type; 171 class TypeNode; 172 class UnlockNode; 173 class UnorderedReductionNode; 174 class VectorNode; 175 class LoadVectorNode; 176 class LoadVectorMaskedNode; 177 class StoreVectorMaskedNode; 178 class LoadVectorGatherNode; 179 class StoreVectorNode; 180 class StoreVectorScatterNode; 181 class VectorMaskCmpNode; 182 class VectorUnboxNode; 183 class VectorSet; 184 class VectorReinterpretNode; 185 class ShiftVNode; 186 class ExpandVNode; 187 class CompressVNode; 188 class CompressMNode; 189 190 191 #ifndef OPTO_DU_ITERATOR_ASSERT 192 #ifdef ASSERT 193 #define OPTO_DU_ITERATOR_ASSERT 1 194 #else 195 #define OPTO_DU_ITERATOR_ASSERT 0 196 #endif 197 #endif //OPTO_DU_ITERATOR_ASSERT 198 199 #if OPTO_DU_ITERATOR_ASSERT 200 class DUIterator; 201 class DUIterator_Fast; 202 class DUIterator_Last; 203 #else 204 typedef uint DUIterator; 205 typedef Node** DUIterator_Fast; 206 typedef Node** DUIterator_Last; 207 #endif 208 209 // Node Sentinel 210 #define NodeSentinel (Node*)-1 211 212 // Unknown count frequency 213 #define COUNT_UNKNOWN (-1.0f) 214 215 //------------------------------Node------------------------------------------- 216 // Nodes define actions in the program. They create values, which have types. 217 // They are both vertices in a directed graph and program primitives. Nodes 218 // are labeled; the label is the "opcode", the primitive function in the lambda 219 // calculus sense that gives meaning to the Node. Node inputs are ordered (so 220 // that "a-b" is different from "b-a"). The inputs to a Node are the inputs to 221 // the Node's function. These inputs also define a Type equation for the Node. 222 // Solving these Type equations amounts to doing dataflow analysis. 223 // Control and data are uniformly represented in the graph. Finally, Nodes 224 // have a unique dense integer index which is used to index into side arrays 225 // whenever I have phase-specific information. 226 227 class Node { 228 friend class VMStructs; 229 230 // Lots of restrictions on cloning Nodes 231 NONCOPYABLE(Node); 232 233 public: 234 friend class Compile; 235 #if OPTO_DU_ITERATOR_ASSERT 236 friend class DUIterator_Common; 237 friend class DUIterator; 238 friend class DUIterator_Fast; 239 friend class DUIterator_Last; 240 #endif 241 242 // Because Nodes come and go, I define an Arena of Node structures to pull 243 // from. This should allow fast access to node creation & deletion. This 244 // field is a local cache of a value defined in some "program fragment" for 245 // which these Nodes are just a part of. 246 247 inline void* operator new(size_t x) throw() { 248 Compile* C = Compile::current(); 249 Node* n = (Node*)C->node_arena()->AmallocWords(x); 250 return (void*)n; 251 } 252 253 // Delete is a NOP 254 void operator delete( void *ptr ) {} 255 // Fancy destructor; eagerly attempt to reclaim Node numberings and storage 256 void destruct(PhaseValues* phase); 257 258 // Create a new Node. Required is the number is of inputs required for 259 // semantic correctness. 260 Node( uint required ); 261 262 // Create a new Node with given input edges. 263 // This version requires use of the "edge-count" new. 264 // E.g. new (C,3) FooNode( C, nullptr, left, right ); 265 Node( Node *n0 ); 266 Node( Node *n0, Node *n1 ); 267 Node( Node *n0, Node *n1, Node *n2 ); 268 Node( Node *n0, Node *n1, Node *n2, Node *n3 ); 269 Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4 ); 270 Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4, Node *n5 ); 271 Node( Node *n0, Node *n1, Node *n2, Node *n3, 272 Node *n4, Node *n5, Node *n6 ); 273 274 // Clone an inherited Node given only the base Node type. 275 Node* clone() const; 276 277 // Clone a Node, immediately supplying one or two new edges. 278 // The first and second arguments, if non-null, replace in(1) and in(2), 279 // respectively. 280 Node* clone_with_data_edge(Node* in1, Node* in2 = nullptr) const { 281 Node* nn = clone(); 282 if (in1 != nullptr) nn->set_req(1, in1); 283 if (in2 != nullptr) nn->set_req(2, in2); 284 return nn; 285 } 286 287 private: 288 // Shared setup for the above constructors. 289 // Handles all interactions with Compile::current. 290 // Puts initial values in all Node fields except _idx. 291 // Returns the initial value for _idx, which cannot 292 // be initialized by assignment. 293 inline int Init(int req); 294 295 //----------------- input edge handling 296 protected: 297 friend class PhaseCFG; // Access to address of _in array elements 298 Node **_in; // Array of use-def references to Nodes 299 Node **_out; // Array of def-use references to Nodes 300 301 // Input edges are split into two categories. Required edges are required 302 // for semantic correctness; order is important and nulls are allowed. 303 // Precedence edges are used to help determine execution order and are 304 // added, e.g., for scheduling purposes. They are unordered and not 305 // duplicated; they have no embedded nulls. Edges from 0 to _cnt-1 306 // are required, from _cnt to _max-1 are precedence edges. 307 node_idx_t _cnt; // Total number of required Node inputs. 308 309 node_idx_t _max; // Actual length of input array. 310 311 // Output edges are an unordered list of def-use edges which exactly 312 // correspond to required input edges which point from other nodes 313 // to this one. Thus the count of the output edges is the number of 314 // users of this node. 315 node_idx_t _outcnt; // Total number of Node outputs. 316 317 node_idx_t _outmax; // Actual length of output array. 318 319 // Grow the actual input array to the next larger power-of-2 bigger than len. 320 void grow( uint len ); 321 // Grow the output array to the next larger power-of-2 bigger than len. 322 void out_grow( uint len ); 323 324 public: 325 // Each Node is assigned a unique small/dense number. This number is used 326 // to index into auxiliary arrays of data and bit vectors. 327 // The field _idx is declared constant to defend against inadvertent assignments, 328 // since it is used by clients as a naked field. However, the field's value can be 329 // changed using the set_idx() method. 330 // 331 // The PhaseRenumberLive phase renumbers nodes based on liveness information. 332 // Therefore, it updates the value of the _idx field. The parse-time _idx is 333 // preserved in _parse_idx. 334 const node_idx_t _idx; 335 DEBUG_ONLY(const node_idx_t _parse_idx;) 336 // IGV node identifier. Two nodes, possibly in different compilation phases, 337 // have the same IGV identifier if (and only if) they are the very same node 338 // (same memory address) or one is "derived" from the other (by e.g. 339 // renumbering or matching). This identifier makes it possible to follow the 340 // entire lifetime of a node in IGV even if its C2 identifier (_idx) changes. 341 NOT_PRODUCT(node_idx_t _igv_idx;) 342 343 // Get the (read-only) number of input edges 344 uint req() const { return _cnt; } 345 uint len() const { return _max; } 346 // Get the (read-only) number of output edges 347 uint outcnt() const { return _outcnt; } 348 349 #if OPTO_DU_ITERATOR_ASSERT 350 // Iterate over the out-edges of this node. Deletions are illegal. 351 inline DUIterator outs() const; 352 // Use this when the out array might have changed to suppress asserts. 353 inline DUIterator& refresh_out_pos(DUIterator& i) const; 354 // Does the node have an out at this position? (Used for iteration.) 355 inline bool has_out(DUIterator& i) const; 356 inline Node* out(DUIterator& i) const; 357 // Iterate over the out-edges of this node. All changes are illegal. 358 inline DUIterator_Fast fast_outs(DUIterator_Fast& max) const; 359 inline Node* fast_out(DUIterator_Fast& i) const; 360 // Iterate over the out-edges of this node, deleting one at a time. 361 inline DUIterator_Last last_outs(DUIterator_Last& min) const; 362 inline Node* last_out(DUIterator_Last& i) const; 363 // The inline bodies of all these methods are after the iterator definitions. 364 #else 365 // Iterate over the out-edges of this node. Deletions are illegal. 366 // This iteration uses integral indexes, to decouple from array reallocations. 367 DUIterator outs() const { return 0; } 368 // Use this when the out array might have changed to suppress asserts. 369 DUIterator refresh_out_pos(DUIterator i) const { return i; } 370 371 // Reference to the i'th output Node. Error if out of bounds. 372 Node* out(DUIterator i) const { assert(i < _outcnt, "oob"); return _out[i]; } 373 // Does the node have an out at this position? (Used for iteration.) 374 bool has_out(DUIterator i) const { return i < _outcnt; } 375 376 // Iterate over the out-edges of this node. All changes are illegal. 377 // This iteration uses a pointer internal to the out array. 378 DUIterator_Fast fast_outs(DUIterator_Fast& max) const { 379 Node** out = _out; 380 // Assign a limit pointer to the reference argument: 381 max = out + (ptrdiff_t)_outcnt; 382 // Return the base pointer: 383 return out; 384 } 385 Node* fast_out(DUIterator_Fast i) const { return *i; } 386 // Iterate over the out-edges of this node, deleting one at a time. 387 // This iteration uses a pointer internal to the out array. 388 DUIterator_Last last_outs(DUIterator_Last& min) const { 389 Node** out = _out; 390 // Assign a limit pointer to the reference argument: 391 min = out; 392 // Return the pointer to the start of the iteration: 393 return out + (ptrdiff_t)_outcnt - 1; 394 } 395 Node* last_out(DUIterator_Last i) const { return *i; } 396 #endif 397 398 // Reference to the i'th input Node. Error if out of bounds. 399 Node* in(uint i) const { assert(i < _max, "oob: i=%d, _max=%d", i, _max); return _in[i]; } 400 // Reference to the i'th input Node. null if out of bounds. 401 Node* lookup(uint i) const { return ((i < _max) ? _in[i] : nullptr); } 402 // Reference to the i'th output Node. Error if out of bounds. 403 // Use this accessor sparingly. We are going trying to use iterators instead. 404 Node* raw_out(uint i) const { assert(i < _outcnt,"oob"); return _out[i]; } 405 // Return the unique out edge. 406 Node* unique_out() const { assert(_outcnt==1,"not unique"); return _out[0]; } 407 // Delete out edge at position 'i' by moving last out edge to position 'i' 408 void raw_del_out(uint i) { 409 assert(i < _outcnt,"oob"); 410 assert(_outcnt > 0,"oob"); 411 #if OPTO_DU_ITERATOR_ASSERT 412 // Record that a change happened here. 413 debug_only(_last_del = _out[i]; ++_del_tick); 414 #endif 415 _out[i] = _out[--_outcnt]; 416 // Smash the old edge so it can't be used accidentally. 417 debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef); 418 } 419 420 #ifdef ASSERT 421 bool is_dead() const; 422 static bool is_not_dead(const Node* n); 423 bool is_reachable_from_root() const; 424 #endif 425 // Check whether node has become unreachable 426 bool is_unreachable(PhaseIterGVN &igvn) const; 427 428 // Set a required input edge, also updates corresponding output edge 429 void add_req( Node *n ); // Append a NEW required input 430 void add_req( Node *n0, Node *n1 ) { 431 add_req(n0); add_req(n1); } 432 void add_req( Node *n0, Node *n1, Node *n2 ) { 433 add_req(n0); add_req(n1); add_req(n2); } 434 void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n). 435 void del_req( uint idx ); // Delete required edge & compact 436 void del_req_ordered( uint idx ); // Delete required edge & compact with preserved order 437 void ins_req( uint i, Node *n ); // Insert a NEW required input 438 void set_req( uint i, Node *n ) { 439 assert( is_not_dead(n), "can not use dead node"); 440 assert( i < _cnt, "oob: i=%d, _cnt=%d", i, _cnt); 441 assert( !VerifyHashTableKeys || _hash_lock == 0, 442 "remove node from hash table before modifying it"); 443 Node** p = &_in[i]; // cache this._in, across the del_out call 444 if (*p != nullptr) (*p)->del_out((Node *)this); 445 (*p) = n; 446 if (n != nullptr) n->add_out((Node *)this); 447 Compile::current()->record_modified_node(this); 448 } 449 // Light version of set_req() to init inputs after node creation. 450 void init_req( uint i, Node *n ) { 451 assert( i == 0 && this == n || 452 is_not_dead(n), "can not use dead node"); 453 assert( i < _cnt, "oob"); 454 assert( !VerifyHashTableKeys || _hash_lock == 0, 455 "remove node from hash table before modifying it"); 456 assert( _in[i] == nullptr, "sanity"); 457 _in[i] = n; 458 if (n != nullptr) n->add_out((Node *)this); 459 Compile::current()->record_modified_node(this); 460 } 461 // Find first occurrence of n among my edges: 462 int find_edge(Node* n); 463 int find_prec_edge(Node* n) { 464 for (uint i = req(); i < len(); i++) { 465 if (_in[i] == n) return i; 466 if (_in[i] == nullptr) { 467 DEBUG_ONLY( while ((++i) < len()) assert(_in[i] == nullptr, "Gap in prec edges!"); ) 468 break; 469 } 470 } 471 return -1; 472 } 473 int replace_edge(Node* old, Node* neww, PhaseGVN* gvn = nullptr); 474 int replace_edges_in_range(Node* old, Node* neww, int start, int end, PhaseGVN* gvn); 475 // null out all inputs to eliminate incoming Def-Use edges. 476 void disconnect_inputs(Compile* C); 477 478 // Quickly, return true if and only if I am Compile::current()->top(). 479 bool is_top() const { 480 assert((this == (Node*) Compile::current()->top()) == (_out == nullptr), ""); 481 return (_out == nullptr); 482 } 483 // Reaffirm invariants for is_top. (Only from Compile::set_cached_top_node.) 484 void setup_is_top(); 485 486 // Strip away casting. (It is depth-limited.) 487 Node* uncast(bool keep_deps = false) const; 488 // Return whether two Nodes are equivalent, after stripping casting. 489 bool eqv_uncast(const Node* n, bool keep_deps = false) const { 490 return (this->uncast(keep_deps) == n->uncast(keep_deps)); 491 } 492 493 // Find out of current node that matches opcode. 494 Node* find_out_with(int opcode); 495 // Return true if the current node has an out that matches opcode. 496 bool has_out_with(int opcode); 497 // Return true if the current node has an out that matches any of the opcodes. 498 bool has_out_with(int opcode1, int opcode2, int opcode3, int opcode4); 499 500 private: 501 static Node* uncast_helper(const Node* n, bool keep_deps); 502 503 // Add an output edge to the end of the list 504 void add_out( Node *n ) { 505 if (is_top()) return; 506 if( _outcnt == _outmax ) out_grow(_outcnt); 507 _out[_outcnt++] = n; 508 } 509 // Delete an output edge 510 void del_out( Node *n ) { 511 if (is_top()) return; 512 Node** outp = &_out[_outcnt]; 513 // Find and remove n 514 do { 515 assert(outp > _out, "Missing Def-Use edge"); 516 } while (*--outp != n); 517 *outp = _out[--_outcnt]; 518 // Smash the old edge so it can't be used accidentally. 519 debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef); 520 // Record that a change happened here. 521 #if OPTO_DU_ITERATOR_ASSERT 522 debug_only(_last_del = n; ++_del_tick); 523 #endif 524 } 525 // Close gap after removing edge. 526 void close_prec_gap_at(uint gap) { 527 assert(_cnt <= gap && gap < _max, "no valid prec edge"); 528 uint i = gap; 529 Node *last = nullptr; 530 for (; i < _max-1; ++i) { 531 Node *next = _in[i+1]; 532 if (next == nullptr) break; 533 last = next; 534 } 535 _in[gap] = last; // Move last slot to empty one. 536 _in[i] = nullptr; // null out last slot. 537 } 538 539 public: 540 // Globally replace this node by a given new node, updating all uses. 541 void replace_by(Node* new_node); 542 // Globally replace this node by a given new node, updating all uses 543 // and cutting input edges of old node. 544 void subsume_by(Node* new_node, Compile* c) { 545 replace_by(new_node); 546 disconnect_inputs(c); 547 } 548 void set_req_X(uint i, Node *n, PhaseIterGVN *igvn); 549 void set_req_X(uint i, Node *n, PhaseGVN *gvn); 550 // Find the one non-null required input. RegionNode only 551 Node *nonnull_req() const; 552 // Add or remove precedence edges 553 void add_prec( Node *n ); 554 void rm_prec( uint i ); 555 556 // Note: prec(i) will not necessarily point to n if edge already exists. 557 void set_prec( uint i, Node *n ) { 558 assert(i < _max, "oob: i=%d, _max=%d", i, _max); 559 assert(is_not_dead(n), "can not use dead node"); 560 assert(i >= _cnt, "not a precedence edge"); 561 // Avoid spec violation: duplicated prec edge. 562 if (_in[i] == n) return; 563 if (n == nullptr || find_prec_edge(n) != -1) { 564 rm_prec(i); 565 return; 566 } 567 if (_in[i] != nullptr) _in[i]->del_out((Node *)this); 568 _in[i] = n; 569 n->add_out((Node *)this); 570 Compile::current()->record_modified_node(this); 571 } 572 573 // Set this node's index, used by cisc_version to replace current node 574 void set_idx(uint new_idx) { 575 const node_idx_t* ref = &_idx; 576 *(node_idx_t*)ref = new_idx; 577 } 578 // Swap input edge order. (Edge indexes i1 and i2 are usually 1 and 2.) 579 void swap_edges(uint i1, uint i2) { 580 debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH); 581 // Def-Use info is unchanged 582 Node* n1 = in(i1); 583 Node* n2 = in(i2); 584 _in[i1] = n2; 585 _in[i2] = n1; 586 // If this node is in the hash table, make sure it doesn't need a rehash. 587 assert(check_hash == NO_HASH || check_hash == hash(), "edge swap must preserve hash code"); 588 // Flip swapped edges flag. 589 if (has_swapped_edges()) { 590 remove_flag(Node::Flag_has_swapped_edges); 591 } else { 592 add_flag(Node::Flag_has_swapped_edges); 593 } 594 } 595 596 // Iterators over input Nodes for a Node X are written as: 597 // for( i = 0; i < X.req(); i++ ) ... X[i] ... 598 // NOTE: Required edges can contain embedded null pointers. 599 600 //----------------- Other Node Properties 601 602 // Generate class IDs for (some) ideal nodes so that it is possible to determine 603 // the type of a node using a non-virtual method call (the method is_<Node>() below). 604 // 605 // A class ID of an ideal node is a set of bits. In a class ID, a single bit determines 606 // the type of the node the ID represents; another subset of an ID's bits are reserved 607 // for the superclasses of the node represented by the ID. 608 // 609 // By design, if A is a supertype of B, A.is_B() returns true and B.is_A() 610 // returns false. A.is_A() returns true. 611 // 612 // If two classes, A and B, have the same superclass, a different bit of A's class id 613 // is reserved for A's type than for B's type. That bit is specified by the third 614 // parameter in the macro DEFINE_CLASS_ID. 615 // 616 // By convention, classes with deeper hierarchy are declared first. Moreover, 617 // classes with the same hierarchy depth are sorted by usage frequency. 618 // 619 // The query method masks the bits to cut off bits of subclasses and then compares 620 // the result with the class id (see the macro DEFINE_CLASS_QUERY below). 621 // 622 // Class_MachCall=30, ClassMask_MachCall=31 623 // 12 8 4 0 624 // 0 0 0 0 0 0 0 0 1 1 1 1 0 625 // | | | | 626 // | | | Bit_Mach=2 627 // | | Bit_MachReturn=4 628 // | Bit_MachSafePoint=8 629 // Bit_MachCall=16 630 // 631 // Class_CountedLoop=56, ClassMask_CountedLoop=63 632 // 12 8 4 0 633 // 0 0 0 0 0 0 0 1 1 1 0 0 0 634 // | | | 635 // | | Bit_Region=8 636 // | Bit_Loop=16 637 // Bit_CountedLoop=32 638 639 #define DEFINE_CLASS_ID(cl, supcl, subn) \ 640 Bit_##cl = (Class_##supcl == 0) ? 1 << subn : (Bit_##supcl) << (1 + subn) , \ 641 Class_##cl = Class_##supcl + Bit_##cl , \ 642 ClassMask_##cl = ((Bit_##cl << 1) - 1) , 643 644 // This enum is used only for C2 ideal and mach nodes with is_<node>() methods 645 // so that its values fit into 32 bits. 646 enum NodeClasses { 647 Bit_Node = 0x00000000, 648 Class_Node = 0x00000000, 649 ClassMask_Node = 0xFFFFFFFF, 650 651 DEFINE_CLASS_ID(Multi, Node, 0) 652 DEFINE_CLASS_ID(SafePoint, Multi, 0) 653 DEFINE_CLASS_ID(Call, SafePoint, 0) 654 DEFINE_CLASS_ID(CallJava, Call, 0) 655 DEFINE_CLASS_ID(CallStaticJava, CallJava, 0) 656 DEFINE_CLASS_ID(CallDynamicJava, CallJava, 1) 657 DEFINE_CLASS_ID(CallRuntime, Call, 1) 658 DEFINE_CLASS_ID(CallLeaf, CallRuntime, 0) 659 DEFINE_CLASS_ID(CallLeafNoFP, CallLeaf, 0) 660 DEFINE_CLASS_ID(Allocate, Call, 2) 661 DEFINE_CLASS_ID(AllocateArray, Allocate, 0) 662 DEFINE_CLASS_ID(AbstractLock, Call, 3) 663 DEFINE_CLASS_ID(Lock, AbstractLock, 0) 664 DEFINE_CLASS_ID(Unlock, AbstractLock, 1) 665 DEFINE_CLASS_ID(ArrayCopy, Call, 4) 666 DEFINE_CLASS_ID(MultiBranch, Multi, 1) 667 DEFINE_CLASS_ID(PCTable, MultiBranch, 0) 668 DEFINE_CLASS_ID(Catch, PCTable, 0) 669 DEFINE_CLASS_ID(Jump, PCTable, 1) 670 DEFINE_CLASS_ID(If, MultiBranch, 1) 671 DEFINE_CLASS_ID(BaseCountedLoopEnd, If, 0) 672 DEFINE_CLASS_ID(CountedLoopEnd, BaseCountedLoopEnd, 0) 673 DEFINE_CLASS_ID(LongCountedLoopEnd, BaseCountedLoopEnd, 1) 674 DEFINE_CLASS_ID(RangeCheck, If, 1) 675 DEFINE_CLASS_ID(OuterStripMinedLoopEnd, If, 2) 676 DEFINE_CLASS_ID(ParsePredicate, If, 3) 677 DEFINE_CLASS_ID(NeverBranch, MultiBranch, 2) 678 DEFINE_CLASS_ID(Start, Multi, 2) 679 DEFINE_CLASS_ID(MemBar, Multi, 3) 680 DEFINE_CLASS_ID(Initialize, MemBar, 0) 681 DEFINE_CLASS_ID(MemBarStoreStore, MemBar, 1) 682 683 DEFINE_CLASS_ID(Mach, Node, 1) 684 DEFINE_CLASS_ID(MachReturn, Mach, 0) 685 DEFINE_CLASS_ID(MachSafePoint, MachReturn, 0) 686 DEFINE_CLASS_ID(MachCall, MachSafePoint, 0) 687 DEFINE_CLASS_ID(MachCallJava, MachCall, 0) 688 DEFINE_CLASS_ID(MachCallStaticJava, MachCallJava, 0) 689 DEFINE_CLASS_ID(MachCallDynamicJava, MachCallJava, 1) 690 DEFINE_CLASS_ID(MachCallRuntime, MachCall, 1) 691 DEFINE_CLASS_ID(MachCallLeaf, MachCallRuntime, 0) 692 DEFINE_CLASS_ID(MachBranch, Mach, 1) 693 DEFINE_CLASS_ID(MachIf, MachBranch, 0) 694 DEFINE_CLASS_ID(MachGoto, MachBranch, 1) 695 DEFINE_CLASS_ID(MachNullCheck, MachBranch, 2) 696 DEFINE_CLASS_ID(MachSpillCopy, Mach, 2) 697 DEFINE_CLASS_ID(MachTemp, Mach, 3) 698 DEFINE_CLASS_ID(MachConstantBase, Mach, 4) 699 DEFINE_CLASS_ID(MachConstant, Mach, 5) 700 DEFINE_CLASS_ID(MachJump, MachConstant, 0) 701 DEFINE_CLASS_ID(MachMerge, Mach, 6) 702 DEFINE_CLASS_ID(MachMemBar, Mach, 7) 703 704 DEFINE_CLASS_ID(Type, Node, 2) 705 DEFINE_CLASS_ID(Phi, Type, 0) 706 DEFINE_CLASS_ID(ConstraintCast, Type, 1) 707 DEFINE_CLASS_ID(CastII, ConstraintCast, 0) 708 DEFINE_CLASS_ID(CheckCastPP, ConstraintCast, 1) 709 DEFINE_CLASS_ID(CastLL, ConstraintCast, 2) 710 DEFINE_CLASS_ID(CastFF, ConstraintCast, 3) 711 DEFINE_CLASS_ID(CastDD, ConstraintCast, 4) 712 DEFINE_CLASS_ID(CastVV, ConstraintCast, 5) 713 DEFINE_CLASS_ID(CMove, Type, 3) 714 DEFINE_CLASS_ID(SafePointScalarObject, Type, 4) 715 DEFINE_CLASS_ID(DecodeNarrowPtr, Type, 5) 716 DEFINE_CLASS_ID(DecodeN, DecodeNarrowPtr, 0) 717 DEFINE_CLASS_ID(DecodeNKlass, DecodeNarrowPtr, 1) 718 DEFINE_CLASS_ID(EncodeNarrowPtr, Type, 6) 719 DEFINE_CLASS_ID(EncodeP, EncodeNarrowPtr, 0) 720 DEFINE_CLASS_ID(EncodePKlass, EncodeNarrowPtr, 1) 721 DEFINE_CLASS_ID(Vector, Type, 7) 722 DEFINE_CLASS_ID(VectorMaskCmp, Vector, 0) 723 DEFINE_CLASS_ID(VectorUnbox, Vector, 1) 724 DEFINE_CLASS_ID(VectorReinterpret, Vector, 2) 725 DEFINE_CLASS_ID(ShiftV, Vector, 3) 726 DEFINE_CLASS_ID(CompressV, Vector, 4) 727 DEFINE_CLASS_ID(ExpandV, Vector, 5) 728 DEFINE_CLASS_ID(CompressM, Vector, 6) 729 DEFINE_CLASS_ID(Reduction, Vector, 7) 730 DEFINE_CLASS_ID(UnorderedReduction, Reduction, 0) 731 DEFINE_CLASS_ID(NegV, Vector, 8) 732 DEFINE_CLASS_ID(Con, Type, 8) 733 DEFINE_CLASS_ID(ConI, Con, 0) 734 DEFINE_CLASS_ID(SafePointScalarMerge, Type, 9) 735 DEFINE_CLASS_ID(Convert, Type, 10) 736 737 738 DEFINE_CLASS_ID(Proj, Node, 3) 739 DEFINE_CLASS_ID(CatchProj, Proj, 0) 740 DEFINE_CLASS_ID(JumpProj, Proj, 1) 741 DEFINE_CLASS_ID(IfProj, Proj, 2) 742 DEFINE_CLASS_ID(IfTrue, IfProj, 0) 743 DEFINE_CLASS_ID(IfFalse, IfProj, 1) 744 DEFINE_CLASS_ID(Parm, Proj, 4) 745 DEFINE_CLASS_ID(MachProj, Proj, 5) 746 747 DEFINE_CLASS_ID(Mem, Node, 4) 748 DEFINE_CLASS_ID(Load, Mem, 0) 749 DEFINE_CLASS_ID(LoadVector, Load, 0) 750 DEFINE_CLASS_ID(LoadVectorGather, LoadVector, 0) 751 DEFINE_CLASS_ID(LoadVectorGatherMasked, LoadVector, 1) 752 DEFINE_CLASS_ID(LoadVectorMasked, LoadVector, 2) 753 DEFINE_CLASS_ID(Store, Mem, 1) 754 DEFINE_CLASS_ID(StoreVector, Store, 0) 755 DEFINE_CLASS_ID(StoreVectorScatter, StoreVector, 0) 756 DEFINE_CLASS_ID(StoreVectorScatterMasked, StoreVector, 1) 757 DEFINE_CLASS_ID(StoreVectorMasked, StoreVector, 2) 758 DEFINE_CLASS_ID(LoadStore, Mem, 2) 759 DEFINE_CLASS_ID(LoadStoreConditional, LoadStore, 0) 760 DEFINE_CLASS_ID(CompareAndSwap, LoadStoreConditional, 0) 761 DEFINE_CLASS_ID(CompareAndExchangeNode, LoadStore, 1) 762 763 DEFINE_CLASS_ID(Region, Node, 5) 764 DEFINE_CLASS_ID(Loop, Region, 0) 765 DEFINE_CLASS_ID(Root, Loop, 0) 766 DEFINE_CLASS_ID(BaseCountedLoop, Loop, 1) 767 DEFINE_CLASS_ID(CountedLoop, BaseCountedLoop, 0) 768 DEFINE_CLASS_ID(LongCountedLoop, BaseCountedLoop, 1) 769 DEFINE_CLASS_ID(OuterStripMinedLoop, Loop, 2) 770 771 DEFINE_CLASS_ID(Sub, Node, 6) 772 DEFINE_CLASS_ID(Cmp, Sub, 0) 773 DEFINE_CLASS_ID(FastLock, Cmp, 0) 774 DEFINE_CLASS_ID(FastUnlock, Cmp, 1) 775 DEFINE_CLASS_ID(SubTypeCheck,Cmp, 2) 776 777 DEFINE_CLASS_ID(MergeMem, Node, 7) 778 DEFINE_CLASS_ID(Bool, Node, 8) 779 DEFINE_CLASS_ID(AddP, Node, 9) 780 DEFINE_CLASS_ID(BoxLock, Node, 10) 781 DEFINE_CLASS_ID(Add, Node, 11) 782 DEFINE_CLASS_ID(Mul, Node, 12) 783 DEFINE_CLASS_ID(ClearArray, Node, 14) 784 DEFINE_CLASS_ID(Halt, Node, 15) 785 DEFINE_CLASS_ID(Opaque1, Node, 16) 786 DEFINE_CLASS_ID(Move, Node, 17) 787 DEFINE_CLASS_ID(LShift, Node, 18) 788 DEFINE_CLASS_ID(Neg, Node, 19) 789 790 _max_classes = ClassMask_Neg 791 }; 792 #undef DEFINE_CLASS_ID 793 794 // Flags are sorted by usage frequency. 795 enum NodeFlags { 796 Flag_is_Copy = 1 << 0, // should be first bit to avoid shift 797 Flag_rematerialize = 1 << 1, 798 Flag_needs_anti_dependence_check = 1 << 2, 799 Flag_is_macro = 1 << 3, 800 Flag_is_Con = 1 << 4, 801 Flag_is_cisc_alternate = 1 << 5, 802 Flag_is_dead_loop_safe = 1 << 6, 803 Flag_may_be_short_branch = 1 << 7, 804 Flag_avoid_back_to_back_before = 1 << 8, 805 Flag_avoid_back_to_back_after = 1 << 9, 806 Flag_has_call = 1 << 10, 807 Flag_has_swapped_edges = 1 << 11, 808 Flag_is_scheduled = 1 << 12, 809 Flag_is_expensive = 1 << 13, 810 Flag_is_predicated_vector = 1 << 14, 811 Flag_for_post_loop_opts_igvn = 1 << 15, 812 Flag_is_removed_by_peephole = 1 << 16, 813 Flag_is_predicated_using_blend = 1 << 17, 814 _last_flag = Flag_is_predicated_using_blend 815 }; 816 817 class PD; 818 819 private: 820 juint _class_id; 821 juint _flags; 822 823 static juint max_flags(); 824 825 protected: 826 // These methods should be called from constructors only. 827 void init_class_id(juint c) { 828 _class_id = c; // cast out const 829 } 830 void init_flags(uint fl) { 831 assert(fl <= max_flags(), "invalid node flag"); 832 _flags |= fl; 833 } 834 void clear_flag(uint fl) { 835 assert(fl <= max_flags(), "invalid node flag"); 836 _flags &= ~fl; 837 } 838 839 public: 840 juint class_id() const { return _class_id; } 841 842 juint flags() const { return _flags; } 843 844 void add_flag(juint fl) { init_flags(fl); } 845 846 void remove_flag(juint fl) { clear_flag(fl); } 847 848 // Return a dense integer opcode number 849 virtual int Opcode() const; 850 851 // Virtual inherited Node size 852 virtual uint size_of() const; 853 854 // Other interesting Node properties 855 #define DEFINE_CLASS_QUERY(type) \ 856 bool is_##type() const { \ 857 return ((_class_id & ClassMask_##type) == Class_##type); \ 858 } \ 859 type##Node *as_##type() const { \ 860 assert(is_##type(), "invalid node class: %s", Name()); \ 861 return (type##Node*)this; \ 862 } \ 863 type##Node* isa_##type() const { \ 864 return (is_##type()) ? as_##type() : nullptr; \ 865 } 866 867 DEFINE_CLASS_QUERY(AbstractLock) 868 DEFINE_CLASS_QUERY(Add) 869 DEFINE_CLASS_QUERY(AddP) 870 DEFINE_CLASS_QUERY(Allocate) 871 DEFINE_CLASS_QUERY(AllocateArray) 872 DEFINE_CLASS_QUERY(ArrayCopy) 873 DEFINE_CLASS_QUERY(BaseCountedLoop) 874 DEFINE_CLASS_QUERY(BaseCountedLoopEnd) 875 DEFINE_CLASS_QUERY(Bool) 876 DEFINE_CLASS_QUERY(BoxLock) 877 DEFINE_CLASS_QUERY(Call) 878 DEFINE_CLASS_QUERY(CallDynamicJava) 879 DEFINE_CLASS_QUERY(CallJava) 880 DEFINE_CLASS_QUERY(CallLeaf) 881 DEFINE_CLASS_QUERY(CallLeafNoFP) 882 DEFINE_CLASS_QUERY(CallRuntime) 883 DEFINE_CLASS_QUERY(CallStaticJava) 884 DEFINE_CLASS_QUERY(Catch) 885 DEFINE_CLASS_QUERY(CatchProj) 886 DEFINE_CLASS_QUERY(CheckCastPP) 887 DEFINE_CLASS_QUERY(CastII) 888 DEFINE_CLASS_QUERY(CastLL) 889 DEFINE_CLASS_QUERY(ConI) 890 DEFINE_CLASS_QUERY(ConstraintCast) 891 DEFINE_CLASS_QUERY(ClearArray) 892 DEFINE_CLASS_QUERY(CMove) 893 DEFINE_CLASS_QUERY(Cmp) 894 DEFINE_CLASS_QUERY(Convert) 895 DEFINE_CLASS_QUERY(CountedLoop) 896 DEFINE_CLASS_QUERY(CountedLoopEnd) 897 DEFINE_CLASS_QUERY(DecodeNarrowPtr) 898 DEFINE_CLASS_QUERY(DecodeN) 899 DEFINE_CLASS_QUERY(DecodeNKlass) 900 DEFINE_CLASS_QUERY(EncodeNarrowPtr) 901 DEFINE_CLASS_QUERY(EncodeP) 902 DEFINE_CLASS_QUERY(EncodePKlass) 903 DEFINE_CLASS_QUERY(FastLock) 904 DEFINE_CLASS_QUERY(FastUnlock) 905 DEFINE_CLASS_QUERY(Halt) 906 DEFINE_CLASS_QUERY(If) 907 DEFINE_CLASS_QUERY(RangeCheck) 908 DEFINE_CLASS_QUERY(IfProj) 909 DEFINE_CLASS_QUERY(IfFalse) 910 DEFINE_CLASS_QUERY(IfTrue) 911 DEFINE_CLASS_QUERY(Initialize) 912 DEFINE_CLASS_QUERY(Jump) 913 DEFINE_CLASS_QUERY(JumpProj) 914 DEFINE_CLASS_QUERY(LongCountedLoop) 915 DEFINE_CLASS_QUERY(LongCountedLoopEnd) 916 DEFINE_CLASS_QUERY(Load) 917 DEFINE_CLASS_QUERY(LoadStore) 918 DEFINE_CLASS_QUERY(LoadStoreConditional) 919 DEFINE_CLASS_QUERY(Lock) 920 DEFINE_CLASS_QUERY(Loop) 921 DEFINE_CLASS_QUERY(LShift) 922 DEFINE_CLASS_QUERY(Mach) 923 DEFINE_CLASS_QUERY(MachBranch) 924 DEFINE_CLASS_QUERY(MachCall) 925 DEFINE_CLASS_QUERY(MachCallDynamicJava) 926 DEFINE_CLASS_QUERY(MachCallJava) 927 DEFINE_CLASS_QUERY(MachCallLeaf) 928 DEFINE_CLASS_QUERY(MachCallRuntime) 929 DEFINE_CLASS_QUERY(MachCallStaticJava) 930 DEFINE_CLASS_QUERY(MachConstantBase) 931 DEFINE_CLASS_QUERY(MachConstant) 932 DEFINE_CLASS_QUERY(MachGoto) 933 DEFINE_CLASS_QUERY(MachIf) 934 DEFINE_CLASS_QUERY(MachJump) 935 DEFINE_CLASS_QUERY(MachNullCheck) 936 DEFINE_CLASS_QUERY(MachProj) 937 DEFINE_CLASS_QUERY(MachReturn) 938 DEFINE_CLASS_QUERY(MachSafePoint) 939 DEFINE_CLASS_QUERY(MachSpillCopy) 940 DEFINE_CLASS_QUERY(MachTemp) 941 DEFINE_CLASS_QUERY(MachMemBar) 942 DEFINE_CLASS_QUERY(MachMerge) 943 DEFINE_CLASS_QUERY(Mem) 944 DEFINE_CLASS_QUERY(MemBar) 945 DEFINE_CLASS_QUERY(MemBarStoreStore) 946 DEFINE_CLASS_QUERY(MergeMem) 947 DEFINE_CLASS_QUERY(Move) 948 DEFINE_CLASS_QUERY(Mul) 949 DEFINE_CLASS_QUERY(Multi) 950 DEFINE_CLASS_QUERY(MultiBranch) 951 DEFINE_CLASS_QUERY(Neg) 952 DEFINE_CLASS_QUERY(NegV) 953 DEFINE_CLASS_QUERY(NeverBranch) 954 DEFINE_CLASS_QUERY(Opaque1) 955 DEFINE_CLASS_QUERY(OuterStripMinedLoop) 956 DEFINE_CLASS_QUERY(OuterStripMinedLoopEnd) 957 DEFINE_CLASS_QUERY(Parm) 958 DEFINE_CLASS_QUERY(ParsePredicate) 959 DEFINE_CLASS_QUERY(PCTable) 960 DEFINE_CLASS_QUERY(Phi) 961 DEFINE_CLASS_QUERY(Proj) 962 DEFINE_CLASS_QUERY(Reduction) 963 DEFINE_CLASS_QUERY(Region) 964 DEFINE_CLASS_QUERY(Root) 965 DEFINE_CLASS_QUERY(SafePoint) 966 DEFINE_CLASS_QUERY(SafePointScalarObject) 967 DEFINE_CLASS_QUERY(SafePointScalarMerge) 968 DEFINE_CLASS_QUERY(Start) 969 DEFINE_CLASS_QUERY(Store) 970 DEFINE_CLASS_QUERY(Sub) 971 DEFINE_CLASS_QUERY(SubTypeCheck) 972 DEFINE_CLASS_QUERY(Type) 973 DEFINE_CLASS_QUERY(UnorderedReduction) 974 DEFINE_CLASS_QUERY(Vector) 975 DEFINE_CLASS_QUERY(VectorMaskCmp) 976 DEFINE_CLASS_QUERY(VectorUnbox) 977 DEFINE_CLASS_QUERY(VectorReinterpret) 978 DEFINE_CLASS_QUERY(CompressV) 979 DEFINE_CLASS_QUERY(ExpandV) 980 DEFINE_CLASS_QUERY(CompressM) 981 DEFINE_CLASS_QUERY(LoadVector) 982 DEFINE_CLASS_QUERY(LoadVectorGather) 983 DEFINE_CLASS_QUERY(StoreVector) 984 DEFINE_CLASS_QUERY(StoreVectorScatter) 985 DEFINE_CLASS_QUERY(ShiftV) 986 DEFINE_CLASS_QUERY(Unlock) 987 988 #undef DEFINE_CLASS_QUERY 989 990 // duplicate of is_MachSpillCopy() 991 bool is_SpillCopy () const { 992 return ((_class_id & ClassMask_MachSpillCopy) == Class_MachSpillCopy); 993 } 994 995 bool is_Con () const { return (_flags & Flag_is_Con) != 0; } 996 // The data node which is safe to leave in dead loop during IGVN optimization. 997 bool is_dead_loop_safe() const; 998 999 // is_Copy() returns copied edge index (0 or 1) 1000 uint is_Copy() const { return (_flags & Flag_is_Copy); } 1001 1002 virtual bool is_CFG() const { return false; } 1003 1004 // If this node is control-dependent on a test, can it be 1005 // rerouted to a dominating equivalent test? This is usually 1006 // true of non-CFG nodes, but can be false for operations which 1007 // depend for their correct sequencing on more than one test. 1008 // (In that case, hoisting to a dominating test may silently 1009 // skip some other important test.) 1010 virtual bool depends_only_on_test() const { assert(!is_CFG(), ""); return true; }; 1011 1012 // When building basic blocks, I need to have a notion of block beginning 1013 // Nodes, next block selector Nodes (block enders), and next block 1014 // projections. These calls need to work on their machine equivalents. The 1015 // Ideal beginning Nodes are RootNode, RegionNode and StartNode. 1016 bool is_block_start() const { 1017 if ( is_Region() ) 1018 return this == (const Node*)in(0); 1019 else 1020 return is_Start(); 1021 } 1022 1023 // The Ideal control projection Nodes are IfTrue/IfFalse, JumpProjNode, Root, 1024 // Goto and Return. This call also returns the block ending Node. 1025 virtual const Node *is_block_proj() const; 1026 1027 // The node is a "macro" node which needs to be expanded before matching 1028 bool is_macro() const { return (_flags & Flag_is_macro) != 0; } 1029 // The node is expensive: the best control is set during loop opts 1030 bool is_expensive() const { return (_flags & Flag_is_expensive) != 0 && in(0) != nullptr; } 1031 // The node's original edge position is swapped. 1032 bool has_swapped_edges() const { return (_flags & Flag_has_swapped_edges) != 0; } 1033 1034 bool is_predicated_vector() const { return (_flags & Flag_is_predicated_vector) != 0; } 1035 1036 bool is_predicated_using_blend() const { return (_flags & Flag_is_predicated_using_blend) != 0; } 1037 1038 // Used in lcm to mark nodes that have scheduled 1039 bool is_scheduled() const { return (_flags & Flag_is_scheduled) != 0; } 1040 1041 bool for_post_loop_opts_igvn() const { return (_flags & Flag_for_post_loop_opts_igvn) != 0; } 1042 1043 // Is 'n' possibly a loop entry (i.e. a Parse Predicate projection)? 1044 static bool may_be_loop_entry(Node* n) { 1045 return n != nullptr && n->is_IfProj() && n->in(0)->is_ParsePredicate(); 1046 } 1047 1048 //----------------- Optimization 1049 1050 // Get the worst-case Type output for this Node. 1051 virtual const class Type *bottom_type() const; 1052 1053 // If we find a better type for a node, try to record it permanently. 1054 // Return true if this node actually changed. 1055 // Be sure to do the hash_delete game in the "rehash" variant. 1056 void raise_bottom_type(const Type* new_type); 1057 1058 // Get the address type with which this node uses and/or defs memory, 1059 // or null if none. The address type is conservatively wide. 1060 // Returns non-null for calls, membars, loads, stores, etc. 1061 // Returns TypePtr::BOTTOM if the node touches memory "broadly". 1062 virtual const class TypePtr *adr_type() const { return nullptr; } 1063 1064 // Return an existing node which computes the same function as this node. 1065 // The optimistic combined algorithm requires this to return a Node which 1066 // is a small number of steps away (e.g., one of my inputs). 1067 virtual Node* Identity(PhaseGVN* phase); 1068 1069 // Return the set of values this Node can take on at runtime. 1070 virtual const Type* Value(PhaseGVN* phase) const; 1071 1072 // Return a node which is more "ideal" than the current node. 1073 // The invariants on this call are subtle. If in doubt, read the 1074 // treatise in node.cpp above the default implementation AND TEST WITH 1075 // -XX:VerifyIterativeGVN=1 1076 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); 1077 1078 // Some nodes have specific Ideal subgraph transformations only if they are 1079 // unique users of specific nodes. Such nodes should be put on IGVN worklist 1080 // for the transformations to happen. 1081 bool has_special_unique_user() const; 1082 1083 // Skip Proj and CatchProj nodes chains. Check for Null and Top. 1084 Node* find_exact_control(Node* ctrl); 1085 1086 // Check if 'this' node dominates or equal to 'sub'. 1087 bool dominates(Node* sub, Node_List &nlist); 1088 1089 protected: 1090 bool remove_dead_region(PhaseGVN *phase, bool can_reshape); 1091 public: 1092 1093 // See if there is valid pipeline info 1094 static const Pipeline *pipeline_class(); 1095 virtual const Pipeline *pipeline() const; 1096 1097 // Compute the latency from the def to this instruction of the ith input node 1098 uint latency(uint i); 1099 1100 // Hash & compare functions, for pessimistic value numbering 1101 1102 // If the hash function returns the special sentinel value NO_HASH, 1103 // the node is guaranteed never to compare equal to any other node. 1104 // If we accidentally generate a hash with value NO_HASH the node 1105 // won't go into the table and we'll lose a little optimization. 1106 static const uint NO_HASH = 0; 1107 virtual uint hash() const; 1108 virtual bool cmp( const Node &n ) const; 1109 1110 // Operation appears to be iteratively computed (such as an induction variable) 1111 // It is possible for this operation to return false for a loop-varying 1112 // value, if it appears (by local graph inspection) to be computed by a simple conditional. 1113 bool is_iteratively_computed(); 1114 1115 // Determine if a node is a counted loop induction variable. 1116 // NOTE: The method is defined in "loopnode.cpp". 1117 bool is_cloop_ind_var() const; 1118 1119 // Return a node with opcode "opc" and same inputs as "this" if one can 1120 // be found; Otherwise return null; 1121 Node* find_similar(int opc); 1122 1123 // Return the unique control out if only one. Null if none or more than one. 1124 Node* unique_ctrl_out_or_null() const; 1125 // Return the unique control out. Asserts if none or more than one control out. 1126 Node* unique_ctrl_out() const; 1127 1128 // Set control or add control as precedence edge 1129 void ensure_control_or_add_prec(Node* c); 1130 1131 // Visit boundary uses of the node and apply a callback function for each. 1132 // Recursively traverse uses, stopping and applying the callback when 1133 // reaching a boundary node, defined by is_boundary. Note: the function 1134 // definition appears after the complete type definition of Node_List. 1135 template <typename Callback, typename Check> 1136 void visit_uses(Callback callback, Check is_boundary) const; 1137 1138 //----------------- Code Generation 1139 1140 // Ideal register class for Matching. Zero means unmatched instruction 1141 // (these are cloned instead of converted to machine nodes). 1142 virtual uint ideal_reg() const; 1143 1144 static const uint NotAMachineReg; // must be > max. machine register 1145 1146 // Do we Match on this edge index or not? Generally false for Control 1147 // and true for everything else. Weird for calls & returns. 1148 virtual uint match_edge(uint idx) const; 1149 1150 // Register class output is returned in 1151 virtual const RegMask &out_RegMask() const; 1152 // Register class input is expected in 1153 virtual const RegMask &in_RegMask(uint) const; 1154 // Should we clone rather than spill this instruction? 1155 bool rematerialize() const; 1156 1157 // Return JVM State Object if this Node carries debug info, or null otherwise 1158 virtual JVMState* jvms() const; 1159 1160 // Print as assembly 1161 virtual void format( PhaseRegAlloc *, outputStream* st = tty ) const; 1162 // Emit bytes starting at parameter 'ptr' 1163 // Bump 'ptr' by the number of output bytes 1164 virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const; 1165 // Size of instruction in bytes 1166 virtual uint size(PhaseRegAlloc *ra_) const; 1167 1168 // Convenience function to extract an integer constant from a node. 1169 // If it is not an integer constant (either Con, CastII, or Mach), 1170 // return value_if_unknown. 1171 jint find_int_con(jint value_if_unknown) const { 1172 const TypeInt* t = find_int_type(); 1173 return (t != nullptr && t->is_con()) ? t->get_con() : value_if_unknown; 1174 } 1175 // Return the constant, knowing it is an integer constant already 1176 jint get_int() const { 1177 const TypeInt* t = find_int_type(); 1178 guarantee(t != nullptr, "must be con"); 1179 return t->get_con(); 1180 } 1181 // Here's where the work is done. Can produce non-constant int types too. 1182 const TypeInt* find_int_type() const; 1183 const TypeInteger* find_integer_type(BasicType bt) const; 1184 1185 // Same thing for long (and intptr_t, via type.hpp): 1186 jlong get_long() const { 1187 const TypeLong* t = find_long_type(); 1188 guarantee(t != nullptr, "must be con"); 1189 return t->get_con(); 1190 } 1191 jlong find_long_con(jint value_if_unknown) const { 1192 const TypeLong* t = find_long_type(); 1193 return (t != nullptr && t->is_con()) ? t->get_con() : value_if_unknown; 1194 } 1195 const TypeLong* find_long_type() const; 1196 1197 jlong get_integer_as_long(BasicType bt) const { 1198 const TypeInteger* t = find_integer_type(bt); 1199 guarantee(t != nullptr && t->is_con(), "must be con"); 1200 return t->get_con_as_long(bt); 1201 } 1202 jlong find_integer_as_long(BasicType bt, jlong value_if_unknown) const { 1203 const TypeInteger* t = find_integer_type(bt); 1204 if (t == nullptr || !t->is_con()) return value_if_unknown; 1205 return t->get_con_as_long(bt); 1206 } 1207 const TypePtr* get_ptr_type() const; 1208 1209 // These guys are called by code generated by ADLC: 1210 intptr_t get_ptr() const; 1211 intptr_t get_narrowcon() const; 1212 jdouble getd() const; 1213 jfloat getf() const; 1214 1215 // Nodes which are pinned into basic blocks 1216 virtual bool pinned() const { return false; } 1217 1218 // Nodes which use memory without consuming it, hence need antidependences 1219 // More specifically, needs_anti_dependence_check returns true iff the node 1220 // (a) does a load, and (b) does not perform a store (except perhaps to a 1221 // stack slot or some other unaliased location). 1222 bool needs_anti_dependence_check() const; 1223 1224 // Return which operand this instruction may cisc-spill. In other words, 1225 // return operand position that can convert from reg to memory access 1226 virtual int cisc_operand() const { return AdlcVMDeps::Not_cisc_spillable; } 1227 bool is_cisc_alternate() const { return (_flags & Flag_is_cisc_alternate) != 0; } 1228 1229 // Whether this is a memory-writing machine node. 1230 bool is_memory_writer() const { return is_Mach() && bottom_type()->has_memory(); } 1231 1232 // Whether this is a memory phi node 1233 bool is_memory_phi() const { return is_Phi() && bottom_type() == Type::MEMORY; } 1234 1235 //----------------- Printing, etc 1236 #ifndef PRODUCT 1237 public: 1238 Node* find(int idx, bool only_ctrl = false); // Search the graph for the given idx. 1239 Node* find_ctrl(int idx); // Search control ancestors for the given idx. 1240 void dump_bfs(const int max_distance, Node* target, const char* options, outputStream* st) const; 1241 void dump_bfs(const int max_distance, Node* target, const char* options) const; // directly to tty 1242 void dump_bfs(const int max_distance) const; // dump_bfs(max_distance, nullptr, nullptr) 1243 class DumpConfig { 1244 public: 1245 // overridden to implement coloring of node idx 1246 virtual void pre_dump(outputStream *st, const Node* n) = 0; 1247 virtual void post_dump(outputStream *st) = 0; 1248 }; 1249 void dump_idx(bool align = false, outputStream* st = tty, DumpConfig* dc = nullptr) const; 1250 void dump_name(outputStream* st = tty, DumpConfig* dc = nullptr) const; 1251 void dump() const; // print node with newline 1252 void dump(const char* suffix, bool mark = false, outputStream* st = tty, DumpConfig* dc = nullptr) const; // Print this node. 1253 void dump(int depth) const; // Print this node, recursively to depth d 1254 void dump_ctrl(int depth) const; // Print control nodes, to depth d 1255 void dump_comp() const; // Print this node in compact representation. 1256 // Print this node in compact representation. 1257 void dump_comp(const char* suffix, outputStream *st = tty) const; 1258 private: 1259 virtual void dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const; // Print required-edge info 1260 virtual void dump_prec(outputStream* st = tty, DumpConfig* dc = nullptr) const; // Print precedence-edge info 1261 virtual void dump_out(outputStream* st = tty, DumpConfig* dc = nullptr) const; // Print the output edge info 1262 public: 1263 virtual void dump_spec(outputStream *st) const {}; // Print per-node info 1264 // Print compact per-node info 1265 virtual void dump_compact_spec(outputStream *st) const { dump_spec(st); } 1266 1267 static void verify(int verify_depth, VectorSet& visited, Node_List& worklist); 1268 1269 // This call defines a class-unique string used to identify class instances 1270 virtual const char *Name() const; 1271 1272 void dump_format(PhaseRegAlloc *ra) const; // debug access to MachNode::format(...) 1273 static bool in_dump() { return Compile::current()->_in_dump_cnt > 0; } // check if we are in a dump call 1274 #endif 1275 #ifdef ASSERT 1276 void verify_construction(); 1277 bool verify_jvms(const JVMState* jvms) const; 1278 1279 Node* _debug_orig; // Original version of this, if any. 1280 Node* debug_orig() const { return _debug_orig; } 1281 void set_debug_orig(Node* orig); // _debug_orig = orig 1282 void dump_orig(outputStream *st, bool print_key = true) const; 1283 1284 uint64_t _debug_idx; // Unique value assigned to every node. 1285 uint64_t debug_idx() const { return _debug_idx; } 1286 void set_debug_idx(uint64_t debug_idx) { _debug_idx = debug_idx; } 1287 1288 int _hash_lock; // Barrier to modifications of nodes in the hash table 1289 void enter_hash_lock() { ++_hash_lock; assert(_hash_lock < 99, "in too many hash tables?"); } 1290 void exit_hash_lock() { --_hash_lock; assert(_hash_lock >= 0, "mispaired hash locks"); } 1291 1292 static void init_NodeProperty(); 1293 1294 #if OPTO_DU_ITERATOR_ASSERT 1295 const Node* _last_del; // The last deleted node. 1296 uint _del_tick; // Bumped when a deletion happens.. 1297 #endif 1298 #endif 1299 }; 1300 1301 inline bool not_a_node(const Node* n) { 1302 if (n == nullptr) return true; 1303 if (((intptr_t)n & 1) != 0) return true; // uninitialized, etc. 1304 if (*(address*)n == badAddress) return true; // kill by Node::destruct 1305 return false; 1306 } 1307 1308 //----------------------------------------------------------------------------- 1309 // Iterators over DU info, and associated Node functions. 1310 1311 #if OPTO_DU_ITERATOR_ASSERT 1312 1313 // Common code for assertion checking on DU iterators. 1314 class DUIterator_Common { 1315 #ifdef ASSERT 1316 protected: 1317 bool _vdui; // cached value of VerifyDUIterators 1318 const Node* _node; // the node containing the _out array 1319 uint _outcnt; // cached node->_outcnt 1320 uint _del_tick; // cached node->_del_tick 1321 Node* _last; // last value produced by the iterator 1322 1323 void sample(const Node* node); // used by c'tor to set up for verifies 1324 void verify(const Node* node, bool at_end_ok = false); 1325 void verify_resync(); 1326 void reset(const DUIterator_Common& that); 1327 1328 // The VDUI_ONLY macro protects code conditionalized on VerifyDUIterators 1329 #define I_VDUI_ONLY(i,x) { if ((i)._vdui) { x; } } 1330 #else 1331 #define I_VDUI_ONLY(i,x) { } 1332 #endif //ASSERT 1333 }; 1334 1335 #define VDUI_ONLY(x) I_VDUI_ONLY(*this, x) 1336 1337 // Default DU iterator. Allows appends onto the out array. 1338 // Allows deletion from the out array only at the current point. 1339 // Usage: 1340 // for (DUIterator i = x->outs(); x->has_out(i); i++) { 1341 // Node* y = x->out(i); 1342 // ... 1343 // } 1344 // Compiles in product mode to a unsigned integer index, which indexes 1345 // onto a repeatedly reloaded base pointer of x->_out. The loop predicate 1346 // also reloads x->_outcnt. If you delete, you must perform "--i" just 1347 // before continuing the loop. You must delete only the last-produced 1348 // edge. You must delete only a single copy of the last-produced edge, 1349 // or else you must delete all copies at once (the first time the edge 1350 // is produced by the iterator). 1351 class DUIterator : public DUIterator_Common { 1352 friend class Node; 1353 1354 // This is the index which provides the product-mode behavior. 1355 // Whatever the product-mode version of the system does to the 1356 // DUI index is done to this index. All other fields in 1357 // this class are used only for assertion checking. 1358 uint _idx; 1359 1360 #ifdef ASSERT 1361 uint _refresh_tick; // Records the refresh activity. 1362 1363 void sample(const Node* node); // Initialize _refresh_tick etc. 1364 void verify(const Node* node, bool at_end_ok = false); 1365 void verify_increment(); // Verify an increment operation. 1366 void verify_resync(); // Verify that we can back up over a deletion. 1367 void verify_finish(); // Verify that the loop terminated properly. 1368 void refresh(); // Resample verification info. 1369 void reset(const DUIterator& that); // Resample after assignment. 1370 #endif 1371 1372 DUIterator(const Node* node, int dummy_to_avoid_conversion) 1373 { _idx = 0; debug_only(sample(node)); } 1374 1375 public: 1376 // initialize to garbage; clear _vdui to disable asserts 1377 DUIterator() 1378 { /*initialize to garbage*/ debug_only(_vdui = false); } 1379 1380 DUIterator(const DUIterator& that) 1381 { _idx = that._idx; debug_only(_vdui = false; reset(that)); } 1382 1383 void operator++(int dummy_to_specify_postfix_op) 1384 { _idx++; VDUI_ONLY(verify_increment()); } 1385 1386 void operator--() 1387 { VDUI_ONLY(verify_resync()); --_idx; } 1388 1389 ~DUIterator() 1390 { VDUI_ONLY(verify_finish()); } 1391 1392 void operator=(const DUIterator& that) 1393 { _idx = that._idx; debug_only(reset(that)); } 1394 }; 1395 1396 DUIterator Node::outs() const 1397 { return DUIterator(this, 0); } 1398 DUIterator& Node::refresh_out_pos(DUIterator& i) const 1399 { I_VDUI_ONLY(i, i.refresh()); return i; } 1400 bool Node::has_out(DUIterator& i) const 1401 { I_VDUI_ONLY(i, i.verify(this,true));return i._idx < _outcnt; } 1402 Node* Node::out(DUIterator& i) const 1403 { I_VDUI_ONLY(i, i.verify(this)); return debug_only(i._last=) _out[i._idx]; } 1404 1405 1406 // Faster DU iterator. Disallows insertions into the out array. 1407 // Allows deletion from the out array only at the current point. 1408 // Usage: 1409 // for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) { 1410 // Node* y = x->fast_out(i); 1411 // ... 1412 // } 1413 // Compiles in product mode to raw Node** pointer arithmetic, with 1414 // no reloading of pointers from the original node x. If you delete, 1415 // you must perform "--i; --imax" just before continuing the loop. 1416 // If you delete multiple copies of the same edge, you must decrement 1417 // imax, but not i, multiple times: "--i, imax -= num_edges". 1418 class DUIterator_Fast : public DUIterator_Common { 1419 friend class Node; 1420 friend class DUIterator_Last; 1421 1422 // This is the pointer which provides the product-mode behavior. 1423 // Whatever the product-mode version of the system does to the 1424 // DUI pointer is done to this pointer. All other fields in 1425 // this class are used only for assertion checking. 1426 Node** _outp; 1427 1428 #ifdef ASSERT 1429 void verify(const Node* node, bool at_end_ok = false); 1430 void verify_limit(); 1431 void verify_resync(); 1432 void verify_relimit(uint n); 1433 void reset(const DUIterator_Fast& that); 1434 #endif 1435 1436 // Note: offset must be signed, since -1 is sometimes passed 1437 DUIterator_Fast(const Node* node, ptrdiff_t offset) 1438 { _outp = node->_out + offset; debug_only(sample(node)); } 1439 1440 public: 1441 // initialize to garbage; clear _vdui to disable asserts 1442 DUIterator_Fast() 1443 { /*initialize to garbage*/ debug_only(_vdui = false); } 1444 1445 DUIterator_Fast(const DUIterator_Fast& that) 1446 { _outp = that._outp; debug_only(_vdui = false; reset(that)); } 1447 1448 void operator++(int dummy_to_specify_postfix_op) 1449 { _outp++; VDUI_ONLY(verify(_node, true)); } 1450 1451 void operator--() 1452 { VDUI_ONLY(verify_resync()); --_outp; } 1453 1454 void operator-=(uint n) // applied to the limit only 1455 { _outp -= n; VDUI_ONLY(verify_relimit(n)); } 1456 1457 bool operator<(DUIterator_Fast& limit) { 1458 I_VDUI_ONLY(*this, this->verify(_node, true)); 1459 I_VDUI_ONLY(limit, limit.verify_limit()); 1460 return _outp < limit._outp; 1461 } 1462 1463 void operator=(const DUIterator_Fast& that) 1464 { _outp = that._outp; debug_only(reset(that)); } 1465 }; 1466 1467 DUIterator_Fast Node::fast_outs(DUIterator_Fast& imax) const { 1468 // Assign a limit pointer to the reference argument: 1469 imax = DUIterator_Fast(this, (ptrdiff_t)_outcnt); 1470 // Return the base pointer: 1471 return DUIterator_Fast(this, 0); 1472 } 1473 Node* Node::fast_out(DUIterator_Fast& i) const { 1474 I_VDUI_ONLY(i, i.verify(this)); 1475 return debug_only(i._last=) *i._outp; 1476 } 1477 1478 1479 // Faster DU iterator. Requires each successive edge to be removed. 1480 // Does not allow insertion of any edges. 1481 // Usage: 1482 // for (DUIterator_Last imin, i = x->last_outs(imin); i >= imin; i -= num_edges) { 1483 // Node* y = x->last_out(i); 1484 // ... 1485 // } 1486 // Compiles in product mode to raw Node** pointer arithmetic, with 1487 // no reloading of pointers from the original node x. 1488 class DUIterator_Last : private DUIterator_Fast { 1489 friend class Node; 1490 1491 #ifdef ASSERT 1492 void verify(const Node* node, bool at_end_ok = false); 1493 void verify_limit(); 1494 void verify_step(uint num_edges); 1495 #endif 1496 1497 // Note: offset must be signed, since -1 is sometimes passed 1498 DUIterator_Last(const Node* node, ptrdiff_t offset) 1499 : DUIterator_Fast(node, offset) { } 1500 1501 void operator++(int dummy_to_specify_postfix_op) {} // do not use 1502 void operator<(int) {} // do not use 1503 1504 public: 1505 DUIterator_Last() { } 1506 // initialize to garbage 1507 1508 DUIterator_Last(const DUIterator_Last& that) = default; 1509 1510 void operator--() 1511 { _outp--; VDUI_ONLY(verify_step(1)); } 1512 1513 void operator-=(uint n) 1514 { _outp -= n; VDUI_ONLY(verify_step(n)); } 1515 1516 bool operator>=(DUIterator_Last& limit) { 1517 I_VDUI_ONLY(*this, this->verify(_node, true)); 1518 I_VDUI_ONLY(limit, limit.verify_limit()); 1519 return _outp >= limit._outp; 1520 } 1521 1522 DUIterator_Last& operator=(const DUIterator_Last& that) = default; 1523 }; 1524 1525 DUIterator_Last Node::last_outs(DUIterator_Last& imin) const { 1526 // Assign a limit pointer to the reference argument: 1527 imin = DUIterator_Last(this, 0); 1528 // Return the initial pointer: 1529 return DUIterator_Last(this, (ptrdiff_t)_outcnt - 1); 1530 } 1531 Node* Node::last_out(DUIterator_Last& i) const { 1532 I_VDUI_ONLY(i, i.verify(this)); 1533 return debug_only(i._last=) *i._outp; 1534 } 1535 1536 #endif //OPTO_DU_ITERATOR_ASSERT 1537 1538 #undef I_VDUI_ONLY 1539 #undef VDUI_ONLY 1540 1541 // An Iterator that truly follows the iterator pattern. Doesn't 1542 // support deletion but could be made to. 1543 // 1544 // for (SimpleDUIterator i(n); i.has_next(); i.next()) { 1545 // Node* m = i.get(); 1546 // 1547 class SimpleDUIterator : public StackObj { 1548 private: 1549 Node* node; 1550 DUIterator_Fast i; 1551 DUIterator_Fast imax; 1552 public: 1553 SimpleDUIterator(Node* n): node(n), i(n->fast_outs(imax)) {} 1554 bool has_next() { return i < imax; } 1555 void next() { i++; } 1556 Node* get() { return node->fast_out(i); } 1557 }; 1558 1559 1560 //----------------------------------------------------------------------------- 1561 // Map dense integer indices to Nodes. Uses classic doubling-array trick. 1562 // Abstractly provides an infinite array of Node*'s, initialized to null. 1563 // Note that the constructor just zeros things, and since I use Arena 1564 // allocation I do not need a destructor to reclaim storage. 1565 class Node_Array : public AnyObj { 1566 friend class VMStructs; 1567 protected: 1568 Arena* _a; // Arena to allocate in 1569 uint _max; 1570 Node** _nodes; 1571 void grow( uint i ); // Grow array node to fit 1572 public: 1573 Node_Array(Arena* a, uint max = OptoNodeListSize) : _a(a), _max(max) { 1574 _nodes = NEW_ARENA_ARRAY(a, Node*, max); 1575 clear(); 1576 } 1577 Node_Array() : Node_Array(Thread::current()->resource_area()) {} 1578 1579 NONCOPYABLE(Node_Array); 1580 Node_Array& operator=(Node_Array&&) = delete; 1581 // Allow move constructor for && (eg. capture return of function) 1582 Node_Array(Node_Array&&) = default; 1583 1584 Node *operator[] ( uint i ) const // Lookup, or null for not mapped 1585 { return (i<_max) ? _nodes[i] : (Node*)nullptr; } 1586 Node* at(uint i) const { assert(i<_max,"oob"); return _nodes[i]; } 1587 Node** adr() { return _nodes; } 1588 // Extend the mapping: index i maps to Node *n. 1589 void map( uint i, Node *n ) { if( i>=_max ) grow(i); _nodes[i] = n; } 1590 void insert( uint i, Node *n ); 1591 void remove( uint i ); // Remove, preserving order 1592 // Clear all entries in _nodes to null but keep storage 1593 void clear() { 1594 Copy::zero_to_bytes(_nodes, _max * sizeof(Node*)); 1595 } 1596 1597 uint max() const { return _max; } 1598 void dump() const; 1599 }; 1600 1601 class Node_List : public Node_Array { 1602 friend class VMStructs; 1603 uint _cnt; 1604 public: 1605 Node_List(uint max = OptoNodeListSize) : Node_Array(Thread::current()->resource_area(), max), _cnt(0) {} 1606 Node_List(Arena *a, uint max = OptoNodeListSize) : Node_Array(a, max), _cnt(0) {} 1607 1608 NONCOPYABLE(Node_List); 1609 Node_List& operator=(Node_List&&) = delete; 1610 // Allow move constructor for && (eg. capture return of function) 1611 Node_List(Node_List&&) = default; 1612 1613 bool contains(const Node* n) const { 1614 for (uint e = 0; e < size(); e++) { 1615 if (at(e) == n) return true; 1616 } 1617 return false; 1618 } 1619 void insert( uint i, Node *n ) { Node_Array::insert(i,n); _cnt++; } 1620 void remove( uint i ) { Node_Array::remove(i); _cnt--; } 1621 void push( Node *b ) { map(_cnt++,b); } 1622 void yank( Node *n ); // Find and remove 1623 Node *pop() { return _nodes[--_cnt]; } 1624 void clear() { _cnt = 0; Node_Array::clear(); } // retain storage 1625 void copy(const Node_List& from) { 1626 if (from._max > _max) { 1627 grow(from._max); 1628 } 1629 _cnt = from._cnt; 1630 Copy::conjoint_words_to_higher((HeapWord*)&from._nodes[0], (HeapWord*)&_nodes[0], from._max * sizeof(Node*)); 1631 } 1632 1633 uint size() const { return _cnt; } 1634 void dump() const; 1635 void dump_simple() const; 1636 }; 1637 1638 // Definition must appear after complete type definition of Node_List 1639 template <typename Callback, typename Check> 1640 void Node::visit_uses(Callback callback, Check is_boundary) const { 1641 ResourceMark rm; 1642 VectorSet visited; 1643 Node_List worklist; 1644 1645 // The initial worklist consists of the direct uses 1646 for (DUIterator_Fast kmax, k = fast_outs(kmax); k < kmax; k++) { 1647 Node* out = fast_out(k); 1648 if (!visited.test_set(out->_idx)) { worklist.push(out); } 1649 } 1650 1651 while (worklist.size() > 0) { 1652 Node* use = worklist.pop(); 1653 // Apply callback on boundary nodes 1654 if (is_boundary(use)) { 1655 callback(use); 1656 } else { 1657 // Not a boundary node, continue search 1658 for (DUIterator_Fast kmax, k = use->fast_outs(kmax); k < kmax; k++) { 1659 Node* out = use->fast_out(k); 1660 if (!visited.test_set(out->_idx)) { worklist.push(out); } 1661 } 1662 } 1663 } 1664 } 1665 1666 1667 //------------------------------Unique_Node_List------------------------------- 1668 class Unique_Node_List : public Node_List { 1669 friend class VMStructs; 1670 VectorSet _in_worklist; 1671 uint _clock_index; // Index in list where to pop from next 1672 public: 1673 Unique_Node_List() : Node_List(), _clock_index(0) {} 1674 Unique_Node_List(Arena *a) : Node_List(a), _in_worklist(a), _clock_index(0) {} 1675 1676 NONCOPYABLE(Unique_Node_List); 1677 Unique_Node_List& operator=(Unique_Node_List&&) = delete; 1678 // Allow move constructor for && (eg. capture return of function) 1679 Unique_Node_List(Unique_Node_List&&) = default; 1680 1681 void remove( Node *n ); 1682 bool member( Node *n ) { return _in_worklist.test(n->_idx) != 0; } 1683 VectorSet& member_set(){ return _in_worklist; } 1684 1685 void push(Node* b) { 1686 if( !_in_worklist.test_set(b->_idx) ) 1687 Node_List::push(b); 1688 } 1689 Node *pop() { 1690 if( _clock_index >= size() ) _clock_index = 0; 1691 Node *b = at(_clock_index); 1692 map( _clock_index, Node_List::pop()); 1693 if (size() != 0) _clock_index++; // Always start from 0 1694 _in_worklist.remove(b->_idx); 1695 return b; 1696 } 1697 Node *remove(uint i) { 1698 Node *b = Node_List::at(i); 1699 _in_worklist.remove(b->_idx); 1700 map(i,Node_List::pop()); 1701 return b; 1702 } 1703 void yank(Node *n) { 1704 _in_worklist.remove(n->_idx); 1705 Node_List::yank(n); 1706 } 1707 void clear() { 1708 _in_worklist.clear(); // Discards storage but grows automatically 1709 Node_List::clear(); 1710 _clock_index = 0; 1711 } 1712 void ensure_empty() { 1713 assert(size() == 0, "must be empty"); 1714 clear(); // just in case 1715 } 1716 1717 // Used after parsing to remove useless nodes before Iterative GVN 1718 void remove_useless_nodes(VectorSet& useful); 1719 1720 // If the idx of the Nodes change, we must recompute the VectorSet 1721 void recompute_idx_set() { 1722 _in_worklist.clear(); 1723 for (uint i = 0; i < size(); i++) { 1724 Node* n = at(i); 1725 _in_worklist.set(n->_idx); 1726 } 1727 } 1728 1729 #ifdef ASSERT 1730 bool is_subset_of(Unique_Node_List& other) { 1731 for (uint i = 0; i < size(); i++) { 1732 Node* n = at(i); 1733 if (!other.member(n)) { 1734 return false; 1735 } 1736 } 1737 return true; 1738 } 1739 #endif 1740 1741 bool contains(const Node* n) const { 1742 fatal("use faster member() instead"); 1743 return false; 1744 } 1745 1746 #ifndef PRODUCT 1747 void print_set() const { _in_worklist.print(); } 1748 #endif 1749 }; 1750 1751 // Unique_Mixed_Node_List 1752 // unique: nodes are added only once 1753 // mixed: allow new and old nodes 1754 class Unique_Mixed_Node_List : public ResourceObj { 1755 public: 1756 Unique_Mixed_Node_List() : _visited_set(cmpkey, hashkey) {} 1757 1758 void add(Node* node) { 1759 if (not_a_node(node)) { 1760 return; // Gracefully handle null, -1, 0xabababab, etc. 1761 } 1762 if (_visited_set[node] == nullptr) { 1763 _visited_set.Insert(node, node); 1764 _worklist.push(node); 1765 } 1766 } 1767 1768 Node* operator[] (uint i) const { 1769 return _worklist[i]; 1770 } 1771 1772 size_t size() { 1773 return _worklist.size(); 1774 } 1775 1776 private: 1777 Dict _visited_set; 1778 Node_List _worklist; 1779 }; 1780 1781 // Inline definition of Compile::record_for_igvn must be deferred to this point. 1782 inline void Compile::record_for_igvn(Node* n) { 1783 _igvn_worklist->push(n); 1784 } 1785 1786 // Inline definition of Compile::remove_for_igvn must be deferred to this point. 1787 inline void Compile::remove_for_igvn(Node* n) { 1788 _igvn_worklist->remove(n); 1789 } 1790 1791 //------------------------------Node_Stack------------------------------------- 1792 class Node_Stack { 1793 friend class VMStructs; 1794 protected: 1795 struct INode { 1796 Node *node; // Processed node 1797 uint indx; // Index of next node's child 1798 }; 1799 INode *_inode_top; // tos, stack grows up 1800 INode *_inode_max; // End of _inodes == _inodes + _max 1801 INode *_inodes; // Array storage for the stack 1802 Arena *_a; // Arena to allocate in 1803 void grow(); 1804 public: 1805 Node_Stack(int size) { 1806 size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize; 1807 _a = Thread::current()->resource_area(); 1808 _inodes = NEW_ARENA_ARRAY( _a, INode, max ); 1809 _inode_max = _inodes + max; 1810 _inode_top = _inodes - 1; // stack is empty 1811 } 1812 1813 Node_Stack(Arena *a, int size) : _a(a) { 1814 size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize; 1815 _inodes = NEW_ARENA_ARRAY( _a, INode, max ); 1816 _inode_max = _inodes + max; 1817 _inode_top = _inodes - 1; // stack is empty 1818 } 1819 1820 void pop() { 1821 assert(_inode_top >= _inodes, "node stack underflow"); 1822 --_inode_top; 1823 } 1824 void push(Node *n, uint i) { 1825 ++_inode_top; 1826 if (_inode_top >= _inode_max) grow(); 1827 INode *top = _inode_top; // optimization 1828 top->node = n; 1829 top->indx = i; 1830 } 1831 Node *node() const { 1832 return _inode_top->node; 1833 } 1834 Node* node_at(uint i) const { 1835 assert(_inodes + i <= _inode_top, "in range"); 1836 return _inodes[i].node; 1837 } 1838 uint index() const { 1839 return _inode_top->indx; 1840 } 1841 uint index_at(uint i) const { 1842 assert(_inodes + i <= _inode_top, "in range"); 1843 return _inodes[i].indx; 1844 } 1845 void set_node(Node *n) { 1846 _inode_top->node = n; 1847 } 1848 void set_index(uint i) { 1849 _inode_top->indx = i; 1850 } 1851 uint size_max() const { return (uint)pointer_delta(_inode_max, _inodes, sizeof(INode)); } // Max size 1852 uint size() const { return (uint)pointer_delta((_inode_top+1), _inodes, sizeof(INode)); } // Current size 1853 bool is_nonempty() const { return (_inode_top >= _inodes); } 1854 bool is_empty() const { return (_inode_top < _inodes); } 1855 void clear() { _inode_top = _inodes - 1; } // retain storage 1856 1857 // Node_Stack is used to map nodes. 1858 Node* find(uint idx) const; 1859 1860 NONCOPYABLE(Node_Stack); 1861 }; 1862 1863 1864 //-----------------------------Node_Notes-------------------------------------- 1865 // Debugging or profiling annotations loosely and sparsely associated 1866 // with some nodes. See Compile::node_notes_at for the accessor. 1867 class Node_Notes { 1868 friend class VMStructs; 1869 JVMState* _jvms; 1870 1871 public: 1872 Node_Notes(JVMState* jvms = nullptr) { 1873 _jvms = jvms; 1874 } 1875 1876 JVMState* jvms() { return _jvms; } 1877 void set_jvms(JVMState* x) { _jvms = x; } 1878 1879 // True if there is nothing here. 1880 bool is_clear() { 1881 return (_jvms == nullptr); 1882 } 1883 1884 // Make there be nothing here. 1885 void clear() { 1886 _jvms = nullptr; 1887 } 1888 1889 // Make a new, clean node notes. 1890 static Node_Notes* make(Compile* C) { 1891 Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1); 1892 nn->clear(); 1893 return nn; 1894 } 1895 1896 Node_Notes* clone(Compile* C) { 1897 Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1); 1898 (*nn) = (*this); 1899 return nn; 1900 } 1901 1902 // Absorb any information from source. 1903 bool update_from(Node_Notes* source) { 1904 bool changed = false; 1905 if (source != nullptr) { 1906 if (source->jvms() != nullptr) { 1907 set_jvms(source->jvms()); 1908 changed = true; 1909 } 1910 } 1911 return changed; 1912 } 1913 }; 1914 1915 // Inlined accessors for Compile::node_nodes that require the preceding class: 1916 inline Node_Notes* 1917 Compile::locate_node_notes(GrowableArray<Node_Notes*>* arr, 1918 int idx, bool can_grow) { 1919 assert(idx >= 0, "oob"); 1920 int block_idx = (idx >> _log2_node_notes_block_size); 1921 int grow_by = (block_idx - (arr == nullptr? 0: arr->length())); 1922 if (grow_by >= 0) { 1923 if (!can_grow) return nullptr; 1924 grow_node_notes(arr, grow_by + 1); 1925 } 1926 if (arr == nullptr) return nullptr; 1927 // (Every element of arr is a sub-array of length _node_notes_block_size.) 1928 return arr->at(block_idx) + (idx & (_node_notes_block_size-1)); 1929 } 1930 1931 inline bool 1932 Compile::set_node_notes_at(int idx, Node_Notes* value) { 1933 if (value == nullptr || value->is_clear()) 1934 return false; // nothing to write => write nothing 1935 Node_Notes* loc = locate_node_notes(_node_note_array, idx, true); 1936 assert(loc != nullptr, ""); 1937 return loc->update_from(value); 1938 } 1939 1940 1941 //------------------------------TypeNode--------------------------------------- 1942 // Node with a Type constant. 1943 class TypeNode : public Node { 1944 protected: 1945 virtual uint hash() const; // Check the type 1946 virtual bool cmp( const Node &n ) const; 1947 virtual uint size_of() const; // Size is bigger 1948 const Type* const _type; 1949 public: 1950 void set_type(const Type* t) { 1951 assert(t != nullptr, "sanity"); 1952 debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH); 1953 *(const Type**)&_type = t; // cast away const-ness 1954 // If this node is in the hash table, make sure it doesn't need a rehash. 1955 assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code"); 1956 } 1957 const Type* type() const { assert(_type != nullptr, "sanity"); return _type; }; 1958 TypeNode( const Type *t, uint required ) : Node(required), _type(t) { 1959 init_class_id(Class_Type); 1960 } 1961 virtual const Type* Value(PhaseGVN* phase) const; 1962 virtual const Type *bottom_type() const; 1963 virtual uint ideal_reg() const; 1964 #ifndef PRODUCT 1965 virtual void dump_spec(outputStream *st) const; 1966 virtual void dump_compact_spec(outputStream *st) const; 1967 #endif 1968 }; 1969 1970 #include "opto/opcodes.hpp" 1971 1972 #define Op_IL(op) \ 1973 inline int Op_ ## op(BasicType bt) { \ 1974 assert(bt == T_INT || bt == T_LONG, "only for int or longs"); \ 1975 if (bt == T_INT) { \ 1976 return Op_## op ## I; \ 1977 } \ 1978 return Op_## op ## L; \ 1979 } 1980 1981 Op_IL(Add) 1982 Op_IL(Sub) 1983 Op_IL(Mul) 1984 Op_IL(URShift) 1985 Op_IL(LShift) 1986 Op_IL(Xor) 1987 Op_IL(Cmp) 1988 1989 inline int Op_ConIL(BasicType bt) { 1990 assert(bt == T_INT || bt == T_LONG, "only for int or longs"); 1991 if (bt == T_INT) { 1992 return Op_ConI; 1993 } 1994 return Op_ConL; 1995 } 1996 1997 inline int Op_Cmp_unsigned(BasicType bt) { 1998 assert(bt == T_INT || bt == T_LONG, "only for int or longs"); 1999 if (bt == T_INT) { 2000 return Op_CmpU; 2001 } 2002 return Op_CmpUL; 2003 } 2004 2005 inline int Op_Cast(BasicType bt) { 2006 assert(bt == T_INT || bt == T_LONG, "only for int or longs"); 2007 if (bt == T_INT) { 2008 return Op_CastII; 2009 } 2010 return Op_CastLL; 2011 } 2012 2013 #endif // SHARE_OPTO_NODE_HPP