1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2024, 2025, Alibaba Group Holding Limited. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #ifndef SHARE_OPTO_NODE_HPP
27 #define SHARE_OPTO_NODE_HPP
28
29 #include "libadt/vectset.hpp"
30 #include "opto/compile.hpp"
31 #include "opto/type.hpp"
32 #include "utilities/copy.hpp"
33
34 // Portions of code courtesy of Clifford Click
35
36 // Optimization - Graph Style
37
38
39 class AbstractLockNode;
40 class AddNode;
41 class AddPNode;
42 class AliasInfo;
43 class AllocateArrayNode;
44 class AllocateNode;
45 class ArrayCopyNode;
46 class BaseCountedLoopNode;
47 class BaseCountedLoopEndNode;
48 class BlackholeNode;
49 class Block;
50 class BoolNode;
51 class BoxLockNode;
52 class CMoveNode;
53 class CallDynamicJavaNode;
54 class CallJavaNode;
55 class CallLeafNode;
56 class CallLeafNoFPNode;
57 class CallLeafPureNode;
58 class CallNode;
59 class CallRuntimeNode;
60 class CallStaticJavaNode;
61 class CastFFNode;
62 class CastHHNode;
63 class CastDDNode;
64 class CastVVNode;
65 class CastIINode;
66 class CastLLNode;
67 class CastPPNode;
68 class CatchNode;
69 class CatchProjNode;
70 class CheckCastPPNode;
71 class ClearArrayNode;
72 class CmpNode;
73 class CodeBuffer;
74 class ConstraintCastNode;
75 class ConNode;
76 class ConINode;
77 class ConvertNode;
78 class CompareAndSwapNode;
79 class CompareAndExchangeNode;
80 class CountedLoopNode;
81 class CountedLoopEndNode;
82 class DecodeNarrowPtrNode;
83 class DecodeNNode;
84 class DecodeNKlassNode;
85 class EncodeNarrowPtrNode;
86 class EncodePNode;
87 class EncodePKlassNode;
88 class FastLockNode;
89 class FastUnlockNode;
90 class HaltNode;
91 class IfNode;
92 class IfProjNode;
93 class IfFalseNode;
94 class IfTrueNode;
95 class InitializeNode;
96 class JVMState;
97 class JumpNode;
98 class JumpProjNode;
99 class LoadNode;
100 class LoadStoreNode;
101 class LoadStoreConditionalNode;
102 class LockNode;
103 class LongCountedLoopNode;
104 class LongCountedLoopEndNode;
105 class LoopNode;
106 class LShiftNode;
107 class MachBranchNode;
108 class MachCallDynamicJavaNode;
109 class MachCallJavaNode;
110 class MachCallLeafNode;
111 class MachCallNode;
112 class MachCallRuntimeNode;
113 class MachCallStaticJavaNode;
114 class MachConstantBaseNode;
115 class MachConstantNode;
116 class MachGotoNode;
117 class MachIfNode;
118 class MachJumpNode;
119 class MachNode;
120 class MachNullCheckNode;
121 class MachProjNode;
122 class MachReturnNode;
123 class MachSafePointNode;
124 class MachSpillCopyNode;
125 class MachTempNode;
126 class MachMergeNode;
127 class MachMemBarNode;
128 class Matcher;
129 class MemBarNode;
130 class MemBarStoreStoreNode;
131 class MemNode;
132 class MergeMemNode;
133 class MoveNode;
134 class MulNode;
135 class MultiNode;
136 class MultiBranchNode;
137 class NegNode;
138 class NegVNode;
139 class NeverBranchNode;
140 class Opaque1Node;
141 class OpaqueLoopInitNode;
142 class OpaqueLoopStrideNode;
143 class OpaqueMultiversioningNode;
144 class OpaqueNotNullNode;
145 class OpaqueInitializedAssertionPredicateNode;
146 class OpaqueTemplateAssertionPredicateNode;
147 class OuterStripMinedLoopNode;
148 class OuterStripMinedLoopEndNode;
149 class Node;
150 class Node_Array;
151 class Node_List;
152 class Node_Stack;
153 class OopMap;
154 class ParmNode;
155 class ParsePredicateNode;
156 class PCTableNode;
157 class PhaseCCP;
158 class PhaseGVN;
159 class PhaseIdealLoop;
160 class PhaseIterGVN;
161 class PhaseRegAlloc;
162 class PhaseTransform;
163 class PhaseValues;
164 class PhiNode;
165 class Pipeline;
166 class PopulateIndexNode;
167 class ProjNode;
168 class RangeCheckNode;
169 class ReductionNode;
170 class RegMask;
171 class RegionNode;
172 class RootNode;
173 class SafePointNode;
174 class SafePointScalarObjectNode;
175 class SafePointScalarMergeNode;
176 class SaturatingVectorNode;
177 class StartNode;
178 class State;
179 class StoreNode;
180 class SubNode;
181 class SubTypeCheckNode;
182 class Type;
183 class TypeNode;
184 class UnlockNode;
185 class VectorNode;
186 class LoadVectorNode;
187 class LoadVectorMaskedNode;
188 class StoreVectorMaskedNode;
189 class LoadVectorGatherNode;
190 class LoadVectorGatherMaskedNode;
191 class StoreVectorNode;
192 class StoreVectorScatterNode;
193 class StoreVectorScatterMaskedNode;
194 class VerifyVectorAlignmentNode;
195 class VectorMaskCmpNode;
196 class VectorUnboxNode;
197 class VectorSet;
198 class VectorReinterpretNode;
199 class ShiftVNode;
200 class MulVLNode;
201 class ExpandVNode;
202 class CompressVNode;
203 class CompressMNode;
204 class C2_MacroAssembler;
205
206
207 #ifndef OPTO_DU_ITERATOR_ASSERT
208 #ifdef ASSERT
209 #define OPTO_DU_ITERATOR_ASSERT 1
210 #else
211 #define OPTO_DU_ITERATOR_ASSERT 0
212 #endif
213 #endif //OPTO_DU_ITERATOR_ASSERT
214
215 #if OPTO_DU_ITERATOR_ASSERT
216 class DUIterator;
217 class DUIterator_Fast;
218 class DUIterator_Last;
219 #else
220 typedef uint DUIterator;
221 typedef Node** DUIterator_Fast;
222 typedef Node** DUIterator_Last;
223 #endif
224
225 typedef ResizeableHashTable<Node*, Node*, AnyObj::RESOURCE_AREA, mtCompiler> OrigToNewHashtable;
226
227 // Node Sentinel
228 #define NodeSentinel (Node*)-1
229
230 // Unknown count frequency
231 #define COUNT_UNKNOWN (-1.0f)
232
233 //------------------------------Node-------------------------------------------
234 // Nodes define actions in the program. They create values, which have types.
235 // They are both vertices in a directed graph and program primitives. Nodes
236 // are labeled; the label is the "opcode", the primitive function in the lambda
237 // calculus sense that gives meaning to the Node. Node inputs are ordered (so
238 // that "a-b" is different from "b-a"). The inputs to a Node are the inputs to
239 // the Node's function. These inputs also define a Type equation for the Node.
240 // Solving these Type equations amounts to doing dataflow analysis.
241 // Control and data are uniformly represented in the graph. Finally, Nodes
242 // have a unique dense integer index which is used to index into side arrays
243 // whenever I have phase-specific information.
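//
// For example, a subtract node computing "a-b" conventionally carries an
// optional control input at in(0) and its operands at in(1) == a and
// in(2) == b; swapping in(1) and in(2) would denote the different
// function "b-a".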
244
245 class Node {
246
247 // Lots of restrictions on cloning Nodes
248 NONCOPYABLE(Node);
249
250 public:
251 friend class Compile;
252 #if OPTO_DU_ITERATOR_ASSERT
253 friend class DUIterator_Common;
254 friend class DUIterator;
255 friend class DUIterator_Fast;
256 friend class DUIterator_Last;
257 #endif
258
  // Because Nodes come and go, I define an Arena of Node structures to pull
  // from. This should allow fast access to node creation & deletion. This
  // field is a local cache of a value defined in some "program fragment" of
  // which these Nodes are just a part.
263
264 inline void* operator new(size_t x) throw() {
265 Compile* C = Compile::current();
266 Node* n = (Node*)C->node_arena()->AmallocWords(x);
267 return (void*)n;
268 }
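  // For example, within an active compilation, a plain "new AddINode(a, b)"
  // (AddINode used purely as an illustration) allocates the node from
  // Compile::current()->node_arena().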
269
270 // Delete is a NOP
271 void operator delete( void *ptr ) {}
272 // Fancy destructor; eagerly attempt to reclaim Node numberings and storage
273 void destruct(PhaseValues* phase);
274
  // Create a new Node. Required is the number of inputs required for
  // semantic correctness.
277 Node( uint required );
278
279 // Create a new Node with given input edges.
280 // This version requires use of the "edge-count" new.
281 // E.g. new (C,3) FooNode( C, nullptr, left, right );
282 Node( Node *n0 );
283 Node( Node *n0, Node *n1 );
284 Node( Node *n0, Node *n1, Node *n2 );
285 Node( Node *n0, Node *n1, Node *n2, Node *n3 );
286 Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4 );
287 Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4, Node *n5 );
288 Node( Node *n0, Node *n1, Node *n2, Node *n3,
289 Node *n4, Node *n5, Node *n6 );
290
291 // Clone an inherited Node given only the base Node type.
292 Node* clone() const;
293
294 // Clone a Node, immediately supplying one or two new edges.
295 // The first and second arguments, if non-null, replace in(1) and in(2),
296 // respectively.
297 Node* clone_with_data_edge(Node* in1, Node* in2 = nullptr) const {
298 Node* nn = clone();
299 if (in1 != nullptr) nn->set_req(1, in1);
300 if (in2 != nullptr) nn->set_req(2, in2);
301 return nn;
302 }
303
304 private:
305 // Shared setup for the above constructors.
306 // Handles all interactions with Compile::current.
307 // Puts initial values in all Node fields except _idx.
308 // Returns the initial value for _idx, which cannot
309 // be initialized by assignment.
310 inline int Init(int req);
311
312 //----------------- input edge handling
313 protected:
314 friend class PhaseCFG; // Access to address of _in array elements
315 Node **_in; // Array of use-def references to Nodes
316 Node **_out; // Array of def-use references to Nodes
317
318 // Input edges are split into two categories. Required edges are required
319 // for semantic correctness; order is important and nulls are allowed.
320 // Precedence edges are used to help determine execution order and are
321 // added, e.g., for scheduling purposes. They are unordered and not
322 // duplicated; they have no embedded nulls. Edges from 0 to _cnt-1
323 // are required, from _cnt to _max-1 are precedence edges.
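  // For example, a node with _cnt == 3 and _max == 5 has required inputs
  // in(0)..in(2) (any of which may be null) and precedence inputs
  // in(3)..in(4).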
324 node_idx_t _cnt; // Total number of required Node inputs.
325
326 node_idx_t _max; // Actual length of input array.
327
328 // Output edges are an unordered list of def-use edges which exactly
329 // correspond to required input edges which point from other nodes
330 // to this one. Thus the count of the output edges is the number of
331 // users of this node.
332 node_idx_t _outcnt; // Total number of Node outputs.
333
334 node_idx_t _outmax; // Actual length of output array.
335
336 // Grow the actual input array to the next larger power-of-2 bigger than len.
337 void grow( uint len );
338 // Grow the output array to the next larger power-of-2 bigger than len.
339 void out_grow( uint len );
340 // Resize input or output array to grow it to the next larger power-of-2
341 // bigger than len.
342 void resize_array(Node**& array, node_idx_t& max_size, uint len, bool needs_clearing);
343
344 public:
345 // Each Node is assigned a unique small/dense number. This number is used
346 // to index into auxiliary arrays of data and bit vectors.
347 // The value of _idx can be changed using the set_idx() method.
348 //
349 // The PhaseRenumberLive phase renumbers nodes based on liveness information.
350 // Therefore, it updates the value of the _idx field. The parse-time _idx is
351 // preserved in _parse_idx.
352 node_idx_t _idx;
353 DEBUG_ONLY(const node_idx_t _parse_idx;)
354 // IGV node identifier. Two nodes, possibly in different compilation phases,
355 // have the same IGV identifier if (and only if) they are the very same node
356 // (same memory address) or one is "derived" from the other (by e.g.
357 // renumbering or matching). This identifier makes it possible to follow the
358 // entire lifetime of a node in IGV even if its C2 identifier (_idx) changes.
359 NOT_PRODUCT(node_idx_t _igv_idx;)
360
361 // Get the (read-only) number of input edges
362 uint req() const { return _cnt; }
363 uint len() const { return _max; }
364 // Get the (read-only) number of output edges
365 uint outcnt() const { return _outcnt; }
366
367 #if OPTO_DU_ITERATOR_ASSERT
368 // Iterate over the out-edges of this node. Deletions are illegal.
369 inline DUIterator outs() const;
370 // Use this when the out array might have changed to suppress asserts.
371 inline DUIterator& refresh_out_pos(DUIterator& i) const;
372 // Does the node have an out at this position? (Used for iteration.)
373 inline bool has_out(DUIterator& i) const;
374 inline Node* out(DUIterator& i) const;
375 // Iterate over the out-edges of this node. All changes are illegal.
376 inline DUIterator_Fast fast_outs(DUIterator_Fast& max) const;
377 inline Node* fast_out(DUIterator_Fast& i) const;
378 // Iterate over the out-edges of this node, deleting one at a time.
379 inline DUIterator_Last last_outs(DUIterator_Last& min) const;
380 inline Node* last_out(DUIterator_Last& i) const;
381 // The inline bodies of all these methods are after the iterator definitions.
382 #else
383 // Iterate over the out-edges of this node. Deletions are illegal.
384 // This iteration uses integral indexes, to decouple from array reallocations.
385 DUIterator outs() const { return 0; }
386 // Use this when the out array might have changed to suppress asserts.
387 DUIterator refresh_out_pos(DUIterator i) const { return i; }
388
389 // Reference to the i'th output Node. Error if out of bounds.
390 Node* out(DUIterator i) const { assert(i < _outcnt, "oob"); return _out[i]; }
391 // Does the node have an out at this position? (Used for iteration.)
392 bool has_out(DUIterator i) const { return i < _outcnt; }
393
394 // Iterate over the out-edges of this node. All changes are illegal.
395 // This iteration uses a pointer internal to the out array.
396 DUIterator_Fast fast_outs(DUIterator_Fast& max) const {
397 Node** out = _out;
398 // Assign a limit pointer to the reference argument:
399 max = out + (ptrdiff_t)_outcnt;
400 // Return the base pointer:
401 return out;
402 }
403 Node* fast_out(DUIterator_Fast i) const { return *i; }
404 // Iterate over the out-edges of this node, deleting one at a time.
405 // This iteration uses a pointer internal to the out array.
406 DUIterator_Last last_outs(DUIterator_Last& min) const {
407 Node** out = _out;
408 // Assign a limit pointer to the reference argument:
409 min = out;
410 // Return the pointer to the start of the iteration:
411 return out + (ptrdiff_t)_outcnt - 1;
412 }
413 Node* last_out(DUIterator_Last i) const { return *i; }
414 #endif
415
416 // Reference to the i'th input Node. Error if out of bounds.
417 Node* in(uint i) const { assert(i < _max, "oob: i=%d, _max=%d", i, _max); return _in[i]; }
418 // Reference to the i'th input Node. null if out of bounds.
419 Node* lookup(uint i) const { return ((i < _max) ? _in[i] : nullptr); }
420 // Reference to the i'th output Node. Error if out of bounds.
  // Use this accessor sparingly. We are trying to use iterators instead.
422 Node* raw_out(uint i) const { assert(i < _outcnt,"oob"); return _out[i]; }
423 // Return the unique out edge.
424 Node* unique_out() const { assert(_outcnt==1,"not unique"); return _out[0]; }
425 // Delete out edge at position 'i' by moving last out edge to position 'i'
426 void raw_del_out(uint i) {
427 assert(i < _outcnt,"oob");
428 assert(_outcnt > 0,"oob");
429 #if OPTO_DU_ITERATOR_ASSERT
430 // Record that a change happened here.
431 DEBUG_ONLY(_last_del = _out[i]; ++_del_tick);
432 #endif
433 _out[i] = _out[--_outcnt];
434 // Smash the old edge so it can't be used accidentally.
435 DEBUG_ONLY(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
436 }
437
438 #ifdef ASSERT
439 bool is_dead() const;
440 static bool is_not_dead(const Node* n);
441 bool is_reachable_from_root() const;
442 #endif
443 // Check whether node has become unreachable
444 bool is_unreachable(PhaseIterGVN &igvn) const;
445
446 // Set a required input edge, also updates corresponding output edge
447 void add_req( Node *n ); // Append a NEW required input
448 void add_req( Node *n0, Node *n1 ) {
449 add_req(n0); add_req(n1); }
450 void add_req( Node *n0, Node *n1, Node *n2 ) {
451 add_req(n0); add_req(n1); add_req(n2); }
452 void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n).
453 void del_req( uint idx ); // Delete required edge & compact
454 void del_req_ordered( uint idx ); // Delete required edge & compact with preserved order
455 void ins_req( uint i, Node *n ); // Insert a NEW required input
456 void set_req( uint i, Node *n ) {
457 assert( is_not_dead(n), "can not use dead node");
458 assert( i < _cnt, "oob: i=%d, _cnt=%d", i, _cnt);
459 assert( !VerifyHashTableKeys || _hash_lock == 0,
460 "remove node from hash table before modifying it");
461 Node** p = &_in[i]; // cache this._in, across the del_out call
462 if (*p != nullptr) (*p)->del_out((Node *)this);
463 (*p) = n;
464 if (n != nullptr) n->add_out((Node *)this);
465 Compile::current()->record_modified_node(this);
466 }
467 // Light version of set_req() to init inputs after node creation.
468 void init_req( uint i, Node *n ) {
469 assert( (i == 0 && this == n) ||
470 is_not_dead(n), "can not use dead node");
471 assert( i < _cnt, "oob");
472 assert( !VerifyHashTableKeys || _hash_lock == 0,
473 "remove node from hash table before modifying it");
474 assert( _in[i] == nullptr, "sanity");
475 _in[i] = n;
476 if (n != nullptr) n->add_out((Node *)this);
477 Compile::current()->record_modified_node(this);
478 }
479 // Find first occurrence of n among my edges:
480 int find_edge(Node* n);
481 int find_prec_edge(Node* n) {
482 for (uint i = req(); i < len(); i++) {
483 if (_in[i] == n) return i;
484 if (_in[i] == nullptr) {
485 DEBUG_ONLY( while ((++i) < len()) assert(_in[i] == nullptr, "Gap in prec edges!"); )
486 break;
487 }
488 }
489 return -1;
490 }
491 int replace_edge(Node* old, Node* neww, PhaseGVN* gvn = nullptr);
492 int replace_edges_in_range(Node* old, Node* neww, int start, int end, PhaseGVN* gvn);
493 // null out all inputs to eliminate incoming Def-Use edges.
494 void disconnect_inputs(Compile* C);
495
496 // Quickly, return true if and only if I am Compile::current()->top().
497 bool is_top() const {
498 assert((this == (Node*) Compile::current()->top()) == (_out == nullptr), "");
499 return (_out == nullptr);
500 }
501 // Reaffirm invariants for is_top. (Only from Compile::set_cached_top_node.)
502 void setup_is_top();
503
504 // Strip away casting. (It is depth-limited.)
505 Node* uncast(bool keep_deps = false) const;
506 // Return whether two Nodes are equivalent, after stripping casting.
507 bool eqv_uncast(const Node* n, bool keep_deps = false) const {
508 return (this->uncast(keep_deps) == n->uncast(keep_deps));
509 }
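  // For example, if "cast" is a CheckCastPP node whose pointer input is p,
  // then cast->eqv_uncast(p) is true: uncast() strips the cast, so both
  // sides compare equal as p.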
510
  // Find an out (use) of the current node with the given opcode.
512 Node* find_out_with(int opcode);
513 // Return true if the current node has an out that matches opcode.
514 bool has_out_with(int opcode);
515 // Return true if the current node has an out that matches any of the opcodes.
516 bool has_out_with(int opcode1, int opcode2, int opcode3, int opcode4);
517
518 private:
519 static Node* uncast_helper(const Node* n, bool keep_deps);
520
521 // Add an output edge to the end of the list
522 void add_out( Node *n ) {
523 if (is_top()) return;
524 if( _outcnt == _outmax ) out_grow(_outcnt);
525 _out[_outcnt++] = n;
526 }
527 // Delete an output edge
528 void del_out( Node *n ) {
529 if (is_top()) return;
530 Node** outp = &_out[_outcnt];
531 // Find and remove n
532 do {
533 assert(outp > _out, "Missing Def-Use edge");
534 } while (*--outp != n);
535 *outp = _out[--_outcnt];
536 // Smash the old edge so it can't be used accidentally.
537 DEBUG_ONLY(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
538 // Record that a change happened here.
539 #if OPTO_DU_ITERATOR_ASSERT
540 DEBUG_ONLY(_last_del = n; ++_del_tick);
541 #endif
542 }
543 // Close gap after removing edge.
544 void close_prec_gap_at(uint gap) {
545 assert(_cnt <= gap && gap < _max, "no valid prec edge");
546 uint i = gap;
547 Node *last = nullptr;
548 for (; i < _max-1; ++i) {
549 Node *next = _in[i+1];
550 if (next == nullptr) break;
551 last = next;
552 }
553 _in[gap] = last; // Move last slot to empty one.
554 _in[i] = nullptr; // null out last slot.
555 }
556
557 public:
558 // Globally replace this node by a given new node, updating all uses.
559 void replace_by(Node* new_node);
560 // Globally replace this node by a given new node, updating all uses
561 // and cutting input edges of old node.
562 void subsume_by(Node* new_node, Compile* c) {
563 replace_by(new_node);
564 disconnect_inputs(c);
565 }
566 void set_req_X(uint i, Node *n, PhaseIterGVN *igvn);
567 void set_req_X(uint i, Node *n, PhaseGVN *gvn);
568 // Find the one non-null required input. RegionNode only
569 Node *nonnull_req() const;
570 // Add or remove precedence edges
571 void add_prec( Node *n );
572 void rm_prec( uint i );
573
574 // Note: prec(i) will not necessarily point to n if edge already exists.
575 void set_prec( uint i, Node *n ) {
576 assert(i < _max, "oob: i=%d, _max=%d", i, _max);
577 assert(is_not_dead(n), "can not use dead node");
578 assert(i >= _cnt, "not a precedence edge");
579 // Avoid spec violation: duplicated prec edge.
580 if (_in[i] == n) return;
581 if (n == nullptr || find_prec_edge(n) != -1) {
582 rm_prec(i);
583 return;
584 }
585 if (_in[i] != nullptr) _in[i]->del_out((Node *)this);
586 _in[i] = n;
587 n->add_out((Node *)this);
588 Compile::current()->record_modified_node(this);
589 }
590
591 // Set this node's index, used by cisc_version to replace current node
592 void set_idx(uint new_idx) {
593 _idx = new_idx;
594 }
595 // Swap input edge order. (Edge indexes i1 and i2 are usually 1 and 2.)
596 void swap_edges(uint i1, uint i2) {
597 DEBUG_ONLY(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
598 // Def-Use info is unchanged
599 Node* n1 = in(i1);
600 Node* n2 = in(i2);
601 _in[i1] = n2;
602 _in[i2] = n1;
603 // If this node is in the hash table, make sure it doesn't need a rehash.
604 assert(check_hash == NO_HASH || check_hash == hash(), "edge swap must preserve hash code");
605 // Flip swapped edges flag.
606 if (has_swapped_edges()) {
607 remove_flag(Node::Flag_has_swapped_edges);
608 } else {
609 add_flag(Node::Flag_has_swapped_edges);
610 }
611 }
612
613 // Iterators over input Nodes for a Node X are written as:
  // for( i = 0; i < X.req(); i++ ) ... X.in(i) ...
615 // NOTE: Required edges can contain embedded null pointers.
616
617 //----------------- Other Node Properties
618
619 // Generate class IDs for (some) ideal nodes so that it is possible to determine
620 // the type of a node using a non-virtual method call (the method is_<Node>() below).
621 //
622 // A class ID of an ideal node is a set of bits. In a class ID, a single bit determines
623 // the type of the node the ID represents; another subset of an ID's bits are reserved
624 // for the superclasses of the node represented by the ID.
625 //
  // By design, if A is a supertype of B, A.is_B() returns false and B.is_A()
  // returns true. A.is_A() returns true.
628 //
629 // If two classes, A and B, have the same superclass, a different bit of A's class id
630 // is reserved for A's type than for B's type. That bit is specified by the third
631 // parameter in the macro DEFINE_CLASS_ID.
632 //
633 // By convention, classes with deeper hierarchy are declared first. Moreover,
634 // classes with the same hierarchy depth are sorted by usage frequency.
635 //
636 // The query method masks the bits to cut off bits of subclasses and then compares
637 // the result with the class id (see the macro DEFINE_CLASS_QUERY below).
638 //
639 // Class_MachCall=30, ClassMask_MachCall=31
640 // 12 8 4 0
641 // 0 0 0 0 0 0 0 0 1 1 1 1 0
642 // | | | |
643 // | | | Bit_Mach=2
644 // | | Bit_MachReturn=4
645 // | Bit_MachSafePoint=8
646 // Bit_MachCall=16
647 //
648 // Class_CountedLoop=56, ClassMask_CountedLoop=63
649 // 12 8 4 0
650 // 0 0 0 0 0 0 0 1 1 1 0 0 0
651 // | | |
652 // | | Bit_Region=8
653 // | Bit_Loop=16
654 // Bit_CountedLoop=32
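  //
  // As a concrete expansion of the macro below, DEFINE_CLASS_ID(Call, SafePoint, 0)
  // produces:
  //   Bit_Call       = Bit_SafePoint << (1 + 0)
  //   Class_Call     = Class_SafePoint + Bit_Call
  //   ClassMask_Call = (Bit_Call << 1) - 1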
655
656 #define DEFINE_CLASS_ID(cl, supcl, subn) \
657 Bit_##cl = (Class_##supcl == 0) ? 1 << subn : (Bit_##supcl) << (1 + subn) , \
658 Class_##cl = Class_##supcl + Bit_##cl , \
659 ClassMask_##cl = ((Bit_##cl << 1) - 1) ,
660
661 // This enum is used only for C2 ideal and mach nodes with is_<node>() methods
662 // so that its values fit into 32 bits.
663 enum NodeClasses {
664 Bit_Node = 0x00000000,
665 Class_Node = 0x00000000,
666 ClassMask_Node = 0xFFFFFFFF,
667
668 DEFINE_CLASS_ID(Multi, Node, 0)
669 DEFINE_CLASS_ID(SafePoint, Multi, 0)
670 DEFINE_CLASS_ID(Call, SafePoint, 0)
671 DEFINE_CLASS_ID(CallJava, Call, 0)
672 DEFINE_CLASS_ID(CallStaticJava, CallJava, 0)
673 DEFINE_CLASS_ID(CallDynamicJava, CallJava, 1)
674 DEFINE_CLASS_ID(CallRuntime, Call, 1)
675 DEFINE_CLASS_ID(CallLeaf, CallRuntime, 0)
676 DEFINE_CLASS_ID(CallLeafNoFP, CallLeaf, 0)
677 DEFINE_CLASS_ID(CallLeafPure, CallLeaf, 1)
678 DEFINE_CLASS_ID(Allocate, Call, 2)
679 DEFINE_CLASS_ID(AllocateArray, Allocate, 0)
680 DEFINE_CLASS_ID(AbstractLock, Call, 3)
681 DEFINE_CLASS_ID(Lock, AbstractLock, 0)
682 DEFINE_CLASS_ID(Unlock, AbstractLock, 1)
683 DEFINE_CLASS_ID(ArrayCopy, Call, 4)
684 DEFINE_CLASS_ID(MultiBranch, Multi, 1)
685 DEFINE_CLASS_ID(PCTable, MultiBranch, 0)
686 DEFINE_CLASS_ID(Catch, PCTable, 0)
687 DEFINE_CLASS_ID(Jump, PCTable, 1)
688 DEFINE_CLASS_ID(If, MultiBranch, 1)
689 DEFINE_CLASS_ID(BaseCountedLoopEnd, If, 0)
690 DEFINE_CLASS_ID(CountedLoopEnd, BaseCountedLoopEnd, 0)
691 DEFINE_CLASS_ID(LongCountedLoopEnd, BaseCountedLoopEnd, 1)
692 DEFINE_CLASS_ID(RangeCheck, If, 1)
693 DEFINE_CLASS_ID(OuterStripMinedLoopEnd, If, 2)
694 DEFINE_CLASS_ID(ParsePredicate, If, 3)
695 DEFINE_CLASS_ID(NeverBranch, MultiBranch, 2)
696 DEFINE_CLASS_ID(Start, Multi, 2)
697 DEFINE_CLASS_ID(MemBar, Multi, 3)
698 DEFINE_CLASS_ID(Initialize, MemBar, 0)
699 DEFINE_CLASS_ID(MemBarStoreStore, MemBar, 1)
700
701 DEFINE_CLASS_ID(Mach, Node, 1)
702 DEFINE_CLASS_ID(MachReturn, Mach, 0)
703 DEFINE_CLASS_ID(MachSafePoint, MachReturn, 0)
704 DEFINE_CLASS_ID(MachCall, MachSafePoint, 0)
705 DEFINE_CLASS_ID(MachCallJava, MachCall, 0)
706 DEFINE_CLASS_ID(MachCallStaticJava, MachCallJava, 0)
707 DEFINE_CLASS_ID(MachCallDynamicJava, MachCallJava, 1)
708 DEFINE_CLASS_ID(MachCallRuntime, MachCall, 1)
709 DEFINE_CLASS_ID(MachCallLeaf, MachCallRuntime, 0)
710 DEFINE_CLASS_ID(MachBranch, Mach, 1)
711 DEFINE_CLASS_ID(MachIf, MachBranch, 0)
712 DEFINE_CLASS_ID(MachGoto, MachBranch, 1)
713 DEFINE_CLASS_ID(MachNullCheck, MachBranch, 2)
714 DEFINE_CLASS_ID(MachSpillCopy, Mach, 2)
715 DEFINE_CLASS_ID(MachTemp, Mach, 3)
716 DEFINE_CLASS_ID(MachConstantBase, Mach, 4)
717 DEFINE_CLASS_ID(MachConstant, Mach, 5)
718 DEFINE_CLASS_ID(MachJump, MachConstant, 0)
719 DEFINE_CLASS_ID(MachMerge, Mach, 6)
720 DEFINE_CLASS_ID(MachMemBar, Mach, 7)
721
722 DEFINE_CLASS_ID(Type, Node, 2)
723 DEFINE_CLASS_ID(Phi, Type, 0)
724 DEFINE_CLASS_ID(ConstraintCast, Type, 1)
725 DEFINE_CLASS_ID(CastII, ConstraintCast, 0)
726 DEFINE_CLASS_ID(CheckCastPP, ConstraintCast, 1)
727 DEFINE_CLASS_ID(CastLL, ConstraintCast, 2)
728 DEFINE_CLASS_ID(CastFF, ConstraintCast, 3)
729 DEFINE_CLASS_ID(CastDD, ConstraintCast, 4)
730 DEFINE_CLASS_ID(CastVV, ConstraintCast, 5)
731 DEFINE_CLASS_ID(CastPP, ConstraintCast, 6)
732 DEFINE_CLASS_ID(CastHH, ConstraintCast, 7)
733 DEFINE_CLASS_ID(CMove, Type, 3)
734 DEFINE_CLASS_ID(SafePointScalarObject, Type, 4)
735 DEFINE_CLASS_ID(DecodeNarrowPtr, Type, 5)
736 DEFINE_CLASS_ID(DecodeN, DecodeNarrowPtr, 0)
737 DEFINE_CLASS_ID(DecodeNKlass, DecodeNarrowPtr, 1)
738 DEFINE_CLASS_ID(EncodeNarrowPtr, Type, 6)
739 DEFINE_CLASS_ID(EncodeP, EncodeNarrowPtr, 0)
740 DEFINE_CLASS_ID(EncodePKlass, EncodeNarrowPtr, 1)
741 DEFINE_CLASS_ID(Vector, Type, 7)
742 DEFINE_CLASS_ID(VectorMaskCmp, Vector, 0)
743 DEFINE_CLASS_ID(VectorUnbox, Vector, 1)
744 DEFINE_CLASS_ID(VectorReinterpret, Vector, 2)
745 DEFINE_CLASS_ID(ShiftV, Vector, 3)
746 DEFINE_CLASS_ID(CompressV, Vector, 4)
747 DEFINE_CLASS_ID(ExpandV, Vector, 5)
748 DEFINE_CLASS_ID(CompressM, Vector, 6)
749 DEFINE_CLASS_ID(Reduction, Vector, 7)
750 DEFINE_CLASS_ID(NegV, Vector, 8)
751 DEFINE_CLASS_ID(SaturatingVector, Vector, 9)
752 DEFINE_CLASS_ID(MulVL, Vector, 10)
753 DEFINE_CLASS_ID(Con, Type, 8)
754 DEFINE_CLASS_ID(ConI, Con, 0)
755 DEFINE_CLASS_ID(SafePointScalarMerge, Type, 9)
756 DEFINE_CLASS_ID(Convert, Type, 10)
757
758
759 DEFINE_CLASS_ID(Proj, Node, 3)
760 DEFINE_CLASS_ID(CatchProj, Proj, 0)
761 DEFINE_CLASS_ID(JumpProj, Proj, 1)
762 DEFINE_CLASS_ID(IfProj, Proj, 2)
763 DEFINE_CLASS_ID(IfTrue, IfProj, 0)
764 DEFINE_CLASS_ID(IfFalse, IfProj, 1)
765 DEFINE_CLASS_ID(Parm, Proj, 4)
766 DEFINE_CLASS_ID(MachProj, Proj, 5)
767
768 DEFINE_CLASS_ID(Mem, Node, 4)
769 DEFINE_CLASS_ID(Load, Mem, 0)
770 DEFINE_CLASS_ID(LoadVector, Load, 0)
771 DEFINE_CLASS_ID(LoadVectorGather, LoadVector, 0)
772 DEFINE_CLASS_ID(LoadVectorGatherMasked, LoadVector, 1)
773 DEFINE_CLASS_ID(LoadVectorMasked, LoadVector, 2)
774 DEFINE_CLASS_ID(Store, Mem, 1)
775 DEFINE_CLASS_ID(StoreVector, Store, 0)
776 DEFINE_CLASS_ID(StoreVectorScatter, StoreVector, 0)
777 DEFINE_CLASS_ID(StoreVectorScatterMasked, StoreVector, 1)
778 DEFINE_CLASS_ID(StoreVectorMasked, StoreVector, 2)
779 DEFINE_CLASS_ID(LoadStore, Mem, 2)
780 DEFINE_CLASS_ID(LoadStoreConditional, LoadStore, 0)
781 DEFINE_CLASS_ID(CompareAndSwap, LoadStoreConditional, 0)
782 DEFINE_CLASS_ID(CompareAndExchangeNode, LoadStore, 1)
783
784 DEFINE_CLASS_ID(Region, Node, 5)
785 DEFINE_CLASS_ID(Loop, Region, 0)
786 DEFINE_CLASS_ID(Root, Loop, 0)
787 DEFINE_CLASS_ID(BaseCountedLoop, Loop, 1)
788 DEFINE_CLASS_ID(CountedLoop, BaseCountedLoop, 0)
789 DEFINE_CLASS_ID(LongCountedLoop, BaseCountedLoop, 1)
790 DEFINE_CLASS_ID(OuterStripMinedLoop, Loop, 2)
791
792 DEFINE_CLASS_ID(Sub, Node, 6)
793 DEFINE_CLASS_ID(Cmp, Sub, 0)
794 DEFINE_CLASS_ID(FastLock, Cmp, 0)
795 DEFINE_CLASS_ID(FastUnlock, Cmp, 1)
796 DEFINE_CLASS_ID(SubTypeCheck,Cmp, 2)
797
798 DEFINE_CLASS_ID(MergeMem, Node, 7)
799 DEFINE_CLASS_ID(Bool, Node, 8)
800 DEFINE_CLASS_ID(AddP, Node, 9)
801 DEFINE_CLASS_ID(BoxLock, Node, 10)
802 DEFINE_CLASS_ID(Add, Node, 11)
803 DEFINE_CLASS_ID(Mul, Node, 12)
804 DEFINE_CLASS_ID(ClearArray, Node, 14)
805 DEFINE_CLASS_ID(Halt, Node, 15)
806 DEFINE_CLASS_ID(Opaque1, Node, 16)
807 DEFINE_CLASS_ID(OpaqueLoopInit, Opaque1, 0)
808 DEFINE_CLASS_ID(OpaqueLoopStride, Opaque1, 1)
809 DEFINE_CLASS_ID(OpaqueMultiversioning, Opaque1, 2)
810 DEFINE_CLASS_ID(OpaqueNotNull, Node, 17)
811 DEFINE_CLASS_ID(OpaqueInitializedAssertionPredicate, Node, 18)
812 DEFINE_CLASS_ID(OpaqueTemplateAssertionPredicate, Node, 19)
813 DEFINE_CLASS_ID(Move, Node, 20)
814 DEFINE_CLASS_ID(LShift, Node, 21)
815 DEFINE_CLASS_ID(Neg, Node, 22)
816
817 _max_classes = ClassMask_Neg
818 };
819 #undef DEFINE_CLASS_ID
820
821 // Flags are sorted by usage frequency.
822 enum NodeFlags {
823 Flag_is_Copy = 1 << 0, // should be first bit to avoid shift
824 Flag_rematerialize = 1 << 1,
825 Flag_needs_anti_dependence_check = 1 << 2,
826 Flag_is_macro = 1 << 3,
827 Flag_is_Con = 1 << 4,
828 Flag_is_cisc_alternate = 1 << 5,
829 Flag_is_dead_loop_safe = 1 << 6,
830 Flag_may_be_short_branch = 1 << 7,
831 Flag_avoid_back_to_back_before = 1 << 8,
832 Flag_avoid_back_to_back_after = 1 << 9,
833 Flag_has_call = 1 << 10,
834 Flag_has_swapped_edges = 1 << 11,
835 Flag_is_scheduled = 1 << 12,
836 Flag_is_expensive = 1 << 13,
837 Flag_is_predicated_vector = 1 << 14,
838 Flag_for_post_loop_opts_igvn = 1 << 15,
839 Flag_for_merge_stores_igvn = 1 << 16,
840 Flag_is_removed_by_peephole = 1 << 17,
841 Flag_is_predicated_using_blend = 1 << 18,
842 _last_flag = Flag_is_predicated_using_blend
843 };
844
845 class PD;
846
847 private:
848 juint _class_id;
849 juint _flags;
850
851 #ifdef ASSERT
852 static juint max_flags();
853 #endif
854
855 protected:
856 // These methods should be called from constructors only.
857 void init_class_id(juint c) {
858 _class_id = c; // cast out const
859 }
860 void init_flags(uint fl) {
861 assert(fl <= max_flags(), "invalid node flag");
862 _flags |= fl;
863 }
864 void clear_flag(uint fl) {
865 assert(fl <= max_flags(), "invalid node flag");
866 _flags &= ~fl;
867 }
868
869 public:
870 juint class_id() const { return _class_id; }
871
872 juint flags() const { return _flags; }
873
874 void add_flag(juint fl) { init_flags(fl); }
875
876 void remove_flag(juint fl) { clear_flag(fl); }
877
878 // Return a dense integer opcode number
879 virtual int Opcode() const;
880
881 // Virtual inherited Node size
882 virtual uint size_of() const;
883
884 // Other interesting Node properties
885 #define DEFINE_CLASS_QUERY(type) \
886 bool is_##type() const { \
887 return ((_class_id & ClassMask_##type) == Class_##type); \
888 } \
889 type##Node *as_##type() const { \
890 assert(is_##type(), "invalid node class: %s", Name()); \
891 return (type##Node*)this; \
892 } \
893 type##Node* isa_##type() const { \
894 return (is_##type()) ? as_##type() : nullptr; \
895 }
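
  // For example, for some Node* n (a minimal usage sketch):
  //   if (n->is_Load()) { LoadNode* ld = n->as_Load(); ... }  // assert-checked cast
  //   LoadNode* maybe = n->isa_Load();  // null if n is not a LoadNode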
896
897 DEFINE_CLASS_QUERY(AbstractLock)
898 DEFINE_CLASS_QUERY(Add)
899 DEFINE_CLASS_QUERY(AddP)
900 DEFINE_CLASS_QUERY(Allocate)
901 DEFINE_CLASS_QUERY(AllocateArray)
902 DEFINE_CLASS_QUERY(ArrayCopy)
903 DEFINE_CLASS_QUERY(BaseCountedLoop)
904 DEFINE_CLASS_QUERY(BaseCountedLoopEnd)
905 DEFINE_CLASS_QUERY(Bool)
906 DEFINE_CLASS_QUERY(BoxLock)
907 DEFINE_CLASS_QUERY(Call)
908 DEFINE_CLASS_QUERY(CallDynamicJava)
909 DEFINE_CLASS_QUERY(CallJava)
910 DEFINE_CLASS_QUERY(CallLeaf)
911 DEFINE_CLASS_QUERY(CallLeafNoFP)
912 DEFINE_CLASS_QUERY(CallLeafPure)
913 DEFINE_CLASS_QUERY(CallRuntime)
914 DEFINE_CLASS_QUERY(CallStaticJava)
915 DEFINE_CLASS_QUERY(Catch)
916 DEFINE_CLASS_QUERY(CatchProj)
917 DEFINE_CLASS_QUERY(CheckCastPP)
918 DEFINE_CLASS_QUERY(CastII)
919 DEFINE_CLASS_QUERY(CastLL)
920 DEFINE_CLASS_QUERY(CastFF)
921 DEFINE_CLASS_QUERY(ConI)
922 DEFINE_CLASS_QUERY(CastPP)
923 DEFINE_CLASS_QUERY(ConstraintCast)
924 DEFINE_CLASS_QUERY(ClearArray)
925 DEFINE_CLASS_QUERY(CMove)
926 DEFINE_CLASS_QUERY(Cmp)
927 DEFINE_CLASS_QUERY(Convert)
928 DEFINE_CLASS_QUERY(CountedLoop)
929 DEFINE_CLASS_QUERY(CountedLoopEnd)
930 DEFINE_CLASS_QUERY(DecodeNarrowPtr)
931 DEFINE_CLASS_QUERY(DecodeN)
932 DEFINE_CLASS_QUERY(DecodeNKlass)
933 DEFINE_CLASS_QUERY(EncodeNarrowPtr)
934 DEFINE_CLASS_QUERY(EncodeP)
935 DEFINE_CLASS_QUERY(EncodePKlass)
936 DEFINE_CLASS_QUERY(FastLock)
937 DEFINE_CLASS_QUERY(FastUnlock)
938 DEFINE_CLASS_QUERY(Halt)
939 DEFINE_CLASS_QUERY(If)
940 DEFINE_CLASS_QUERY(RangeCheck)
941 DEFINE_CLASS_QUERY(IfProj)
942 DEFINE_CLASS_QUERY(IfFalse)
943 DEFINE_CLASS_QUERY(IfTrue)
944 DEFINE_CLASS_QUERY(Initialize)
945 DEFINE_CLASS_QUERY(Jump)
946 DEFINE_CLASS_QUERY(JumpProj)
947 DEFINE_CLASS_QUERY(LongCountedLoop)
948 DEFINE_CLASS_QUERY(LongCountedLoopEnd)
949 DEFINE_CLASS_QUERY(Load)
950 DEFINE_CLASS_QUERY(LoadStore)
951 DEFINE_CLASS_QUERY(LoadStoreConditional)
952 DEFINE_CLASS_QUERY(Lock)
953 DEFINE_CLASS_QUERY(Loop)
954 DEFINE_CLASS_QUERY(LShift)
955 DEFINE_CLASS_QUERY(Mach)
956 DEFINE_CLASS_QUERY(MachBranch)
957 DEFINE_CLASS_QUERY(MachCall)
958 DEFINE_CLASS_QUERY(MachCallDynamicJava)
959 DEFINE_CLASS_QUERY(MachCallJava)
960 DEFINE_CLASS_QUERY(MachCallLeaf)
961 DEFINE_CLASS_QUERY(MachCallRuntime)
962 DEFINE_CLASS_QUERY(MachCallStaticJava)
963 DEFINE_CLASS_QUERY(MachConstantBase)
964 DEFINE_CLASS_QUERY(MachConstant)
965 DEFINE_CLASS_QUERY(MachGoto)
966 DEFINE_CLASS_QUERY(MachIf)
967 DEFINE_CLASS_QUERY(MachJump)
968 DEFINE_CLASS_QUERY(MachNullCheck)
969 DEFINE_CLASS_QUERY(MachProj)
970 DEFINE_CLASS_QUERY(MachReturn)
971 DEFINE_CLASS_QUERY(MachSafePoint)
972 DEFINE_CLASS_QUERY(MachSpillCopy)
973 DEFINE_CLASS_QUERY(MachTemp)
974 DEFINE_CLASS_QUERY(MachMemBar)
975 DEFINE_CLASS_QUERY(MachMerge)
976 DEFINE_CLASS_QUERY(Mem)
977 DEFINE_CLASS_QUERY(MemBar)
978 DEFINE_CLASS_QUERY(MemBarStoreStore)
979 DEFINE_CLASS_QUERY(MergeMem)
980 DEFINE_CLASS_QUERY(Move)
981 DEFINE_CLASS_QUERY(Mul)
982 DEFINE_CLASS_QUERY(Multi)
983 DEFINE_CLASS_QUERY(MultiBranch)
984 DEFINE_CLASS_QUERY(MulVL)
985 DEFINE_CLASS_QUERY(Neg)
986 DEFINE_CLASS_QUERY(NegV)
987 DEFINE_CLASS_QUERY(NeverBranch)
988 DEFINE_CLASS_QUERY(Opaque1)
989 DEFINE_CLASS_QUERY(OpaqueNotNull)
990 DEFINE_CLASS_QUERY(OpaqueInitializedAssertionPredicate)
991 DEFINE_CLASS_QUERY(OpaqueTemplateAssertionPredicate)
992 DEFINE_CLASS_QUERY(OpaqueLoopInit)
993 DEFINE_CLASS_QUERY(OpaqueLoopStride)
994 DEFINE_CLASS_QUERY(OpaqueMultiversioning)
995 DEFINE_CLASS_QUERY(OuterStripMinedLoop)
996 DEFINE_CLASS_QUERY(OuterStripMinedLoopEnd)
997 DEFINE_CLASS_QUERY(Parm)
998 DEFINE_CLASS_QUERY(ParsePredicate)
999 DEFINE_CLASS_QUERY(PCTable)
1000 DEFINE_CLASS_QUERY(Phi)
1001 DEFINE_CLASS_QUERY(Proj)
1002 DEFINE_CLASS_QUERY(Reduction)
1003 DEFINE_CLASS_QUERY(Region)
1004 DEFINE_CLASS_QUERY(Root)
1005 DEFINE_CLASS_QUERY(SafePoint)
1006 DEFINE_CLASS_QUERY(SafePointScalarObject)
1007 DEFINE_CLASS_QUERY(SafePointScalarMerge)
1008 DEFINE_CLASS_QUERY(Start)
1009 DEFINE_CLASS_QUERY(Store)
1010 DEFINE_CLASS_QUERY(Sub)
1011 DEFINE_CLASS_QUERY(SubTypeCheck)
1012 DEFINE_CLASS_QUERY(Type)
1013 DEFINE_CLASS_QUERY(Vector)
1014 DEFINE_CLASS_QUERY(VectorMaskCmp)
1015 DEFINE_CLASS_QUERY(VectorUnbox)
1016 DEFINE_CLASS_QUERY(VectorReinterpret)
1017 DEFINE_CLASS_QUERY(CompressV)
1018 DEFINE_CLASS_QUERY(ExpandV)
1019 DEFINE_CLASS_QUERY(CompressM)
1020 DEFINE_CLASS_QUERY(LoadVector)
1021 DEFINE_CLASS_QUERY(LoadVectorGather)
1022 DEFINE_CLASS_QUERY(LoadVectorMasked)
1023 DEFINE_CLASS_QUERY(LoadVectorGatherMasked)
1024 DEFINE_CLASS_QUERY(StoreVector)
1025 DEFINE_CLASS_QUERY(StoreVectorScatter)
1026 DEFINE_CLASS_QUERY(StoreVectorMasked)
1027 DEFINE_CLASS_QUERY(StoreVectorScatterMasked)
1028 DEFINE_CLASS_QUERY(SaturatingVector)
1029 DEFINE_CLASS_QUERY(ShiftV)
1030 DEFINE_CLASS_QUERY(Unlock)
1031
1032 #undef DEFINE_CLASS_QUERY
1033
1034 // duplicate of is_MachSpillCopy()
1035 bool is_SpillCopy () const {
1036 return ((_class_id & ClassMask_MachSpillCopy) == Class_MachSpillCopy);
1037 }
1038
1039 bool is_Con () const { return (_flags & Flag_is_Con) != 0; }
  // Whether this is a data node which is safe to leave in a dead loop during IGVN optimization.
1041 bool is_dead_loop_safe() const;
1042
1043 // is_Copy() returns copied edge index (0 or 1)
1044 uint is_Copy() const { return (_flags & Flag_is_Copy); }
1045
1046 virtual bool is_CFG() const { return false; }
1047
1048 // If this node is control-dependent on a test, can it be
1049 // rerouted to a dominating equivalent test? This is usually
1050 // true of non-CFG nodes, but can be false for operations which
1051 // depend for their correct sequencing on more than one test.
1052 // (In that case, hoisting to a dominating test may silently
1053 // skip some other important test.)
1054 virtual bool depends_only_on_test() const { assert(!is_CFG(), ""); return true; };
1055
1056 // When building basic blocks, I need to have a notion of block beginning
1057 // Nodes, next block selector Nodes (block enders), and next block
1058 // projections. These calls need to work on their machine equivalents. The
1059 // Ideal beginning Nodes are RootNode, RegionNode and StartNode.
1060 bool is_block_start() const {
1061 if ( is_Region() )
1062 return this == (const Node*)in(0);
1063 else
1064 return is_Start();
1065 }
1066
1067 // The Ideal control projection Nodes are IfTrue/IfFalse, JumpProjNode, Root,
1068 // Goto and Return. This call also returns the block ending Node.
1069 virtual const Node *is_block_proj() const;
1070
1071 // The node is a "macro" node which needs to be expanded before matching
1072 bool is_macro() const { return (_flags & Flag_is_macro) != 0; }
1073 // The node is expensive: the best control is set during loop opts
1074 bool is_expensive() const { return (_flags & Flag_is_expensive) != 0 && in(0) != nullptr; }
  // The node's input edges have been swapped from their original positions.
1076 bool has_swapped_edges() const { return (_flags & Flag_has_swapped_edges) != 0; }
1077
1078 bool is_predicated_vector() const { return (_flags & Flag_is_predicated_vector) != 0; }
1079
1080 bool is_predicated_using_blend() const { return (_flags & Flag_is_predicated_using_blend) != 0; }
1081
  // Used in lcm to mark nodes that have been scheduled
1083 bool is_scheduled() const { return (_flags & Flag_is_scheduled) != 0; }
1084
1085 bool for_post_loop_opts_igvn() const { return (_flags & Flag_for_post_loop_opts_igvn) != 0; }
1086 bool for_merge_stores_igvn() const { return (_flags & Flag_for_merge_stores_igvn) != 0; }
1087
1088 // Is 'n' possibly a loop entry (i.e. a Parse Predicate projection)?
1089 static bool may_be_loop_entry(Node* n) {
1090 return n != nullptr && n->is_IfProj() && n->in(0)->is_ParsePredicate();
1091 }
1092
1093 //----------------- Optimization
1094
1095 // Get the worst-case Type output for this Node.
1096 virtual const class Type *bottom_type() const;
1097
1098 // If we find a better type for a node, try to record it permanently.
1099 // Return true if this node actually changed.
1100 // Be sure to do the hash_delete game in the "rehash" variant.
1101 void raise_bottom_type(const Type* new_type);
1102
1103 // Get the address type with which this node uses and/or defs memory,
1104 // or null if none. The address type is conservatively wide.
1105 // Returns non-null for calls, membars, loads, stores, etc.
1106 // Returns TypePtr::BOTTOM if the node touches memory "broadly".
1107 virtual const class TypePtr *adr_type() const { return nullptr; }
1108
1109 // Return an existing node which computes the same function as this node.
1110 // The optimistic combined algorithm requires this to return a Node which
1111 // is a small number of steps away (e.g., one of my inputs).
1112 virtual Node* Identity(PhaseGVN* phase);
1113
1114 // Return the set of values this Node can take on at runtime.
1115 virtual const Type* Value(PhaseGVN* phase) const;
1116
1117 // Return a node which is more "ideal" than the current node.
1118 // The invariants on this call are subtle. If in doubt, read the
1119 // treatise in node.cpp above the default implementation AND TEST WITH
1120 // -XX:VerifyIterativeGVN=1
1121 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1122
1123 // Some nodes have specific Ideal subgraph transformations only if they are
1124 // unique users of specific nodes. Such nodes should be put on IGVN worklist
1125 // for the transformations to happen.
1126 bool has_special_unique_user() const;
1127
  // Skip Proj and CatchProj node chains. Check for Null and Top.
1129 Node* find_exact_control(Node* ctrl);
1130
1131 // Results of the dominance analysis.
1132 enum class DomResult {
1133 NotDominate, // 'this' node does not dominate 'sub'.
1134 Dominate, // 'this' node dominates or is equal to 'sub'.
1135 EncounteredDeadCode // Result is undefined due to encountering dead code.
1136 };
  // Check if 'this' node dominates or is equal to 'sub'.
1138 DomResult dominates(Node* sub, Node_List &nlist);
1139
1140 bool remove_dead_region(PhaseGVN *phase, bool can_reshape);
1141 public:
1142
1143 // See if there is valid pipeline info
1144 static const Pipeline *pipeline_class();
1145 virtual const Pipeline *pipeline() const;
1146
1147 // Compute the latency from the def to this instruction of the ith input node
1148 uint latency(uint i);
1149
1150 // Hash & compare functions, for pessimistic value numbering
1151
1152 // If the hash function returns the special sentinel value NO_HASH,
1153 // the node is guaranteed never to compare equal to any other node.
1154 // If we accidentally generate a hash with value NO_HASH the node
1155 // won't go into the table and we'll lose a little optimization.
1156 static const uint NO_HASH = 0;
1157 virtual uint hash() const;
1158 virtual bool cmp( const Node &n ) const;
1159
  // Operation appears to be iteratively computed (such as an induction variable).
  // It is possible for this method to return false for a loop-varying
  // value, if it appears (by local graph inspection) to be computed by a simple conditional.
1163 bool is_iteratively_computed();
1164
1165 // Determine if a node is a counted loop induction variable.
1166 // NOTE: The method is defined in "loopnode.cpp".
1167 bool is_cloop_ind_var() const;
1168
  // Return a node with opcode "opc" and same inputs as "this" if one can
  // be found; otherwise return null.
1171 Node* find_similar(int opc);
1172
1173 // Return the unique control out if only one. Null if none or more than one.
1174 Node* unique_ctrl_out_or_null() const;
1175 // Return the unique control out. Asserts if none or more than one control out.
1176 Node* unique_ctrl_out() const;
1177
1178 // Set control or add control as precedence edge
1179 void ensure_control_or_add_prec(Node* c);
1180 void add_prec_from(Node* n);
1181
1182 // Visit boundary uses of the node and apply a callback function for each.
1183 // Recursively traverse uses, stopping and applying the callback when
1184 // reaching a boundary node, defined by is_boundary. Note: the function
1185 // definition appears after the complete type definition of Node_List.
1186 template <typename Callback, typename Check>
1187 void visit_uses(Callback callback, Check is_boundary) const;
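  // A minimal usage sketch (the lambdas here are illustrative only):
  //   n->visit_uses([](Node* use) { /* handle boundary use */ },
  //                 [](Node* use) { return use->is_CFG(); });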
1188
1189 // Returns a clone of the current node that's pinned (if the current node is not) for nodes found in array accesses
1190 // (Load and range check CastII nodes).
1191 // This is used when an array access is made dependent on 2 or more range checks (range check smearing or Loop Predication).
1192 virtual Node* pin_array_access_node() const {
1193 return nullptr;
1194 }
1195
1196 //----------------- Code Generation
1197
1198 // Ideal register class for Matching. Zero means unmatched instruction
1199 // (these are cloned instead of converted to machine nodes).
1200 virtual uint ideal_reg() const;
1201
1202 static const uint NotAMachineReg; // must be > max. machine register
1203
1204 // Do we Match on this edge index or not? Generally false for Control
1205 // and true for everything else. Weird for calls & returns.
1206 virtual uint match_edge(uint idx) const;
1207
1208 // Register class output is returned in
1209 virtual const RegMask &out_RegMask() const;
1210 // Register class input is expected in
1211 virtual const RegMask &in_RegMask(uint) const;
1212 // Should we clone rather than spill this instruction?
1213 bool rematerialize() const;
1214
1215 // Return JVM State Object if this Node carries debug info, or null otherwise
1216 virtual JVMState* jvms() const;
1217
1218 // Print as assembly
1219 virtual void format( PhaseRegAlloc *, outputStream* st = tty ) const;
1220 // Emit bytes using C2_MacroAssembler
1221 virtual void emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const;
1222 // Size of instruction in bytes
1223 virtual uint size(PhaseRegAlloc *ra_) const;
1224
1225 // Convenience function to extract an integer constant from a node.
1226 // If it is not an integer constant (either Con, CastII, or Mach),
1227 // return value_if_unknown.
1228 jint find_int_con(jint value_if_unknown) const {
1229 const TypeInt* t = find_int_type();
1230 return (t != nullptr && t->is_con()) ? t->get_con() : value_if_unknown;
1231 }
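  // For example (a hedged sketch; taking the count from in(2) is illustrative):
  //   jint shift = n->in(2)->find_int_con(-1);
  //   if (shift == -1) { /* count is not a compile-time constant */ }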
1232 // Return the constant, knowing it is an integer constant already
1233 jint get_int() const {
1234 const TypeInt* t = find_int_type();
1235 guarantee(t != nullptr, "must be con");
1236 return t->get_con();
1237 }
1238 // Here's where the work is done. Can produce non-constant int types too.
1239 const TypeInt* find_int_type() const;
1240 const TypeInteger* find_integer_type(BasicType bt) const;
1241
1242 // Same thing for long (and intptr_t, via type.hpp):
1243 jlong get_long() const {
1244 const TypeLong* t = find_long_type();
1245 guarantee(t != nullptr, "must be con");
1246 return t->get_con();
1247 }
1248 jlong find_long_con(jint value_if_unknown) const {
1249 const TypeLong* t = find_long_type();
1250 return (t != nullptr && t->is_con()) ? t->get_con() : value_if_unknown;
1251 }
1252 const TypeLong* find_long_type() const;
1253
1254 jlong get_integer_as_long(BasicType bt) const {
1255 const TypeInteger* t = find_integer_type(bt);
1256 guarantee(t != nullptr && t->is_con(), "must be con");
1257 return t->get_con_as_long(bt);
1258 }
1259 jlong find_integer_as_long(BasicType bt, jlong value_if_unknown) const {
1260 const TypeInteger* t = find_integer_type(bt);
1261 if (t == nullptr || !t->is_con()) return value_if_unknown;
1262 return t->get_con_as_long(bt);
1263 }
1264 const TypePtr* get_ptr_type() const;
1265
1266 // These guys are called by code generated by ADLC:
1267 intptr_t get_ptr() const;
1268 intptr_t get_narrowcon() const;
1269 jdouble getd() const;
1270 jfloat getf() const;
1271 jshort geth() const;
1272
1273 // Nodes which are pinned into basic blocks
1274 virtual bool pinned() const { return false; }
1275
1276 // Nodes which use memory without consuming it, hence need antidependences
1277 // More specifically, needs_anti_dependence_check returns true iff the node
1278 // (a) does a load, and (b) does not perform a store (except perhaps to a
1279 // stack slot or some other unaliased location).
1280 bool needs_anti_dependence_check() const;
1281
  // Return which operand this instruction may cisc-spill. In other words,
  // return the operand position that can be converted from a register to a memory access.
1284 virtual int cisc_operand() const { return AdlcVMDeps::Not_cisc_spillable; }
1285 bool is_cisc_alternate() const { return (_flags & Flag_is_cisc_alternate) != 0; }
1286
1287 // Whether this is a memory-writing machine node.
1288 bool is_memory_writer() const { return is_Mach() && bottom_type()->has_memory(); }
1289
1290 // Whether this is a memory phi node
1291 bool is_memory_phi() const { return is_Phi() && bottom_type() == Type::MEMORY; }
1292
1293 bool is_div_or_mod(BasicType bt) const;
1294
1295 bool is_data_proj_of_pure_function(const Node* maybe_pure_function) const;
1296
1297 //----------------- Printing, etc
1298 #ifndef PRODUCT
1299 public:
1300 Node* find(int idx, bool only_ctrl = false); // Search the graph for the given idx.
1301 Node* find_ctrl(int idx); // Search control ancestors for the given idx.
1302 void dump_bfs(const int max_distance, Node* target, const char* options, outputStream* st, const frame* fr = nullptr) const;
1303 void dump_bfs(const int max_distance, Node* target, const char* options) const; // directly to tty
1304 void dump_bfs(const int max_distance) const; // dump_bfs(max_distance, nullptr, nullptr)
1305 void dump_bfs(const int max_distance, Node* target, const char* options, void* sp, void* fp, void* pc) const;
1306 class DumpConfig {
1307 public:
1308 // overridden to implement coloring of node idx
1309 virtual void pre_dump(outputStream *st, const Node* n) = 0;
1310 virtual void post_dump(outputStream *st) = 0;
1311 };
1312 void dump_idx(bool align = false, outputStream* st = tty, DumpConfig* dc = nullptr) const;
1313 void dump_name(outputStream* st = tty, DumpConfig* dc = nullptr) const;
1314 void dump() const; // print node with newline
1315 void dump(const char* suffix, bool mark = false, outputStream* st = tty, DumpConfig* dc = nullptr) const; // Print this node.
1316 void dump(int depth) const; // Print this node, recursively to depth d
1317 void dump_ctrl(int depth) const; // Print control nodes, to depth d
1318 void dump_comp() const; // Print this node in compact representation.
1319 // Print this node in compact representation.
1320 void dump_comp(const char* suffix, outputStream *st = tty) const;
1321 private:
1322 virtual void dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const; // Print required-edge info
1323 virtual void dump_prec(outputStream* st = tty, DumpConfig* dc = nullptr) const; // Print precedence-edge info
1324 virtual void dump_out(outputStream* st = tty, DumpConfig* dc = nullptr) const; // Print the output edge info
1325 public:
1326 virtual void dump_spec(outputStream *st) const {}; // Print per-node info
1327 // Print compact per-node info
1328 virtual void dump_compact_spec(outputStream *st) const { dump_spec(st); }
1329
1330 static void verify(int verify_depth, VectorSet& visited, Node_List& worklist);
1331
1332 // This call defines a class-unique string used to identify class instances
1333 virtual const char *Name() const;
1334
1335 void dump_format(PhaseRegAlloc *ra) const; // debug access to MachNode::format(...)
1336 static bool in_dump() { return Compile::current()->_in_dump_cnt > 0; } // check if we are in a dump call
1337 #endif
1338 #ifdef ASSERT
1339 void verify_construction();
1340 bool verify_jvms(const JVMState* jvms) const;
1341
1342 Node* _debug_orig; // Original version of this, if any.
1343 Node* debug_orig() const { return _debug_orig; }
1344 void set_debug_orig(Node* orig); // _debug_orig = orig
1345 void dump_orig(outputStream *st, bool print_key = true) const;
1346
1347 uint64_t _debug_idx; // Unique value assigned to every node.
1348 uint64_t debug_idx() const { return _debug_idx; }
1349 void set_debug_idx(uint64_t debug_idx) { _debug_idx = debug_idx; }
1350
1351 int _hash_lock; // Barrier to modifications of nodes in the hash table
1352 void enter_hash_lock() { ++_hash_lock; assert(_hash_lock < 99, "in too many hash tables?"); }
1353 void exit_hash_lock() { --_hash_lock; assert(_hash_lock >= 0, "mispaired hash locks"); }
1354
1355 static void init_NodeProperty();
1356
1357 #if OPTO_DU_ITERATOR_ASSERT
1358 const Node* _last_del; // The last deleted node.
  uint _del_tick;    // Bumped when a deletion happens.
1360 #endif
1361 #endif
1362 };
1363
1364 inline bool not_a_node(const Node* n) {
1365 if (n == nullptr) return true;
1366 if (((intptr_t)n & 1) != 0) return true; // uninitialized, etc.
  if (*(address*)n == badAddress) return true; // killed by Node::destruct
1368 return false;
1369 }
1370
1371 //-----------------------------------------------------------------------------
1372 // Iterators over DU info, and associated Node functions.
1373
1374 #if OPTO_DU_ITERATOR_ASSERT
1375
1376 // Common code for assertion checking on DU iterators.
1377 class DUIterator_Common {
1378 #ifdef ASSERT
1379 protected:
1380 bool _vdui; // cached value of VerifyDUIterators
1381 const Node* _node; // the node containing the _out array
1382 uint _outcnt; // cached node->_outcnt
1383 uint _del_tick; // cached node->_del_tick
1384 Node* _last; // last value produced by the iterator
1385
1386 void sample(const Node* node); // used by c'tor to set up for verifies
1387 void verify(const Node* node, bool at_end_ok = false);
1388 void verify_resync();
1389 void reset(const DUIterator_Common& that);
1390
1391 // The VDUI_ONLY macro protects code conditionalized on VerifyDUIterators
1392 #define I_VDUI_ONLY(i,x) { if ((i)._vdui) { x; } }
1393 #else
1394 #define I_VDUI_ONLY(i,x) { }
1395 #endif //ASSERT
1396 };
1397
1398 #define VDUI_ONLY(x) I_VDUI_ONLY(*this, x)
1399
1400 // Default DU iterator. Allows appends onto the out array.
1401 // Allows deletion from the out array only at the current point.
1402 // Usage:
1403 // for (DUIterator i = x->outs(); x->has_out(i); i++) {
1404 // Node* y = x->out(i);
1405 // ...
1406 // }
// Compiles in product mode to an unsigned integer index, which indexes
1408 // onto a repeatedly reloaded base pointer of x->_out. The loop predicate
1409 // also reloads x->_outcnt. If you delete, you must perform "--i" just
1410 // before continuing the loop. You must delete only the last-produced
1411 // edge. You must delete only a single copy of the last-produced edge,
1412 // or else you must delete all copies at once (the first time the edge
1413 // is produced by the iterator).
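//
// A minimal deletion sketch (should_delete and k are placeholders, with
// use->in(k) == x assumed): repointing an input of the last-produced use
// deletes one out edge of x, so the iterator must be backed up:
//   for (DUIterator i = x->outs(); x->has_out(i); i++) {
//     Node* use = x->out(i);
//     if (should_delete(use)) {
//       use->set_req(k, nullptr);  // drops the use->x edge, one out of x
//       --i;                       // back up over the deletion
//     }
//   }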
class DUIterator : public DUIterator_Common {
  friend class Node;

  // This is the index which provides the product-mode behavior.
  // Whatever the product-mode version of the system does to the
  // DUI index is done to this index. All other fields in
  // this class are used only for assertion checking.
  uint _idx;

#ifdef ASSERT
  uint _refresh_tick; // Records the refresh activity.

  void sample(const Node* node);      // Initialize _refresh_tick etc.
  void verify(const Node* node, bool at_end_ok = false);
  void verify_increment();            // Verify an increment operation.
  void verify_resync();               // Verify that we can back up over a deletion.
  void verify_finish();               // Verify that the loop terminated properly.
  void refresh();                     // Resample verification info.
  void reset(const DUIterator& that); // Resample after assignment.
#endif

  DUIterator(const Node* node, int dummy_to_avoid_conversion)
    { _idx = 0; DEBUG_ONLY(sample(node)); }

 public:
  // initialize to garbage; clear _vdui to disable asserts
  DUIterator()
    { /*initialize to garbage*/ DEBUG_ONLY(_vdui = false); }

  DUIterator(const DUIterator& that)
    { _idx = that._idx; DEBUG_ONLY(_vdui = false; reset(that)); }

  void operator++(int dummy_to_specify_postfix_op)
    { _idx++; VDUI_ONLY(verify_increment()); }

  void operator--()
    { VDUI_ONLY(verify_resync()); --_idx; }

  ~DUIterator()
    { VDUI_ONLY(verify_finish()); }

  void operator=(const DUIterator& that)
    { _idx = that._idx; DEBUG_ONLY(reset(that)); }
};

inline DUIterator Node::outs() const
  { return DUIterator(this, 0); }
inline DUIterator& Node::refresh_out_pos(DUIterator& i) const
  { I_VDUI_ONLY(i, i.refresh()); return i; }
inline bool Node::has_out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this, true)); return i._idx < _outcnt; }
inline Node* Node::out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this)); return DEBUG_ONLY(i._last=) _out[i._idx]; }
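
// Illustrative in-loop deletion sketch (not from the original source; 'j' is
// assumed to be the position of x in y's input array). Clearing that input
// also removes y from x's out array, so the iterator must back up one slot:
//
//   for (DUIterator i = x->outs(); x->has_out(i); i++) {
//     Node* y = x->out(i);
//     if (y->in(j) == x) {      // hypothetical condition for dropping the edge
//       y->set_req(j, nullptr); // deletes the last-produced edge
//       --i;                    // resync before the next has_out() check
//     }
//   }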


// Faster DU iterator. Disallows insertions into the out array.
// Allows deletion from the out array only at the current point.
// Usage:
//  for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) {
//    Node* y = x->fast_out(i);
//    ...
//  }
// Compiles in product mode to raw Node** pointer arithmetic, with
// no reloading of pointers from the original node x. If you delete,
// you must perform "--i; --imax" just before continuing the loop.
// If you delete multiple copies of the same edge, you must decrement
// imax, but not i, multiple times: "--i, imax -= num_edges".
class DUIterator_Fast : public DUIterator_Common {
  friend class Node;
  friend class DUIterator_Last;

  // This is the pointer which provides the product-mode behavior.
  // Whatever the product-mode version of the system does to the
  // DUI pointer is done to this pointer. All other fields in
  // this class are used only for assertion checking.
  Node** _outp;

#ifdef ASSERT
  void verify(const Node* node, bool at_end_ok = false);
  void verify_limit();
  void verify_resync();
  void verify_relimit(uint n);
  void reset(const DUIterator_Fast& that);
#endif

  // Note: offset must be signed, since -1 is sometimes passed
  DUIterator_Fast(const Node* node, ptrdiff_t offset)
    { _outp = node->_out + offset; DEBUG_ONLY(sample(node)); }

 public:
  // initialize to garbage; clear _vdui to disable asserts
  DUIterator_Fast()
    { /*initialize to garbage*/ DEBUG_ONLY(_vdui = false); }

  DUIterator_Fast(const DUIterator_Fast& that)
    { _outp = that._outp; DEBUG_ONLY(_vdui = false; reset(that)); }

  void operator++(int dummy_to_specify_postfix_op)
    { _outp++; VDUI_ONLY(verify(_node, true)); }

  void operator--()
    { VDUI_ONLY(verify_resync()); --_outp; }

  void operator-=(uint n) // applied to the limit only
    { _outp -= n; VDUI_ONLY(verify_relimit(n)); }

  bool operator<(DUIterator_Fast& limit) {
    I_VDUI_ONLY(*this, this->verify(_node, true));
    I_VDUI_ONLY(limit, limit.verify_limit());
    return _outp < limit._outp;
  }

  void operator=(const DUIterator_Fast& that)
    { _outp = that._outp; DEBUG_ONLY(reset(that)); }
};

inline DUIterator_Fast Node::fast_outs(DUIterator_Fast& imax) const {
  // Assign a limit pointer to the reference argument:
  imax = DUIterator_Fast(this, (ptrdiff_t)_outcnt);
  // Return the base pointer:
  return DUIterator_Fast(this, 0);
}
inline Node* Node::fast_out(DUIterator_Fast& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return DEBUG_ONLY(i._last=) *i._outp;
}
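
// Illustrative deletion sketch (not from the original source; 'replacement'
// is a hypothetical node taking over the use). Node::replace_edge() returns
// the number of rewired edges, which is exactly how far the limit must move:
//
//   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
//     Node* u = n->fast_out(i);
//     int nb = u->replace_edge(n, replacement);
//     if (nb > 0) { --i; imax -= nb; }
//   }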


// Faster DU iterator. Requires each successive edge to be removed.
// Does not allow insertion of any edges.
// Usage:
//  for (DUIterator_Last imin, i = x->last_outs(imin); i >= imin; i -= num_edges) {
//    Node* y = x->last_out(i);
//    ...
//  }
// Compiles in product mode to raw Node** pointer arithmetic, with
// no reloading of pointers from the original node x.
class DUIterator_Last : private DUIterator_Fast {
  friend class Node;

#ifdef ASSERT
  void verify(const Node* node, bool at_end_ok = false);
  void verify_limit();
  void verify_step(uint num_edges);
#endif

  // Note: offset must be signed, since -1 is sometimes passed
  DUIterator_Last(const Node* node, ptrdiff_t offset)
    : DUIterator_Fast(node, offset) { }

  void operator++(int dummy_to_specify_postfix_op) {} // do not use
  void operator<(int) {} // do not use

 public:
  // initialize to garbage
  DUIterator_Last() { }

  DUIterator_Last(const DUIterator_Last& that) = default;

  void operator--()
    { _outp--; VDUI_ONLY(verify_step(1)); }

  void operator-=(uint n)
    { _outp -= n; VDUI_ONLY(verify_step(n)); }

  bool operator>=(DUIterator_Last& limit) {
    I_VDUI_ONLY(*this, this->verify(_node, true));
    I_VDUI_ONLY(limit, limit.verify_limit());
    return _outp >= limit._outp;
  }

  DUIterator_Last& operator=(const DUIterator_Last& that) = default;
};

inline DUIterator_Last Node::last_outs(DUIterator_Last& imin) const {
  // Assign a limit pointer to the reference argument:
  imin = DUIterator_Last(this, 0);
  // Return the initial pointer:
  return DUIterator_Last(this, (ptrdiff_t)_outcnt - 1);
}
inline Node* Node::last_out(DUIterator_Last& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return DEBUG_ONLY(i._last=) *i._outp;
}
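
// Illustrative sketch (not from the original source; 'nn' is a hypothetical
// replacement node). Every produced edge must be removed, so each iteration
// rewires all copies of the edge and steps over however many existed:
//
//   for (DUIterator_Last imin, i = old->last_outs(imin); i >= imin; ) {
//     Node* use = old->last_out(i);
//     uint nb = (uint)use->replace_edge(old, nn); // nb >= 1 by construction
//     i -= nb;
//   }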

#endif //OPTO_DU_ITERATOR_ASSERT

#undef I_VDUI_ONLY
#undef VDUI_ONLY

// An iterator that truly follows the iterator pattern. Doesn't
// support deletion but could be made to.
//
//   for (SimpleDUIterator i(n); i.has_next(); i.next()) {
//     Node* m = i.get();
//     ...
//   }
class SimpleDUIterator : public StackObj {
 private:
  Node* node;
  DUIterator_Fast imax;
  DUIterator_Fast i;
 public:
  SimpleDUIterator(Node* n): node(n), i(n->fast_outs(imax)) {}
  bool has_next() { return i < imax; }
  void next() { i++; }
  Node* get() { return node->fast_out(i); }
};


//-----------------------------------------------------------------------------
// Map dense integer indices to Nodes. Uses classic doubling-array trick.
// Abstractly provides an infinite array of Node*'s, initialized to null.
// Note that the constructor just zeros things, and since I use Arena
// allocation I do not need a destructor to reclaim storage.
class Node_Array : public AnyObj {
 protected:
  Arena* _a; // Arena to allocate in
  uint _max;
  Node** _nodes;
  ReallocMark _nesting; // Safety checks for arena reallocation

  // Grow array to required capacity
  void maybe_grow(uint i) {
    _nesting.check(_a); // Check if a potential reallocation in the arena is safe
    if (i >= _max) {
      grow(i);
    }
  }
  void grow(uint i);

 public:
  Node_Array(Arena* a, uint max = OptoNodeListSize) : _a(a), _max(max) {
    _nodes = NEW_ARENA_ARRAY(a, Node*, max);
    clear();
  }
  Node_Array() : Node_Array(Thread::current()->resource_area()) {}

  NONCOPYABLE(Node_Array);
  Node_Array& operator=(Node_Array&&) = delete;
  // Allow move constructor for && (eg. capture return of function)
  Node_Array(Node_Array&&) = default;

  Node* operator[] (uint i) const // Lookup, or null for not mapped
    { return (i < _max) ? _nodes[i] : (Node*)nullptr; }
  Node* at(uint i) const { assert(i < _max, "oob"); return _nodes[i]; }
  Node** adr() { return _nodes; }
  // Extend the mapping: index i maps to Node *n.
  void map(uint i, Node* n) { maybe_grow(i); _nodes[i] = n; }
  void insert(uint i, Node* n);
  void remove(uint i); // Remove, preserving order
  // Clear all entries in _nodes to null but keep storage
  void clear() {
    Copy::zero_to_bytes(_nodes, _max * sizeof(Node*));
  }

  uint max() const { return _max; }
  void dump() const;
};
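
// Illustrative semantics sketch (not from the original source; 'arena' and
// 'some_node' are hypothetical): unmapped indices read as null, and map()
// grows the backing store on demand:
//
//   Node_Array map_array(arena);
//   map_array.map(100, some_node); // grows past index 100 if needed
//   assert(map_array[100] == some_node, "mapped");
//   assert(map_array[5000] == nullptr, "unmapped indices read as null");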

class Node_List : public Node_Array {
  uint _cnt;
 public:
  Node_List(uint max = OptoNodeListSize) : Node_Array(Thread::current()->resource_area(), max), _cnt(0) {}
  Node_List(Arena* a, uint max = OptoNodeListSize) : Node_Array(a, max), _cnt(0) {}

  NONCOPYABLE(Node_List);
  Node_List& operator=(Node_List&&) = delete;
  // Allow move constructor for && (eg. capture return of function)
  Node_List(Node_List&&) = default;

  bool contains(const Node* n) const {
    for (uint e = 0; e < size(); e++) {
      if (at(e) == n) return true;
    }
    return false;
  }
  void insert(uint i, Node* n) { Node_Array::insert(i, n); _cnt++; }
  void remove(uint i) { Node_Array::remove(i); _cnt--; }
  void push(Node* b) { map(_cnt++, b); }
  void yank(Node* n); // Find and remove
  Node* pop() { return _nodes[--_cnt]; }
  void clear() { _cnt = 0; Node_Array::clear(); } // retain storage
  void copy(const Node_List& from) {
    if (from._max > _max) {
      grow(from._max);
    }
    _cnt = from._cnt;
    Copy::conjoint_words_to_higher((HeapWord*)&from._nodes[0], (HeapWord*)&_nodes[0], from._max * sizeof(Node*));
  }

  uint size() const { return _cnt; }
  void dump() const;
  void dump_simple() const;
};
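
// Illustrative sketch (not from the original source; 'a' and 'b' are
// hypothetical nodes): Node_List is an order-preserving growable list with
// LIFO pop():
//
//   Node_List list;
//   list.push(a);
//   list.push(b);
//   assert(list.size() == 2, "two entries");
//   Node* top = list.pop(); // returns b; a remains at index 0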

// Definition must appear after complete type definition of Node_List
template <typename Callback, typename Check>
void Node::visit_uses(Callback callback, Check is_boundary) const {
  ResourceMark rm;
  VectorSet visited;
  Node_List worklist;

  // The initial worklist consists of the direct uses
  for (DUIterator_Fast kmax, k = fast_outs(kmax); k < kmax; k++) {
    Node* out = fast_out(k);
    if (!visited.test_set(out->_idx)) { worklist.push(out); }
  }

  while (worklist.size() > 0) {
    Node* use = worklist.pop();
    // Apply callback on boundary nodes
    if (is_boundary(use)) {
      callback(use);
    } else {
      // Not a boundary node, continue search
      for (DUIterator_Fast kmax, k = use->fast_outs(kmax); k < kmax; k++) {
        Node* out = use->fast_out(k);
        if (!visited.test_set(out->_idx)) { worklist.push(out); }
      }
    }
  }
}
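
// Illustrative call (not from the original source; 'phis' is a hypothetical
// Unique_Node_List): collect the Phi nodes transitively reachable through
// n's uses, without searching past them:
//
//   n->visit_uses([&](Node* phi) { phis.push(phi); },
//                 [] (Node* use) { return use->is_Phi(); });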


//------------------------------Unique_Node_List-------------------------------
class Unique_Node_List : public Node_List {
  VectorSet _in_worklist;
  uint _clock_index; // Index in list from which to pop next
 public:
  Unique_Node_List() : Node_List(), _clock_index(0) {}
  Unique_Node_List(Arena* a) : Node_List(a), _in_worklist(a), _clock_index(0) {}

  NONCOPYABLE(Unique_Node_List);
  Unique_Node_List& operator=(Unique_Node_List&&) = delete;
  // Allow move constructor for && (eg. capture return of function)
  Unique_Node_List(Unique_Node_List&&) = default;

  void remove(Node* n);
  bool member(const Node* n) const { return _in_worklist.test(n->_idx) != 0; }
  VectorSet& member_set() { return _in_worklist; }

  void push(Node* b) {
    if (!_in_worklist.test_set(b->_idx)) {
      Node_List::push(b);
    }
  }
  void push_non_cfg_inputs_of(const Node* node) {
    for (uint i = 1; i < node->req(); i++) {
      Node* input = node->in(i);
      if (input != nullptr && !input->is_CFG()) {
        push(input);
      }
    }
  }

  void push_outputs_of(const Node* node) {
    for (DUIterator_Fast imax, i = node->fast_outs(imax); i < imax; i++) {
      Node* output = node->fast_out(i);
      push(output);
    }
  }

  Node* pop() {
    if (_clock_index >= size()) _clock_index = 0;
    Node* b = at(_clock_index);
    map(_clock_index, Node_List::pop());
    if (size() != 0) _clock_index++; // Advance round-robin cursor; an emptied list restarts at index 0
    _in_worklist.remove(b->_idx);
    return b;
  }
  Node* remove(uint i) {
    Node* b = Node_List::at(i);
    _in_worklist.remove(b->_idx);
    map(i, Node_List::pop());
    return b;
  }
  void yank(Node* n) {
    _in_worklist.remove(n->_idx);
    Node_List::yank(n);
  }
  void clear() {
    _in_worklist.clear(); // Discards storage but grows automatically
    Node_List::clear();
    _clock_index = 0;
  }
  void ensure_empty() {
    assert(size() == 0, "must be empty");
    clear(); // just in case
  }

  // Used after parsing to remove useless nodes before Iterative GVN
  void remove_useless_nodes(VectorSet& useful);

  // If the idx of the Nodes change, we must recompute the VectorSet
  void recompute_idx_set() {
    _in_worklist.clear();
    for (uint i = 0; i < size(); i++) {
      Node* n = at(i);
      _in_worklist.set(n->_idx);
    }
  }

#ifdef ASSERT
  bool is_subset_of(Unique_Node_List& other) {
    for (uint i = 0; i < size(); i++) {
      Node* n = at(i);
      if (!other.member(n)) {
        return false;
      }
    }
    return true;
  }
#endif

  bool contains(const Node* n) const {
    fatal("use faster member() instead");
    return false;
  }

#ifndef PRODUCT
  void print_set() const { _in_worklist.print(); }
#endif
};
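
// Typical worklist idiom (illustrative sketch, not from the original source;
// 'root' is a hypothetical start node): pushes of already-seen nodes are
// filtered by the membership set, so each node is processed at most once even
// when reached along several paths:
//
//   Unique_Node_List worklist;
//   worklist.push(root);
//   for (uint i = 0; i < worklist.size(); i++) {
//     Node* n = worklist.at(i);
//     worklist.push_outputs_of(n); // duplicates are ignored
//   }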

// Unique_Mixed_Node_List
// unique: nodes are added only once
// mixed: allows both new and old nodes
class Unique_Mixed_Node_List : public ResourceObj {
 public:
  Unique_Mixed_Node_List() : _visited_set(cmpkey, hashkey) {}

  void add(Node* node) {
    if (not_a_node(node)) {
      return; // Gracefully handle null, -1, 0xabababab, etc.
    }
    if (_visited_set[node] == nullptr) {
      _visited_set.Insert(node, node);
      _worklist.push(node);
    }
  }

  Node* operator[] (uint i) const {
    return _worklist[i];
  }

  size_t size() const {
    return _worklist.size();
  }

 private:
  Dict _visited_set;
  Node_List _worklist;
};

// Inline definition of Compile::record_for_igvn must be deferred to this point.
inline void Compile::record_for_igvn(Node* n) {
  _igvn_worklist->push(n);
}

// Inline definition of Compile::remove_for_igvn must be deferred to this point.
inline void Compile::remove_for_igvn(Node* n) {
  _igvn_worklist->remove(n);
}

//------------------------------Node_Stack-------------------------------------
class Node_Stack {
 protected:
  struct INode {
    Node* node; // Processed node
    uint  indx; // Index of next node's child
  };
  INode* _inode_top; // tos, stack grows up
  INode* _inode_max; // End of _inodes == _inodes + _max
  INode* _inodes;    // Array storage for the stack
  Arena* _a;         // Arena to allocate in
  ReallocMark _nesting; // Safety checks for arena reallocation

  void maybe_grow() {
    _nesting.check(_a); // Check if a potential reallocation in the arena is safe
    if (_inode_top >= _inode_max) {
      grow();
    }
  }
  void grow();

 public:
  Node_Stack(int size) : Node_Stack(Thread::current()->resource_area(), size) {}

  Node_Stack(Arena* a, int size) : _a(a) {
    size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
    _inodes = NEW_ARENA_ARRAY(_a, INode, max);
    _inode_max = _inodes + max;
    _inode_top = _inodes - 1; // stack is empty
  }

  void pop() {
    assert(_inode_top >= _inodes, "node stack underflow");
    --_inode_top;
  }
  void push(Node* n, uint i) {
    ++_inode_top;
    maybe_grow();
    INode* top = _inode_top; // optimization
    top->node = n;
    top->indx = i;
  }
  Node* node() const {
    return _inode_top->node;
  }
  Node* node_at(uint i) const {
    assert(_inodes + i <= _inode_top, "in range");
    return _inodes[i].node;
  }
  uint index() const {
    return _inode_top->indx;
  }
  uint index_at(uint i) const {
    assert(_inodes + i <= _inode_top, "in range");
    return _inodes[i].indx;
  }
  void set_node(Node* n) {
    _inode_top->node = n;
  }
  void set_index(uint i) {
    _inode_top->indx = i;
  }
  uint size_max() const { return (uint)pointer_delta(_inode_max, _inodes, sizeof(INode)); } // Max size
  uint size() const { return (uint)pointer_delta((_inode_top + 1), _inodes, sizeof(INode)); } // Current size
  bool is_nonempty() const { return (_inode_top >= _inodes); }
  bool is_empty() const { return (_inode_top < _inodes); }
  void clear() { _inode_top = _inodes - 1; } // retain storage

  // Node_Stack is used to map nodes.
  Node* find(uint idx) const;

  NONCOPYABLE(Node_Stack);
};
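
// Illustrative iterative-walk sketch (not from the original source; 'root' is
// a hypothetical start node): the (node, index) pair lets a graph walk
// remember which input to visit next, avoiding recursion. A real walk would
// also track visited nodes, since the graph can be cyclic:
//
//   Node_Stack stack(16);
//   stack.push(root, 0);
//   while (stack.is_nonempty()) {
//     Node* n = stack.node();
//     uint  i = stack.index();
//     if (i < n->req()) {
//       stack.set_index(i + 1);
//       Node* in = n->in(i);
//       if (in != nullptr) {
//         stack.push(in, 0); // visit 'in' before n's remaining inputs
//       }
//     } else {
//       stack.pop(); // all inputs of n visited
//     }
//   }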


//-----------------------------Node_Notes--------------------------------------
// Debugging or profiling annotations loosely and sparsely associated
// with some nodes. See Compile::node_notes_at for the accessor.
class Node_Notes {
  JVMState* _jvms;

 public:
  Node_Notes(JVMState* jvms = nullptr) {
    _jvms = jvms;
  }

  JVMState* jvms() { return _jvms; }
  void set_jvms(JVMState* x) { _jvms = x; }

  // True if there is nothing here.
  bool is_clear() {
    return (_jvms == nullptr);
  }

  // Make there be nothing here.
  void clear() {
    _jvms = nullptr;
  }

  // Make a new, clean node notes.
  static Node_Notes* make(Compile* C) {
    Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
    nn->clear();
    return nn;
  }

  Node_Notes* clone(Compile* C) {
    Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
    (*nn) = (*this);
    return nn;
  }

  // Absorb any information from source.
  bool update_from(Node_Notes* source) {
    bool changed = false;
    if (source != nullptr) {
      if (source->jvms() != nullptr) {
        set_jvms(source->jvms());
        changed = true;
      }
    }
    return changed;
  }
};

// Inlined accessors for Compile::node_notes that require the preceding class:
inline Node_Notes*
Compile::locate_node_notes(GrowableArray<Node_Notes*>* arr,
                           int idx, bool can_grow) {
  assert(idx >= 0, "oob");
  int block_idx = (idx >> _log2_node_notes_block_size);
  int grow_by = (block_idx - (arr == nullptr ? 0 : arr->length()));
  if (grow_by >= 0) {
    if (!can_grow) return nullptr;
    grow_node_notes(arr, grow_by + 1);
  }
  if (arr == nullptr) return nullptr;
  // (Every element of arr is a sub-array of length _node_notes_block_size.)
  return arr->at(block_idx) + (idx & (_node_notes_block_size - 1));
}
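
// Illustrative index arithmetic (not from the original source), assuming a
// block size of 256 (_log2_node_notes_block_size == 8): idx 1000 lives in
// block 1000 >> 8 == 3, at offset 1000 & 255 == 232, so the note is found at
// arr->at(3) + 232.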

inline Node_Notes* Compile::node_notes_at(int idx) {
  return locate_node_notes(_node_note_array, idx, false);
}

inline bool
Compile::set_node_notes_at(int idx, Node_Notes* value) {
  if (value == nullptr || value->is_clear())
    return false; // nothing to write => write nothing
  Node_Notes* loc = locate_node_notes(_node_note_array, idx, true);
  assert(loc != nullptr, "notes array should have grown to cover idx");
  return loc->update_from(value);
}


//------------------------------TypeNode---------------------------------------
// Node with a Type constant.
class TypeNode : public Node {
 protected:
  virtual uint hash() const; // Check the type
  virtual bool cmp(const Node& n) const;
  virtual uint size_of() const; // Size is bigger
  const Type* const _type;
 public:
  void set_type(const Type* t) {
    assert(t != nullptr, "sanity");
    DEBUG_ONLY(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t; // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != nullptr, "sanity"); return _type; }
  TypeNode(const Type* t, uint required) : Node(required), _type(t) {
    init_class_id(Class_Type);
  }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
  virtual const Type* bottom_type() const;
  virtual uint ideal_reg() const;

  void make_path_dead(PhaseIterGVN* igvn, PhaseIdealLoop* loop, Node* ctrl_use, uint j, const char* phase_str);
#ifndef PRODUCT
  virtual void dump_spec(outputStream* st) const;
  virtual void dump_compact_spec(outputStream* st) const;
#endif
  void make_paths_from_here_dead(PhaseIterGVN* igvn, PhaseIdealLoop* loop, const char* phase_str);
  void create_halt_path(PhaseIterGVN* igvn, Node* c, PhaseIdealLoop* loop, const char* phase_str) const;
};

#include "opto/opcodes.hpp"

#define Op_IL(op) \
  inline int Op_ ## op(BasicType bt) { \
    assert(bt == T_INT || bt == T_LONG, "only for int or longs"); \
    if (bt == T_INT) { \
      return Op_## op ## I; \
    } \
    return Op_## op ## L; \
  }
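
// For example, Op_IL(Add) expands to a helper that selects between the int
// and long opcodes at runtime:
//
//   inline int Op_Add(BasicType bt) {
//     assert(bt == T_INT || bt == T_LONG, "only for int or longs");
//     if (bt == T_INT) {
//       return Op_AddI;
//     }
//     return Op_AddL;
//   }
//
// so Op_Add(T_LONG) yields Op_AddL.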

Op_IL(Add)
Op_IL(And)
Op_IL(Sub)
Op_IL(Mul)
Op_IL(URShift)
Op_IL(LShift)
Op_IL(Xor)
Op_IL(Cmp)
Op_IL(Div)
Op_IL(Mod)
Op_IL(UDiv)
Op_IL(UMod)

inline int Op_ConIL(BasicType bt) {
  assert(bt == T_INT || bt == T_LONG, "only for int or longs");
  if (bt == T_INT) {
    return Op_ConI;
  }
  return Op_ConL;
}

inline int Op_Cmp_unsigned(BasicType bt) {
  assert(bt == T_INT || bt == T_LONG, "only for int or longs");
  if (bt == T_INT) {
    return Op_CmpU;
  }
  return Op_CmpUL;
}

inline int Op_Cast(BasicType bt) {
  assert(bt == T_INT || bt == T_LONG, "only for int or longs");
  if (bt == T_INT) {
    return Op_CastII;
  }
  return Op_CastLL;
}

inline int Op_DivIL(BasicType bt, bool is_unsigned) {
  assert(bt == T_INT || bt == T_LONG, "only for int or longs");
  if (bt == T_INT) {
    if (is_unsigned) {
      return Op_UDivI;
    } else {
      return Op_DivI;
    }
  }
  if (is_unsigned) {
    return Op_UDivL;
  } else {
    return Op_DivL;
  }
}

inline int Op_DivModIL(BasicType bt, bool is_unsigned) {
  assert(bt == T_INT || bt == T_LONG, "only for int or longs");
  if (bt == T_INT) {
    if (is_unsigned) {
      return Op_UDivModI;
    } else {
      return Op_DivModI;
    }
  }
  if (is_unsigned) {
    return Op_UDivModL;
  } else {
    return Op_DivModL;
  }
}

// Interface defining the actions to be taken when running a DataNodeBFS. Each user can extend this class to specify
// a customized BFS.
class BFSActions : public StackObj {
 public:
  // Should a node's inputs be visited further in the BFS traversal? By default, we visit all data inputs. Override this
  // method to provide a custom filter.
  virtual bool should_visit(Node* node) const {
    // By default, visit all inputs.
    return true;
  }

  // Is the visited node a target node that we are looking for in the BFS traversal? We do not visit its inputs further,
  // but the BFS will continue to visit all unvisited nodes in the queue.
  virtual bool is_target_node(Node* node) const = 0;

  // Defines an action that should be taken when we visit a target node in the BFS traversal.
  virtual void target_node_action(Node* target_node) = 0;
};

// Class to perform a BFS traversal on the data nodes from a given start node. The provided BFSActions guide which
// data node's inputs should be further visited, which data nodes are target nodes and what to do with the target nodes.
class DataNodeBFS : public StackObj {
  BFSActions& _bfs_actions;

 public:
  explicit DataNodeBFS(BFSActions& bfs_action) : _bfs_actions(bfs_action) {}

  // Run the BFS starting from 'start_node' and apply the actions provided to this class.
  void run(Node* start_node) {
    ResourceMark rm;
    Unique_Node_List nodes_to_visit;
    nodes_to_visit.push(start_node);
    for (uint i = 0; i < nodes_to_visit.size(); i++) {
      Node* next = nodes_to_visit[i];
      for (uint j = 1; j < next->req(); j++) {
        Node* input = next->in(j);
        if (_bfs_actions.is_target_node(input)) {
          assert(_bfs_actions.should_visit(input), "must also pass node filter");
          _bfs_actions.target_node_action(input);
        } else if (_bfs_actions.should_visit(input)) {
          nodes_to_visit.push(input);
        }
      }
    }
  }
};
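
// Illustrative use (not from the original source; 'phis' and 'start' are
// hypothetical): collect all Phi nodes feeding into 'start' through data
// inputs, stopping the downward walk at each Phi. Inputs may be null, so the
// overrides guard for it:
//
//   class CollectPhis : public BFSActions {
//     Unique_Node_List& _phis;
//    public:
//     CollectPhis(Unique_Node_List& phis) : _phis(phis) {}
//     virtual bool should_visit(Node* n) const {
//       return n != nullptr && !n->is_CFG();
//     }
//     virtual bool is_target_node(Node* n) const {
//       return n != nullptr && n->is_Phi();
//     }
//     virtual void target_node_action(Node* n) { _phis.push(n); }
//   };
//
//   CollectPhis collect(phis);
//   DataNodeBFS(collect).run(start);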

#endif // SHARE_OPTO_NODE_HPP