1 /*
2 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2024, 2025, Alibaba Group Holding Limited. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #ifndef SHARE_OPTO_NODE_HPP
27 #define SHARE_OPTO_NODE_HPP
28
29 #include "libadt/vectset.hpp"
30 #include "opto/compile.hpp"
31 #include "opto/type.hpp"
32 #include "utilities/copy.hpp"
33
34 // Portions of code courtesy of Clifford Click
35
36 // Optimization - Graph Style
37
38
39 class AbstractLockNode;
40 class AddNode;
41 class AddPNode;
42 class AliasInfo;
43 class AllocateArrayNode;
44 class AllocateNode;
45 class ArrayCopyNode;
46 class BaseCountedLoopNode;
47 class BaseCountedLoopEndNode;
48 class BlackholeNode;
49 class Block;
50 class BoolNode;
51 class BoxLockNode;
52 class CMoveNode;
53 class CallDynamicJavaNode;
54 class CallJavaNode;
55 class CallLeafNode;
56 class CallLeafNoFPNode;
57 class CallLeafPureNode;
58 class CallNode;
59 class CallRuntimeNode;
60 class CallStaticJavaNode;
61 class CastFFNode;
62 class CastHHNode;
63 class CastDDNode;
64 class CastVVNode;
65 class CastIINode;
66 class CastLLNode;
67 class CastPPNode;
68 class CatchNode;
69 class CatchProjNode;
70 class CheckCastPPNode;
71 class ClearArrayNode;
72 class CmpNode;
73 class CodeBuffer;
74 class ConstraintCastNode;
75 class ConNode;
76 class ConINode;
77 class ConvertNode;
78 class CompareAndSwapNode;
79 class CompareAndExchangeNode;
80 class CountedLoopNode;
81 class CountedLoopEndNode;
82 class DecodeNarrowPtrNode;
83 class DecodeNNode;
84 class DecodeNKlassNode;
85 class EncodeNarrowPtrNode;
86 class EncodePNode;
87 class EncodePKlassNode;
88 class FastLockNode;
89 class FastUnlockNode;
90 class HaltNode;
91 class IfNode;
92 class IfProjNode;
93 class IfFalseNode;
94 class IfTrueNode;
95 class InitializeNode;
96 class JVMState;
97 class JumpNode;
98 class JumpProjNode;
99 class LoadNode;
100 class LoadStoreNode;
101 class LoadStoreConditionalNode;
102 class LockNode;
103 class LongCountedLoopNode;
104 class LongCountedLoopEndNode;
105 class LoopNode;
106 class LShiftNode;
107 class MachBranchNode;
108 class MachCallDynamicJavaNode;
109 class MachCallJavaNode;
110 class MachCallLeafNode;
111 class MachCallNode;
112 class MachCallRuntimeNode;
113 class MachCallStaticJavaNode;
114 class MachConstantBaseNode;
115 class MachConstantNode;
116 class MachGotoNode;
117 class MachIfNode;
118 class MachJumpNode;
119 class MachNode;
120 class MachNullCheckNode;
121 class MachProjNode;
122 class MachReturnNode;
123 class MachSafePointNode;
124 class MachSpillCopyNode;
125 class MachTempNode;
126 class MachMergeNode;
127 class MachMemBarNode;
128 class Matcher;
129 class MemBarNode;
130 class MemBarStoreStoreNode;
131 class MemNode;
132 class MergeMemNode;
133 class MinMaxNode;
134 class MoveNode;
135 class MulNode;
136 class MultiNode;
137 class MultiBranchNode;
138 class NarrowMemProjNode;
139 class NegNode;
140 class NegVNode;
141 class NeverBranchNode;
142 class Opaque1Node;
143 class OpaqueLoopInitNode;
144 class OpaqueLoopStrideNode;
145 class OpaqueMultiversioningNode;
146 class OpaqueConstantBoolNode;
147 class OpaqueInitializedAssertionPredicateNode;
148 class OpaqueTemplateAssertionPredicateNode;
149 class OuterStripMinedLoopNode;
150 class OuterStripMinedLoopEndNode;
151 class Node;
152 class Node_Array;
153 class Node_List;
154 class Node_Stack;
155 class OopMap;
156 class ParmNode;
157 class ParsePredicateNode;
158 class PCTableNode;
159 class PhaseCCP;
160 class PhaseGVN;
161 class PhaseIdealLoop;
162 class PhaseIterGVN;
163 class PhaseRegAlloc;
164 class PhaseTransform;
165 class PhaseValues;
166 class PhiNode;
167 class Pipeline;
168 class PopulateIndexNode;
169 class ProjNode;
170 class RangeCheckNode;
171 class ReductionNode;
172 class RegMask;
173 class RegionNode;
174 class RootNode;
175 class SafePointNode;
176 class SafePointScalarObjectNode;
177 class SafePointScalarMergeNode;
178 class SaturatingVectorNode;
179 class StartNode;
180 class State;
181 class StoreNode;
182 class SubNode;
183 class SubTypeCheckNode;
184 class Type;
185 class TypeNode;
186 class UnlockNode;
187 class VectorNode;
188 class LoadVectorNode;
189 class LoadVectorMaskedNode;
190 class StoreVectorMaskedNode;
191 class LoadVectorGatherNode;
192 class LoadVectorGatherMaskedNode;
193 class StoreVectorNode;
194 class StoreVectorScatterNode;
195 class StoreVectorScatterMaskedNode;
196 class VerifyVectorAlignmentNode;
197 class VectorMaskCmpNode;
198 class VectorUnboxNode;
199 class VectorSet;
200 class VectorReinterpretNode;
201 class ShiftVNode;
202 class MulVLNode;
203 class ExpandVNode;
204 class CompressVNode;
205 class CompressMNode;
206 class C2_MacroAssembler;
207
208
209 #ifndef OPTO_DU_ITERATOR_ASSERT
210 #ifdef ASSERT
211 #define OPTO_DU_ITERATOR_ASSERT 1
212 #else
213 #define OPTO_DU_ITERATOR_ASSERT 0
214 #endif
215 #endif //OPTO_DU_ITERATOR_ASSERT
216
217 #if OPTO_DU_ITERATOR_ASSERT
218 class DUIterator;
219 class DUIterator_Fast;
220 class DUIterator_Last;
221 #else
222 typedef uint DUIterator;
223 typedef Node** DUIterator_Fast;
224 typedef Node** DUIterator_Last;
225 #endif
226
227 typedef ResizeableHashTable<Node*, Node*, AnyObj::RESOURCE_AREA, mtCompiler> OrigToNewHashtable;
228
229 // Node Sentinel
230 #define NodeSentinel (Node*)-1
231
232 // Unknown count frequency
233 #define COUNT_UNKNOWN (-1.0f)
234
235 //------------------------------Node-------------------------------------------
236 // Nodes define actions in the program. They create values, which have types.
237 // They are both vertices in a directed graph and program primitives. Nodes
238 // are labeled; the label is the "opcode", the primitive function in the lambda
239 // calculus sense that gives meaning to the Node. Node inputs are ordered (so
240 // that "a-b" is different from "b-a"). The inputs to a Node are the inputs to
241 // the Node's function. These inputs also define a Type equation for the Node.
242 // Solving these Type equations amounts to doing dataflow analysis.
243 // Control and data are uniformly represented in the graph. Finally, Nodes
244 // have a unique dense integer index which is used to index into side arrays
245 // whenever I have phase-specific information.
246
247 class Node {
248
249 // Lots of restrictions on cloning Nodes
250 NONCOPYABLE(Node);
251
252 public:
253 friend class Compile;
254 #if OPTO_DU_ITERATOR_ASSERT
255 friend class DUIterator_Common;
256 friend class DUIterator;
257 friend class DUIterator_Fast;
258 friend class DUIterator_Last;
259 #endif
260
261 // Because Nodes come and go, I define an Arena of Node structures to pull
262 // from. This should allow fast access to node creation & deletion. This
// field is a local cache of a value defined in some "program fragment" of
// which these Nodes are just a part.
265
266 inline void* operator new(size_t x) throw() {
267 Compile* C = Compile::current();
268 Node* n = (Node*)C->node_arena()->AmallocWords(x);
269 return (void*)n;
270 }
271
272 // Delete is a NOP
273 void operator delete( void *ptr ) {}
274 // Fancy destructor; eagerly attempt to reclaim Node numberings and storage
275 void destruct(PhaseValues* phase);
276
// Create a new Node. Required is the number of inputs required for
// semantic correctness.
279 Node( uint required );
280
281 // Create a new Node with given input edges.
282 // This version requires use of the "edge-count" new.
283 // E.g. new (C,3) FooNode( C, nullptr, left, right );
284 Node( Node *n0 );
285 Node( Node *n0, Node *n1 );
286 Node( Node *n0, Node *n1, Node *n2 );
287 Node( Node *n0, Node *n1, Node *n2, Node *n3 );
288 Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4 );
289 Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4, Node *n5 );
290 Node( Node *n0, Node *n1, Node *n2, Node *n3,
291 Node *n4, Node *n5, Node *n6 );
292
293 // Clone an inherited Node given only the base Node type.
294 Node* clone() const;
295
296 // Clone a Node, immediately supplying one or two new edges.
297 // The first and second arguments, if non-null, replace in(1) and in(2),
298 // respectively.
299 Node* clone_with_data_edge(Node* in1, Node* in2 = nullptr) const {
300 Node* nn = clone();
301 if (in1 != nullptr) nn->set_req(1, in1);
302 if (in2 != nullptr) nn->set_req(2, in2);
303 return nn;
304 }
305
306 private:
307 // Shared setup for the above constructors.
308 // Handles all interactions with Compile::current.
309 // Puts initial values in all Node fields except _idx.
310 // Returns the initial value for _idx, which cannot
311 // be initialized by assignment.
312 inline int Init(int req);
313
314 //----------------- input edge handling
315 protected:
316 friend class PhaseCFG; // Access to address of _in array elements
317 Node **_in; // Array of use-def references to Nodes
318 Node **_out; // Array of def-use references to Nodes
319
320 // Input edges are split into two categories. Required edges are required
321 // for semantic correctness; order is important and nulls are allowed.
322 // Precedence edges are used to help determine execution order and are
323 // added, e.g., for scheduling purposes. They are unordered and not
324 // duplicated; they have no embedded nulls. Edges from 0 to _cnt-1
325 // are required, from _cnt to _max-1 are precedence edges.
326 node_idx_t _cnt; // Total number of required Node inputs.
327
328 node_idx_t _max; // Actual length of input array.
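// For example, a sketch of walking the two edge ranges separately, using the
// req()/len() accessors declared below ('n' is an arbitrary node):
//   for (uint i = 0; i < n->req(); i++)        { Node* def  = n->in(i); ... } // required (may be null)
//   for (uint i = n->req(); i < n->len(); i++) { Node* prec = n->in(i); ... } // precedence (no nulls)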
329
330 // Output edges are an unordered list of def-use edges which exactly
331 // correspond to required input edges which point from other nodes
332 // to this one. Thus the count of the output edges is the number of
333 // users of this node.
334 node_idx_t _outcnt; // Total number of Node outputs.
335
336 node_idx_t _outmax; // Actual length of output array.
337
338 // Grow the actual input array to the next larger power-of-2 bigger than len.
339 void grow( uint len );
340 // Grow the output array to the next larger power-of-2 bigger than len.
341 void out_grow( uint len );
342 // Resize input or output array to grow it to the next larger power-of-2
343 // bigger than len.
344 void resize_array(Node**& array, node_idx_t& max_size, uint len, bool needs_clearing);
345
346 public:
347 // Each Node is assigned a unique small/dense number. This number is used
348 // to index into auxiliary arrays of data and bit vectors.
349 // The value of _idx can be changed using the set_idx() method.
350 //
351 // The PhaseRenumberLive phase renumbers nodes based on liveness information.
352 // Therefore, it updates the value of the _idx field. The parse-time _idx is
353 // preserved in _parse_idx.
354 node_idx_t _idx;
355 DEBUG_ONLY(const node_idx_t _parse_idx;)
356 // IGV node identifier. Two nodes, possibly in different compilation phases,
357 // have the same IGV identifier if (and only if) they are the very same node
358 // (same memory address) or one is "derived" from the other (by e.g.
359 // renumbering or matching). This identifier makes it possible to follow the
360 // entire lifetime of a node in IGV even if its C2 identifier (_idx) changes.
361 NOT_PRODUCT(node_idx_t _igv_idx;)
362
363 // Get the (read-only) number of input edges
364 uint req() const { return _cnt; }
365 uint len() const { return _max; }
366 // Get the (read-only) number of output edges
367 uint outcnt() const { return _outcnt; }
368
369 #if OPTO_DU_ITERATOR_ASSERT
370 // Iterate over the out-edges of this node. Deletions are illegal.
371 inline DUIterator outs() const;
372 // Use this when the out array might have changed to suppress asserts.
373 inline DUIterator& refresh_out_pos(DUIterator& i) const;
374 // Does the node have an out at this position? (Used for iteration.)
375 inline bool has_out(DUIterator& i) const;
376 inline Node* out(DUIterator& i) const;
377 // Iterate over the out-edges of this node. All changes are illegal.
378 inline DUIterator_Fast fast_outs(DUIterator_Fast& max) const;
379 inline Node* fast_out(DUIterator_Fast& i) const;
380 // Iterate over the out-edges of this node, deleting one at a time.
381 inline DUIterator_Last last_outs(DUIterator_Last& min) const;
382 inline Node* last_out(DUIterator_Last& i) const;
383 // The inline bodies of all these methods are after the iterator definitions.
384 #else
385 // Iterate over the out-edges of this node. Deletions are illegal.
386 // This iteration uses integral indexes, to decouple from array reallocations.
387 DUIterator outs() const { return 0; }
388 // Use this when the out array might have changed to suppress asserts.
389 DUIterator refresh_out_pos(DUIterator i) const { return i; }
390
391 // Reference to the i'th output Node. Error if out of bounds.
392 Node* out(DUIterator i) const { assert(i < _outcnt, "oob"); return _out[i]; }
393 // Does the node have an out at this position? (Used for iteration.)
394 bool has_out(DUIterator i) const { return i < _outcnt; }
395
396 // Iterate over the out-edges of this node. All changes are illegal.
397 // This iteration uses a pointer internal to the out array.
398 DUIterator_Fast fast_outs(DUIterator_Fast& max) const {
399 Node** out = _out;
400 // Assign a limit pointer to the reference argument:
401 max = out + (ptrdiff_t)_outcnt;
402 // Return the base pointer:
403 return out;
404 }
405 Node* fast_out(DUIterator_Fast i) const { return *i; }
406 // Iterate over the out-edges of this node, deleting one at a time.
407 // This iteration uses a pointer internal to the out array.
408 DUIterator_Last last_outs(DUIterator_Last& min) const {
409 Node** out = _out;
410 // Assign a limit pointer to the reference argument:
411 min = out;
412 // Return the pointer to the start of the iteration:
413 return out + (ptrdiff_t)_outcnt - 1;
414 }
415 Node* last_out(DUIterator_Last i) const { return *i; }
416 #endif
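// The most common out-edge iteration idiom looks like this (a sketch; 'n' and
// 'use' are illustrative names):
//   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
//     Node* use = n->fast_out(i);
//     ...
//   }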
417
418 // Reference to the i'th input Node. Error if out of bounds.
419 Node* in(uint i) const { assert(i < _max, "oob: i=%d, _max=%d", i, _max); return _in[i]; }
420 // Reference to the i'th input Node. null if out of bounds.
421 Node* lookup(uint i) const { return ((i < _max) ? _in[i] : nullptr); }
422 // Reference to the i'th output Node. Error if out of bounds.
// Use this accessor sparingly. We are trying to use iterators instead.
424 Node* raw_out(uint i) const { assert(i < _outcnt,"oob"); return _out[i]; }
425 // Return the unique out edge.
426 Node* unique_out() const { assert(_outcnt==1,"not unique"); return _out[0]; }
427
// In some cases, a node n has only a single user, but that user may use
// n once or multiple times, e.g.:
430 // use = ConvF2I(this)
431 // use = AddI(this, this)
432 Node* unique_multiple_edges_out_or_null() const;
433
434 // Delete out edge at position 'i' by moving last out edge to position 'i'
435 void raw_del_out(uint i) {
436 assert(i < _outcnt,"oob");
437 assert(_outcnt > 0,"oob");
438 #if OPTO_DU_ITERATOR_ASSERT
439 // Record that a change happened here.
440 DEBUG_ONLY(_last_del = _out[i]; ++_del_tick);
441 #endif
442 _out[i] = _out[--_outcnt];
443 // Smash the old edge so it can't be used accidentally.
444 DEBUG_ONLY(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
445 }
446
447 #ifdef ASSERT
448 bool is_dead() const;
449 static bool is_not_dead(const Node* n);
450 bool is_reachable_from_root() const;
451 #endif
452 // Check whether node has become unreachable
453 bool is_unreachable(PhaseIterGVN &igvn) const;
454
455 // Set a required input edge, also updates corresponding output edge
456 void add_req( Node *n ); // Append a NEW required input
457 void add_req( Node *n0, Node *n1 ) {
458 add_req(n0); add_req(n1); }
459 void add_req( Node *n0, Node *n1, Node *n2 ) {
460 add_req(n0); add_req(n1); add_req(n2); }
461 void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n).
462 void del_req( uint idx ); // Delete required edge & compact
463 void del_req_ordered( uint idx ); // Delete required edge & compact with preserved order
464 void ins_req( uint i, Node *n ); // Insert a NEW required input
465 void set_req( uint i, Node *n ) {
466 assert( is_not_dead(n), "can not use dead node");
467 assert( i < _cnt, "oob: i=%d, _cnt=%d", i, _cnt);
468 assert( !VerifyHashTableKeys || _hash_lock == 0,
469 "remove node from hash table before modifying it");
470 Node** p = &_in[i]; // cache this._in, across the del_out call
471 if (*p != nullptr) (*p)->del_out((Node *)this);
472 (*p) = n;
473 if (n != nullptr) n->add_out((Node *)this);
474 Compile::current()->record_modified_node(this);
475 }
476 // Light version of set_req() to init inputs after node creation.
477 void init_req( uint i, Node *n ) {
478 assert( (i == 0 && this == n) ||
479 is_not_dead(n), "can not use dead node");
480 assert( i < _cnt, "oob");
481 assert( !VerifyHashTableKeys || _hash_lock == 0,
482 "remove node from hash table before modifying it");
483 assert( _in[i] == nullptr, "sanity");
484 _in[i] = n;
485 if (n != nullptr) n->add_out((Node *)this);
486 Compile::current()->record_modified_node(this);
487 }
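// For example (a sketch; 'replacement' is an arbitrary live node): rewiring a
// data input with set_req() also maintains the out-lists of both the old and
// the new input:
//   n->set_req(1, replacement);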
488 // Find first occurrence of n among my edges:
489 int find_edge(Node* n);
490 int find_prec_edge(Node* n) {
491 for (uint i = req(); i < len(); i++) {
492 if (_in[i] == n) return i;
493 if (_in[i] == nullptr) {
494 DEBUG_ONLY( while ((++i) < len()) assert(_in[i] == nullptr, "Gap in prec edges!"); )
495 break;
496 }
497 }
498 return -1;
499 }
500 int replace_edge(Node* old, Node* neww, PhaseGVN* gvn = nullptr);
501 int replace_edges_in_range(Node* old, Node* neww, int start, int end, PhaseGVN* gvn);
502 // null out all inputs to eliminate incoming Def-Use edges.
503 void disconnect_inputs(Compile* C);
504
505 // Quickly, return true if and only if I am Compile::current()->top().
506 bool is_top() const {
507 assert((this == (Node*) Compile::current()->top()) == (_out == nullptr), "");
508 return (_out == nullptr);
509 }
510 // Reaffirm invariants for is_top. (Only from Compile::set_cached_top_node.)
511 void setup_is_top();
512
513 // Strip away casting. (It is depth-limited.)
514 Node* uncast(bool keep_deps = false) const;
515 // Return whether two Nodes are equivalent, after stripping casting.
516 bool eqv_uncast(const Node* n, bool keep_deps = false) const {
517 return (this->uncast(keep_deps) == n->uncast(keep_deps));
518 }
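// e.g. (a sketch): a pointer and a CheckCastPP of that pointer compare equal
// under eqv_uncast():
//   if (p->eqv_uncast(q)) { ... }  // same value modulo casts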
519
// Find an out (use) of the current node that matches the given opcode.
521 Node* find_out_with(int opcode);
522 // Return true if the current node has an out that matches opcode.
523 bool has_out_with(int opcode);
524 // Return true if the current node has an out that matches any of the opcodes.
525 bool has_out_with(int opcode1, int opcode2, int opcode3, int opcode4);
526
527 private:
528 static Node* uncast_helper(const Node* n, bool keep_deps);
529
530 // Add an output edge to the end of the list
531 void add_out( Node *n ) {
532 if (is_top()) return;
533 if( _outcnt == _outmax ) out_grow(_outcnt);
534 _out[_outcnt++] = n;
535 }
536 // Delete an output edge
537 void del_out( Node *n ) {
538 if (is_top()) return;
539 Node** outp = &_out[_outcnt];
540 // Find and remove n
541 do {
542 assert(outp > _out, "Missing Def-Use edge");
543 } while (*--outp != n);
544 *outp = _out[--_outcnt];
545 // Smash the old edge so it can't be used accidentally.
546 DEBUG_ONLY(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
547 // Record that a change happened here.
548 #if OPTO_DU_ITERATOR_ASSERT
549 DEBUG_ONLY(_last_del = n; ++_del_tick);
550 #endif
551 }
552 // Close gap after removing edge.
553 void close_prec_gap_at(uint gap) {
554 assert(_cnt <= gap && gap < _max, "no valid prec edge");
555 uint i = gap;
556 Node *last = nullptr;
557 for (; i < _max-1; ++i) {
558 Node *next = _in[i+1];
559 if (next == nullptr) break;
560 last = next;
561 }
562 _in[gap] = last; // Move last slot to empty one.
563 _in[i] = nullptr; // null out last slot.
564 }
565
566 public:
567 // Globally replace this node by a given new node, updating all uses.
568 void replace_by(Node* new_node);
569 // Globally replace this node by a given new node, updating all uses
570 // and cutting input edges of old node.
571 void subsume_by(Node* new_node, Compile* c) {
572 replace_by(new_node);
573 disconnect_inputs(c);
574 }
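// e.g. (a sketch): replace 'old' by 'nu' everywhere and cut 'old' loose:
//   old->subsume_by(nu, Compile::current());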
575 void set_req_X(uint i, Node *n, PhaseIterGVN *igvn);
576 void set_req_X(uint i, Node *n, PhaseGVN *gvn);
577 // Find the one non-null required input. RegionNode only
578 Node *nonnull_req() const;
579 // Add or remove precedence edges
580 void add_prec( Node *n );
581 void rm_prec( uint i );
582
583 // Note: prec(i) will not necessarily point to n if edge already exists.
584 void set_prec( uint i, Node *n ) {
585 assert(i < _max, "oob: i=%d, _max=%d", i, _max);
586 assert(is_not_dead(n), "can not use dead node");
587 assert(i >= _cnt, "not a precedence edge");
588 // Avoid spec violation: duplicated prec edge.
589 if (_in[i] == n) return;
590 if (n == nullptr || find_prec_edge(n) != -1) {
591 rm_prec(i);
592 return;
593 }
594 if (_in[i] != nullptr) _in[i]->del_out((Node *)this);
595 _in[i] = n;
596 n->add_out((Node *)this);
597 Compile::current()->record_modified_node(this);
598 }
599
600 // Set this node's index, used by cisc_version to replace current node
601 void set_idx(uint new_idx) {
602 _idx = new_idx;
603 }
604 // Swap input edge order. (Edge indexes i1 and i2 are usually 1 and 2.)
605 void swap_edges(uint i1, uint i2) {
606 DEBUG_ONLY(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
607 // Def-Use info is unchanged
608 Node* n1 = in(i1);
609 Node* n2 = in(i2);
610 _in[i1] = n2;
611 _in[i2] = n1;
612 // If this node is in the hash table, make sure it doesn't need a rehash.
613 assert(check_hash == NO_HASH || check_hash == hash(), "edge swap must preserve hash code");
614 // Flip swapped edges flag.
615 if (has_swapped_edges()) {
616 remove_flag(Node::Flag_has_swapped_edges);
617 } else {
618 add_flag(Node::Flag_has_swapped_edges);
619 }
620 }
621
622 // Iterators over input Nodes for a Node X are written as:
623 // for( i = 0; i < X.req(); i++ ) ... X[i] ...
624 // NOTE: Required edges can contain embedded null pointers.
625
626 //----------------- Other Node Properties
627
628 // Generate class IDs for (some) ideal nodes so that it is possible to determine
629 // the type of a node using a non-virtual method call (the method is_<Node>() below).
630 //
// A class ID of an ideal node is a set of bits. In a class ID, a single bit determines
// the type of the node the ID represents; another subset of an ID's bits is reserved
// for the superclasses of the node represented by the ID.
//
// By design, if A is a supertype of B, B.is_A() returns true and A.is_B()
// returns false. A.is_A() returns true.
637 //
638 // If two classes, A and B, have the same superclass, a different bit of A's class id
639 // is reserved for A's type than for B's type. That bit is specified by the third
640 // parameter in the macro DEFINE_CLASS_ID.
641 //
642 // By convention, classes with deeper hierarchy are declared first. Moreover,
643 // classes with the same hierarchy depth are sorted by usage frequency.
644 //
645 // The query method masks the bits to cut off bits of subclasses and then compares
646 // the result with the class id (see the macro DEFINE_CLASS_QUERY below).
647 //
//  Class_MachCall=30, ClassMask_MachCall=31
// 12               8               4               0
//  0   0   0   0   0   0   0   0   1   1   1   1   0
//                                  |   |   |   |
//                                  |   |   |   Bit_Mach=2
//                                  |   |   Bit_MachReturn=4
//                                  |   Bit_MachSafePoint=8
//                                  Bit_MachCall=16
//
//  Class_CountedLoop=56, ClassMask_CountedLoop=63
// 12               8               4               0
//  0   0   0   0   0   0   0   1   1   1   0   0   0
//                              |   |   |
//                              |   |   Bit_Region=8
//                              |   Bit_Loop=16
//                              Bit_CountedLoop=32
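// A worked example of the query, using the MachCall numbers above (a sketch):
// a MachCallNode has _class_id == Class_MachCall == 30 (bits 2|4|8|16), so
//   is_Mach()      : (30 & ClassMask_Mach       /* 3 */) == Class_Mach       /* 2 */ -> true
//   is_MachReturn(): (30 & ClassMask_MachReturn /* 7 */) == Class_MachReturn /* 6 */ -> true
// while a plain MachNode (_class_id == 2) fails is_MachCall(): (2 & 31) != 30.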
664
665 #define DEFINE_CLASS_ID(cl, supcl, subn) \
666 Bit_##cl = (Class_##supcl == 0) ? 1 << subn : (Bit_##supcl) << (1 + subn) , \
667 Class_##cl = Class_##supcl + Bit_##cl , \
668 ClassMask_##cl = ((Bit_##cl << 1) - 1) ,
669
670 // This enum is used only for C2 ideal and mach nodes with is_<node>() methods
671 // so that its values fit into 32 bits.
672 enum NodeClasses {
673 Bit_Node = 0x00000000,
674 Class_Node = 0x00000000,
675 ClassMask_Node = 0xFFFFFFFF,
676
677 DEFINE_CLASS_ID(Multi, Node, 0)
678 DEFINE_CLASS_ID(SafePoint, Multi, 0)
679 DEFINE_CLASS_ID(Call, SafePoint, 0)
680 DEFINE_CLASS_ID(CallJava, Call, 0)
681 DEFINE_CLASS_ID(CallStaticJava, CallJava, 0)
682 DEFINE_CLASS_ID(CallDynamicJava, CallJava, 1)
683 DEFINE_CLASS_ID(CallRuntime, Call, 1)
684 DEFINE_CLASS_ID(CallLeaf, CallRuntime, 0)
685 DEFINE_CLASS_ID(CallLeafNoFP, CallLeaf, 0)
686 DEFINE_CLASS_ID(CallLeafPure, CallLeaf, 1)
687 DEFINE_CLASS_ID(Allocate, Call, 2)
688 DEFINE_CLASS_ID(AllocateArray, Allocate, 0)
689 DEFINE_CLASS_ID(AbstractLock, Call, 3)
690 DEFINE_CLASS_ID(Lock, AbstractLock, 0)
691 DEFINE_CLASS_ID(Unlock, AbstractLock, 1)
692 DEFINE_CLASS_ID(ArrayCopy, Call, 4)
693 DEFINE_CLASS_ID(MultiBranch, Multi, 1)
694 DEFINE_CLASS_ID(PCTable, MultiBranch, 0)
695 DEFINE_CLASS_ID(Catch, PCTable, 0)
696 DEFINE_CLASS_ID(Jump, PCTable, 1)
697 DEFINE_CLASS_ID(If, MultiBranch, 1)
698 DEFINE_CLASS_ID(BaseCountedLoopEnd, If, 0)
699 DEFINE_CLASS_ID(CountedLoopEnd, BaseCountedLoopEnd, 0)
700 DEFINE_CLASS_ID(LongCountedLoopEnd, BaseCountedLoopEnd, 1)
701 DEFINE_CLASS_ID(RangeCheck, If, 1)
702 DEFINE_CLASS_ID(OuterStripMinedLoopEnd, If, 2)
703 DEFINE_CLASS_ID(ParsePredicate, If, 3)
704 DEFINE_CLASS_ID(NeverBranch, MultiBranch, 2)
705 DEFINE_CLASS_ID(Start, Multi, 2)
706 DEFINE_CLASS_ID(MemBar, Multi, 3)
707 DEFINE_CLASS_ID(Initialize, MemBar, 0)
708 DEFINE_CLASS_ID(MemBarStoreStore, MemBar, 1)
709
710 DEFINE_CLASS_ID(Mach, Node, 1)
711 DEFINE_CLASS_ID(MachReturn, Mach, 0)
712 DEFINE_CLASS_ID(MachSafePoint, MachReturn, 0)
713 DEFINE_CLASS_ID(MachCall, MachSafePoint, 0)
714 DEFINE_CLASS_ID(MachCallJava, MachCall, 0)
715 DEFINE_CLASS_ID(MachCallStaticJava, MachCallJava, 0)
716 DEFINE_CLASS_ID(MachCallDynamicJava, MachCallJava, 1)
717 DEFINE_CLASS_ID(MachCallRuntime, MachCall, 1)
718 DEFINE_CLASS_ID(MachCallLeaf, MachCallRuntime, 0)
719 DEFINE_CLASS_ID(MachBranch, Mach, 1)
720 DEFINE_CLASS_ID(MachIf, MachBranch, 0)
721 DEFINE_CLASS_ID(MachGoto, MachBranch, 1)
722 DEFINE_CLASS_ID(MachNullCheck, MachBranch, 2)
723 DEFINE_CLASS_ID(MachSpillCopy, Mach, 2)
724 DEFINE_CLASS_ID(MachTemp, Mach, 3)
725 DEFINE_CLASS_ID(MachConstantBase, Mach, 4)
726 DEFINE_CLASS_ID(MachConstant, Mach, 5)
727 DEFINE_CLASS_ID(MachJump, MachConstant, 0)
728 DEFINE_CLASS_ID(MachMerge, Mach, 6)
729 DEFINE_CLASS_ID(MachMemBar, Mach, 7)
730
731 DEFINE_CLASS_ID(Type, Node, 2)
732 DEFINE_CLASS_ID(Phi, Type, 0)
733 DEFINE_CLASS_ID(ConstraintCast, Type, 1)
734 DEFINE_CLASS_ID(CastII, ConstraintCast, 0)
735 DEFINE_CLASS_ID(CheckCastPP, ConstraintCast, 1)
736 DEFINE_CLASS_ID(CastLL, ConstraintCast, 2)
737 DEFINE_CLASS_ID(CastFF, ConstraintCast, 3)
738 DEFINE_CLASS_ID(CastDD, ConstraintCast, 4)
739 DEFINE_CLASS_ID(CastVV, ConstraintCast, 5)
740 DEFINE_CLASS_ID(CastPP, ConstraintCast, 6)
741 DEFINE_CLASS_ID(CastHH, ConstraintCast, 7)
742 DEFINE_CLASS_ID(CMove, Type, 3)
743 DEFINE_CLASS_ID(SafePointScalarObject, Type, 4)
744 DEFINE_CLASS_ID(DecodeNarrowPtr, Type, 5)
745 DEFINE_CLASS_ID(DecodeN, DecodeNarrowPtr, 0)
746 DEFINE_CLASS_ID(DecodeNKlass, DecodeNarrowPtr, 1)
747 DEFINE_CLASS_ID(EncodeNarrowPtr, Type, 6)
748 DEFINE_CLASS_ID(EncodeP, EncodeNarrowPtr, 0)
749 DEFINE_CLASS_ID(EncodePKlass, EncodeNarrowPtr, 1)
750 DEFINE_CLASS_ID(Vector, Type, 7)
751 DEFINE_CLASS_ID(VectorMaskCmp, Vector, 0)
752 DEFINE_CLASS_ID(VectorUnbox, Vector, 1)
753 DEFINE_CLASS_ID(VectorReinterpret, Vector, 2)
754 DEFINE_CLASS_ID(ShiftV, Vector, 3)
755 DEFINE_CLASS_ID(CompressV, Vector, 4)
756 DEFINE_CLASS_ID(ExpandV, Vector, 5)
757 DEFINE_CLASS_ID(CompressM, Vector, 6)
758 DEFINE_CLASS_ID(Reduction, Vector, 7)
759 DEFINE_CLASS_ID(NegV, Vector, 8)
760 DEFINE_CLASS_ID(SaturatingVector, Vector, 9)
761 DEFINE_CLASS_ID(MulVL, Vector, 10)
762 DEFINE_CLASS_ID(Con, Type, 8)
763 DEFINE_CLASS_ID(ConI, Con, 0)
764 DEFINE_CLASS_ID(SafePointScalarMerge, Type, 9)
765 DEFINE_CLASS_ID(Convert, Type, 10)
766
767
768 DEFINE_CLASS_ID(Proj, Node, 3)
769 DEFINE_CLASS_ID(CatchProj, Proj, 0)
770 DEFINE_CLASS_ID(JumpProj, Proj, 1)
771 DEFINE_CLASS_ID(IfProj, Proj, 2)
772 DEFINE_CLASS_ID(IfTrue, IfProj, 0)
773 DEFINE_CLASS_ID(IfFalse, IfProj, 1)
774 DEFINE_CLASS_ID(Parm, Proj, 4)
775 DEFINE_CLASS_ID(MachProj, Proj, 5)
776 DEFINE_CLASS_ID(NarrowMemProj, Proj, 6)
777
778 DEFINE_CLASS_ID(Mem, Node, 4)
779 DEFINE_CLASS_ID(Load, Mem, 0)
780 DEFINE_CLASS_ID(LoadVector, Load, 0)
781 DEFINE_CLASS_ID(LoadVectorGather, LoadVector, 0)
782 DEFINE_CLASS_ID(LoadVectorGatherMasked, LoadVector, 1)
783 DEFINE_CLASS_ID(LoadVectorMasked, LoadVector, 2)
784 DEFINE_CLASS_ID(Store, Mem, 1)
785 DEFINE_CLASS_ID(StoreVector, Store, 0)
786 DEFINE_CLASS_ID(StoreVectorScatter, StoreVector, 0)
787 DEFINE_CLASS_ID(StoreVectorScatterMasked, StoreVector, 1)
788 DEFINE_CLASS_ID(StoreVectorMasked, StoreVector, 2)
789 DEFINE_CLASS_ID(LoadStore, Mem, 2)
790 DEFINE_CLASS_ID(LoadStoreConditional, LoadStore, 0)
791 DEFINE_CLASS_ID(CompareAndSwap, LoadStoreConditional, 0)
792 DEFINE_CLASS_ID(CompareAndExchangeNode, LoadStore, 1)
793
794 DEFINE_CLASS_ID(Region, Node, 5)
795 DEFINE_CLASS_ID(Loop, Region, 0)
796 DEFINE_CLASS_ID(Root, Loop, 0)
797 DEFINE_CLASS_ID(BaseCountedLoop, Loop, 1)
798 DEFINE_CLASS_ID(CountedLoop, BaseCountedLoop, 0)
799 DEFINE_CLASS_ID(LongCountedLoop, BaseCountedLoop, 1)
800 DEFINE_CLASS_ID(OuterStripMinedLoop, Loop, 2)
801
802 DEFINE_CLASS_ID(Sub, Node, 6)
803 DEFINE_CLASS_ID(Cmp, Sub, 0)
804 DEFINE_CLASS_ID(FastLock, Cmp, 0)
805 DEFINE_CLASS_ID(FastUnlock, Cmp, 1)
806 DEFINE_CLASS_ID(SubTypeCheck,Cmp, 2)
807
808 DEFINE_CLASS_ID(MergeMem, Node, 7)
809 DEFINE_CLASS_ID(Bool, Node, 8)
810 DEFINE_CLASS_ID(AddP, Node, 9)
811 DEFINE_CLASS_ID(BoxLock, Node, 10)
812 DEFINE_CLASS_ID(Add, Node, 11)
813 DEFINE_CLASS_ID(MinMax, Add, 0)
814 DEFINE_CLASS_ID(Mul, Node, 12)
815 DEFINE_CLASS_ID(ClearArray, Node, 14)
816 DEFINE_CLASS_ID(Halt, Node, 15)
817 DEFINE_CLASS_ID(Opaque1, Node, 16)
818 DEFINE_CLASS_ID(OpaqueLoopInit, Opaque1, 0)
819 DEFINE_CLASS_ID(OpaqueLoopStride, Opaque1, 1)
820 DEFINE_CLASS_ID(OpaqueMultiversioning, Opaque1, 2)
821 DEFINE_CLASS_ID(OpaqueConstantBool, Node, 17)
822 DEFINE_CLASS_ID(OpaqueInitializedAssertionPredicate, Node, 18)
823 DEFINE_CLASS_ID(OpaqueTemplateAssertionPredicate, Node, 19)
824 DEFINE_CLASS_ID(Move, Node, 20)
825 DEFINE_CLASS_ID(LShift, Node, 21)
826 DEFINE_CLASS_ID(Neg, Node, 22)
827
828 _max_classes = ClassMask_Neg
829 };
830 #undef DEFINE_CLASS_ID
831
832 // Flags are sorted by usage frequency.
833 enum NodeFlags : uint64_t {
834 Flag_is_Copy = 1ULL << 0, // should be first bit to avoid shift
835 Flag_rematerialize = 1ULL << 1,
836 Flag_needs_anti_dependence_check = 1ULL << 2,
837 Flag_is_macro = 1ULL << 3,
838 Flag_is_Con = 1ULL << 4,
839 Flag_is_cisc_alternate = 1ULL << 5,
840 Flag_is_dead_loop_safe = 1ULL << 6,
841 Flag_may_be_short_branch = 1ULL << 7,
842 Flag_avoid_back_to_back_before = 1ULL << 8,
843 Flag_avoid_back_to_back_after = 1ULL << 9,
844 Flag_has_call = 1ULL << 10,
845 Flag_has_swapped_edges = 1ULL << 11,
846 Flag_is_scheduled = 1ULL << 12,
847 Flag_is_expensive = 1ULL << 13,
848 Flag_is_predicated_vector = 1ULL << 14, // Marked on a vector node that has an additional
849 // mask input controlling the lane operations.
850 Flag_for_post_loop_opts_igvn = 1ULL << 15,
851 Flag_for_merge_stores_igvn = 1ULL << 16,
852 Flag_is_removed_by_peephole = 1ULL << 17,
853 Flag_is_predicated_using_blend = 1ULL << 18,
854 _last_flag = Flag_is_predicated_using_blend
855 };
856
857 class PD;
858
859 private:
860 juint _class_id;
861 juint _flags;
862
863 #ifdef ASSERT
864 static juint max_flags();
865 #endif
866
867 protected:
868 // These methods should be called from constructors only.
869 void init_class_id(juint c) {
870 _class_id = c; // cast out const
871 }
872 void init_flags(uint fl) {
873 assert(fl <= max_flags(), "invalid node flag");
874 _flags |= fl;
875 }
876 void clear_flag(uint fl) {
877 assert(fl <= max_flags(), "invalid node flag");
878 _flags &= ~fl;
879 }
880
881 public:
882 juint class_id() const { return _class_id; }
883
884 juint flags() const { return _flags; }
885
886 void add_flag(juint fl) { init_flags(fl); }
887
888 void remove_flag(juint fl) { clear_flag(fl); }
889
890 // Return a dense integer opcode number
891 virtual int Opcode() const;
892
893 // Virtual inherited Node size
894 virtual uint size_of() const;
895
896 // Other interesting Node properties
897 #define DEFINE_CLASS_QUERY(type) \
898 bool is_##type() const { \
899 return ((_class_id & ClassMask_##type) == Class_##type); \
900 } \
901 type##Node *as_##type() const { \
902 assert(is_##type(), "invalid node class: %s", Name()); \
903 return (type##Node*)this; \
904 } \
905 type##Node* isa_##type() const { \
906 return (is_##type()) ? as_##type() : nullptr; \
907 }
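// Typical use of the generated queries (a sketch; 'n' is an arbitrary node):
//   if (n->is_CountedLoop()) { CountedLoopNode* cl = n->as_CountedLoop(); ... }
//   CountedLoopNode* cl = n->isa_CountedLoop(); // nullptr if 'n' is not a CountedLoop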
908
909 DEFINE_CLASS_QUERY(AbstractLock)
910 DEFINE_CLASS_QUERY(Add)
911 DEFINE_CLASS_QUERY(AddP)
912 DEFINE_CLASS_QUERY(Allocate)
913 DEFINE_CLASS_QUERY(AllocateArray)
914 DEFINE_CLASS_QUERY(ArrayCopy)
915 DEFINE_CLASS_QUERY(BaseCountedLoop)
916 DEFINE_CLASS_QUERY(BaseCountedLoopEnd)
917 DEFINE_CLASS_QUERY(Bool)
918 DEFINE_CLASS_QUERY(BoxLock)
919 DEFINE_CLASS_QUERY(Call)
920 DEFINE_CLASS_QUERY(CallDynamicJava)
921 DEFINE_CLASS_QUERY(CallJava)
922 DEFINE_CLASS_QUERY(CallLeaf)
923 DEFINE_CLASS_QUERY(CallLeafNoFP)
924 DEFINE_CLASS_QUERY(CallLeafPure)
925 DEFINE_CLASS_QUERY(CallRuntime)
926 DEFINE_CLASS_QUERY(CallStaticJava)
927 DEFINE_CLASS_QUERY(Catch)
928 DEFINE_CLASS_QUERY(CatchProj)
929 DEFINE_CLASS_QUERY(CheckCastPP)
930 DEFINE_CLASS_QUERY(CastII)
931 DEFINE_CLASS_QUERY(CastLL)
932 DEFINE_CLASS_QUERY(CastFF)
933 DEFINE_CLASS_QUERY(ConI)
934 DEFINE_CLASS_QUERY(CastPP)
935 DEFINE_CLASS_QUERY(ConstraintCast)
936 DEFINE_CLASS_QUERY(ClearArray)
937 DEFINE_CLASS_QUERY(CMove)
938 DEFINE_CLASS_QUERY(Cmp)
939 DEFINE_CLASS_QUERY(Convert)
940 DEFINE_CLASS_QUERY(CountedLoop)
941 DEFINE_CLASS_QUERY(CountedLoopEnd)
942 DEFINE_CLASS_QUERY(DecodeNarrowPtr)
943 DEFINE_CLASS_QUERY(DecodeN)
944 DEFINE_CLASS_QUERY(DecodeNKlass)
945 DEFINE_CLASS_QUERY(EncodeNarrowPtr)
946 DEFINE_CLASS_QUERY(EncodeP)
947 DEFINE_CLASS_QUERY(EncodePKlass)
948 DEFINE_CLASS_QUERY(FastLock)
949 DEFINE_CLASS_QUERY(FastUnlock)
950 DEFINE_CLASS_QUERY(Halt)
951 DEFINE_CLASS_QUERY(If)
952 DEFINE_CLASS_QUERY(RangeCheck)
953 DEFINE_CLASS_QUERY(IfProj)
954 DEFINE_CLASS_QUERY(IfFalse)
955 DEFINE_CLASS_QUERY(IfTrue)
956 DEFINE_CLASS_QUERY(Initialize)
957 DEFINE_CLASS_QUERY(Jump)
958 DEFINE_CLASS_QUERY(JumpProj)
959 DEFINE_CLASS_QUERY(LongCountedLoop)
960 DEFINE_CLASS_QUERY(LongCountedLoopEnd)
961 DEFINE_CLASS_QUERY(Load)
962 DEFINE_CLASS_QUERY(LoadStore)
963 DEFINE_CLASS_QUERY(LoadStoreConditional)
964 DEFINE_CLASS_QUERY(Lock)
965 DEFINE_CLASS_QUERY(Loop)
966 DEFINE_CLASS_QUERY(LShift)
967 DEFINE_CLASS_QUERY(Mach)
968 DEFINE_CLASS_QUERY(MachBranch)
969 DEFINE_CLASS_QUERY(MachCall)
970 DEFINE_CLASS_QUERY(MachCallDynamicJava)
971 DEFINE_CLASS_QUERY(MachCallJava)
972 DEFINE_CLASS_QUERY(MachCallLeaf)
973 DEFINE_CLASS_QUERY(MachCallRuntime)
974 DEFINE_CLASS_QUERY(MachCallStaticJava)
975 DEFINE_CLASS_QUERY(MachConstantBase)
976 DEFINE_CLASS_QUERY(MachConstant)
977 DEFINE_CLASS_QUERY(MachGoto)
978 DEFINE_CLASS_QUERY(MachIf)
979 DEFINE_CLASS_QUERY(MachJump)
980 DEFINE_CLASS_QUERY(MachNullCheck)
981 DEFINE_CLASS_QUERY(MachProj)
982 DEFINE_CLASS_QUERY(MachReturn)
983 DEFINE_CLASS_QUERY(MachSafePoint)
984 DEFINE_CLASS_QUERY(MachSpillCopy)
985 DEFINE_CLASS_QUERY(MachTemp)
986 DEFINE_CLASS_QUERY(MachMemBar)
987 DEFINE_CLASS_QUERY(MachMerge)
988 DEFINE_CLASS_QUERY(Mem)
989 DEFINE_CLASS_QUERY(MemBar)
990 DEFINE_CLASS_QUERY(MemBarStoreStore)
991 DEFINE_CLASS_QUERY(MergeMem)
992 DEFINE_CLASS_QUERY(MinMax)
993 DEFINE_CLASS_QUERY(Move)
994 DEFINE_CLASS_QUERY(Mul)
995 DEFINE_CLASS_QUERY(Multi)
996 DEFINE_CLASS_QUERY(MultiBranch)
997 DEFINE_CLASS_QUERY(MulVL)
998 DEFINE_CLASS_QUERY(NarrowMemProj)
999 DEFINE_CLASS_QUERY(Neg)
1000 DEFINE_CLASS_QUERY(NegV)
1001 DEFINE_CLASS_QUERY(NeverBranch)
1002 DEFINE_CLASS_QUERY(Opaque1)
1003 DEFINE_CLASS_QUERY(OpaqueConstantBool)
1004 DEFINE_CLASS_QUERY(OpaqueInitializedAssertionPredicate)
1005 DEFINE_CLASS_QUERY(OpaqueTemplateAssertionPredicate)
1006 DEFINE_CLASS_QUERY(OpaqueLoopInit)
1007 DEFINE_CLASS_QUERY(OpaqueLoopStride)
1008 DEFINE_CLASS_QUERY(OpaqueMultiversioning)
1009 DEFINE_CLASS_QUERY(OuterStripMinedLoop)
1010 DEFINE_CLASS_QUERY(OuterStripMinedLoopEnd)
1011 DEFINE_CLASS_QUERY(Parm)
1012 DEFINE_CLASS_QUERY(ParsePredicate)
1013 DEFINE_CLASS_QUERY(PCTable)
1014 DEFINE_CLASS_QUERY(Phi)
1015 DEFINE_CLASS_QUERY(Proj)
1016 DEFINE_CLASS_QUERY(Reduction)
1017 DEFINE_CLASS_QUERY(Region)
1018 DEFINE_CLASS_QUERY(Root)
1019 DEFINE_CLASS_QUERY(SafePoint)
1020 DEFINE_CLASS_QUERY(SafePointScalarObject)
1021 DEFINE_CLASS_QUERY(SafePointScalarMerge)
1022 DEFINE_CLASS_QUERY(Start)
1023 DEFINE_CLASS_QUERY(Store)
1024 DEFINE_CLASS_QUERY(Sub)
1025 DEFINE_CLASS_QUERY(SubTypeCheck)
1026 DEFINE_CLASS_QUERY(Type)
1027 DEFINE_CLASS_QUERY(Vector)
1028 DEFINE_CLASS_QUERY(VectorMaskCmp)
1029 DEFINE_CLASS_QUERY(VectorUnbox)
1030 DEFINE_CLASS_QUERY(VectorReinterpret)
1031 DEFINE_CLASS_QUERY(CompressV)
1032 DEFINE_CLASS_QUERY(ExpandV)
1033 DEFINE_CLASS_QUERY(CompressM)
1034 DEFINE_CLASS_QUERY(LoadVector)
1035 DEFINE_CLASS_QUERY(LoadVectorGather)
1036 DEFINE_CLASS_QUERY(LoadVectorMasked)
1037 DEFINE_CLASS_QUERY(LoadVectorGatherMasked)
1038 DEFINE_CLASS_QUERY(StoreVector)
1039 DEFINE_CLASS_QUERY(StoreVectorScatter)
1040 DEFINE_CLASS_QUERY(StoreVectorMasked)
1041 DEFINE_CLASS_QUERY(StoreVectorScatterMasked)
1042 DEFINE_CLASS_QUERY(SaturatingVector)
1043 DEFINE_CLASS_QUERY(ShiftV)
1044 DEFINE_CLASS_QUERY(Unlock)
1045
1046 #undef DEFINE_CLASS_QUERY
1047
1048 // duplicate of is_MachSpillCopy()
1049 bool is_SpillCopy () const {
1050 return ((_class_id & ClassMask_MachSpillCopy) == Class_MachSpillCopy);
1051 }
1052
1053 bool is_Con () const { return (_flags & Flag_is_Con) != 0; }
// A data node which is safe to leave in a dead loop during IGVN optimization.
1055 bool is_dead_loop_safe() const;
1056
1057 // is_Copy() returns copied edge index (0 or 1)
1058 uint is_Copy() const { return (_flags & Flag_is_Copy); }
1059
1060 virtual bool is_CFG() const { return false; }
1061
1062 // If this node is control-dependent on a test, can it be rerouted to a dominating equivalent
1063 // test? This means that the node can be executed safely as long as it happens after the test
1064 // that is its control input without worrying about the whole control flow. On the contrary, if
1065 // the node depends on a test that is not its control input, or if it depends on more than one
1066 // tests, then this method must return false.
1067 //
1068 // Pseudocode examples:
1069 // 1. if (y != 0) {
1070 // x / y;
1071 // }
1072 // The division depends only on the test y != 0 and can be executed anywhere y != 0 holds true.
1073 // As a result, depends_only_on_test returns true.
1074 // 2. if (y != 0) {
1075 // if (x > 1) {
1076 // x / y;
1077 // }
1078 // }
1079 // If the division x / y has its control input being the IfTrueNode of the test y != 0, then
1080 // depends_only_on_test returns true. Otherwise, if the division has its control input being the
1081 // IfTrueNode of the test x > 1, then depends_only_on_test returns false.
1082 // 3. if (y > z) {
1083 // if (z > 0) {
1084 // x / y
1085 // }
1086 // }
1087 // The division depends on both tests y > z and z > 0. As a result, depends_only_on_test returns
1088 // false.
1089 //
// This method gives certain nodes more freedom with regard to scheduling; for example, it
// allows nodes to float out of loops together with their tests.
//
// This method is pessimistic: it may return false even if the node satisfies the
// requirements. However, it must return false if the node does not satisfy the requirements.
1095 // When a test is decomposed into multiple tests, all nodes that depend on the decomposed test
1096 // must be pinned at the lowest dominating test of those. For example, when a zero check of a
1097 // division is split through a region but the division itself is not, it must be pinned at the
1098 // merge point by returning false when calling this method.
1099 bool depends_only_on_test() const {
1100 if (is_CFG() || pinned()) {
1101 return false;
1102 }
1103 assert(in(0) != nullptr, "must have a control input");
1104 return depends_only_on_test_impl();
1105 }
1106
1107 // Return a clone of the current node that's pinned. The current node must return true for
// depends_only_on_test, and the returned node must return false. This method is called when the
1109 // node is disconnected from its test.
1110 //
1111 // Examples:
1112 // 1. for (int i = start; i <= limit; i++) {
1113 // if (!rangecheck(i, a)) {
1114 // trap;
1115 // }
1116 // a[i];
1117 // }
1118 // Loop predication can then hoist the range check out of the loop:
1119 // if (!rangecheck(start, a)) {
1120 // trap;
1121 // }
1122 // if (!rangecheck(limit, a)) {
1123 // trap;
1124 // }
1125 // for (int i = start; i <= limit; i++) {
1126 // a[i];
1127 // }
1128 // As the load a[i] now depends on both tests rangecheck(start, a) and rangecheck(limit, a), it
1129 // must be pinned at the lowest dominating test of those.
1130 //
1131 // 2. if (y > x) {
1132 // if (x >= 0) {
1133 // if (y != 0) {
1134 // x / y;
1135 // }
1136 // }
1137 // }
1138 // The test (y != 0) == true can be deduced from (y > x) == true and (x >= 0) == true, so we may
1139 // choose to elide it. In such cases, the division x / y now depends on both tests
1140 // (y > x) == true and (x >= 0) == true, so it must be pinned at the lowest dominating test of
1141 // those.
1142 //
1143 // 3. if (b) {
1144 // ...
1145 // } else {
1146 // ...
1147 // }
1148 // if (y == 0) {
1149 // trap;
1150 // }
1151 // x / y;
1152 // The division x / y depends only on the test (y == 0) == false, but if we split the test
1153 // through the merge point but not the division:
1154 // if (b) {
1155 // ...
1156 // if (y == 0) {
1157 // trap;
1158 // }
1159 // } else {
1160 // ...
1161 // if (y == 0) {
1162 // trap;
1163 // }
1164 // }
1165 // x / y;
// The division now has as its control input the RegionNode merging the branches of if(b)
1167 // instead of a test that proves y != 0. As a result, it must be pinned at that node.
1168 //
1169 // There are cases where the node does not actually have a dependency on its control input. For
1170 // example, when we try to sink a LoadNode out of a loop in PhaseIdealLoop::try_sink_out_of_loop,
1171 // we clone the node so that all of the clones can be scheduled out of the loop. To prevent the
1172 // clones from being GVN-ed again, we add a control input for the node at the loop exit. For the
// cases when the node provably does not depend on its control input, this method can return
1174 // nullptr.
1175 Node* pin_node_under_control() const {
1176 assert(depends_only_on_test(), "must be a depends_only_on_test node");
1177 Node* res = pin_node_under_control_impl();
1178 if (res == nullptr) {
1179 assert(is_Load(), "unexpected failure to pin for %s", Name());
1180 return nullptr;
1181 }
1182 assert(!res->depends_only_on_test(), "the result must not depends_only_on_test");
1183 return res;
1184 }
1185
1186 private:
1187 virtual bool depends_only_on_test_impl() const { assert(false, "%s", Name()); return false; }
1188 virtual Node* pin_node_under_control_impl() const { assert(false, "%s", Name()); return nullptr; }
1189
1190 public:
1191 // When building basic blocks, I need to have a notion of block beginning
1192 // Nodes, next block selector Nodes (block enders), and next block
1193 // projections. These calls need to work on their machine equivalents. The
1194 // Ideal beginning Nodes are RootNode, RegionNode and StartNode.
1195 bool is_block_start() const {
1196 if ( is_Region() )
1197 return this == (const Node*)in(0);
1198 else
1199 return is_Start();
1200 }
1201
1202 // The Ideal control projection Nodes are IfTrue/IfFalse, JumpProjNode, Root,
1203 // Goto and Return. This call also returns the block ending Node.
1204 virtual const Node *is_block_proj() const;
1205
1206 // The node is a "macro" node which needs to be expanded before matching
1207 bool is_macro() const { return (_flags & Flag_is_macro) != 0; }
1208 // The node is expensive: the best control is set during loop opts
1209 bool is_expensive() const { return (_flags & Flag_is_expensive) != 0 && in(0) != nullptr; }
1210 // The node's original edge position is swapped.
1211 bool has_swapped_edges() const { return (_flags & Flag_has_swapped_edges) != 0; }
1212
1213 bool is_predicated_vector() const { return (_flags & Flag_is_predicated_vector) != 0; }
1214
1215 bool is_predicated_using_blend() const { return (_flags & Flag_is_predicated_using_blend) != 0; }
1216
// Used in lcm to mark nodes that have been scheduled
1218 bool is_scheduled() const { return (_flags & Flag_is_scheduled) != 0; }
1219
1220 bool for_post_loop_opts_igvn() const { return (_flags & Flag_for_post_loop_opts_igvn) != 0; }
1221 bool for_merge_stores_igvn() const { return (_flags & Flag_for_merge_stores_igvn) != 0; }
1222
1223 // Is 'n' possibly a loop entry (i.e. a Parse Predicate projection)?
1224 static bool may_be_loop_entry(Node* n) {
1225 return n != nullptr && n->is_IfProj() && n->in(0)->is_ParsePredicate();
1226 }
1227
1228 //----------------- Optimization
1229
1230 // Get the worst-case Type output for this Node.
1231 virtual const class Type *bottom_type() const;
1232
1233 // If we find a better type for a node, try to record it permanently.
1234 // Return true if this node actually changed.
1235 // Be sure to do the hash_delete game in the "rehash" variant.
1236 void raise_bottom_type(const Type* new_type);
1237
1238 // Get the address type with which this node uses and/or defs memory,
1239 // or null if none. The address type is conservatively wide.
1240 // Returns non-null for calls, membars, loads, stores, etc.
1241 // Returns TypePtr::BOTTOM if the node touches memory "broadly".
1242 virtual const class TypePtr *adr_type() const { return nullptr; }
1243
1244 // Return an existing node which computes the same function as this node.
1245 // The optimistic combined algorithm requires this to return a Node which
1246 // is a small number of steps away (e.g., one of my inputs).
1247 virtual Node* Identity(PhaseGVN* phase);
1248
1249 // Return the set of values this Node can take on at runtime.
1250 virtual const Type* Value(PhaseGVN* phase) const;
1251
1252 // Return a node which is more "ideal" than the current node.
1253 // The invariants on this call are subtle. If in doubt, read the
1254 // treatise in node.cpp above the default implementation AND TEST WITH
1255 // -XX:VerifyIterativeGVN=1
1256 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
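// A minimal sketch of how a subclass hooks into GVN (illustrative only; MyNode
// is not a real node class):
//   Node* MyNode::Identity(PhaseGVN* phase) { return this; }                    // already canonical
//   const Type* MyNode::Value(PhaseGVN* phase) const { return bottom_type(); }  // no sharper type known
//   Node* MyNode::Ideal(PhaseGVN* phase, bool can_reshape) { return nullptr; }  // no reshaping done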
1257
1258 // Some nodes have specific Ideal subgraph transformations only if they are
1259 // unique users of specific nodes. Such nodes should be put on IGVN worklist
1260 // for the transformations to happen.
1261 bool has_special_unique_user() const;
1262
1263 // Skip Proj and CatchProj nodes chains. Check for Null and Top.
1264 Node* find_exact_control(Node* ctrl);
1265
1266 // Results of the dominance analysis.
1267 enum class DomResult {
1268 NotDominate, // 'this' node does not dominate 'sub'.
1269 Dominate, // 'this' node dominates or is equal to 'sub'.
1270 EncounteredDeadCode // Result is undefined due to encountering dead code.
1271 };
// Check if 'this' node dominates or is equal to 'sub'.
1273 DomResult dominates(Node* sub, Node_List &nlist);
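// e.g. (a sketch; 'nlist' is a scratch Node_List used by the traversal):
//   Node_List nlist;
//   if (ctrl->dominates(use_ctrl, nlist) == Node::DomResult::Dominate) { ... }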
1274
1275 bool remove_dead_region(PhaseGVN *phase, bool can_reshape);
1276 public:
1277
1278 // See if there is valid pipeline info
1279 static const Pipeline *pipeline_class();
1280 virtual const Pipeline *pipeline() const;
1281
1282 // Compute the latency from the def to this instruction of the ith input node
1283 uint latency(uint i);
1284
1285 // Hash & compare functions, for pessimistic value numbering
1286
1287 // If the hash function returns the special sentinel value NO_HASH,
1288 // the node is guaranteed never to compare equal to any other node.
1289 // If we accidentally generate a hash with value NO_HASH the node
1290 // won't go into the table and we'll lose a little optimization.
1291 static const uint NO_HASH = 0;
1292 virtual uint hash() const;
1293 virtual bool cmp( const Node &n ) const;
1294
// Operation appears to be iteratively computed (such as an induction variable).
1296 // It is possible for this operation to return false for a loop-varying
1297 // value, if it appears (by local graph inspection) to be computed by a simple conditional.
1298 bool is_iteratively_computed();
1299
1300 // Determine if a node is a counted loop induction variable.
1301 // NOTE: The method is defined in "loopnode.cpp".
1302 bool is_cloop_ind_var() const;
1303
1304 // Return a node with opcode "opc" and same inputs as "this" if one can
// be found; otherwise return null.
1306 Node* find_similar(int opc);
1307 bool has_same_inputs_as(const Node* other) const;
1308
1309 // Return the unique control out if only one. Null if none or more than one.
1310 Node* unique_ctrl_out_or_null() const;
1311 // Return the unique control out. Asserts if none or more than one control out.
1312 Node* unique_ctrl_out() const;
1313
1314 // Set control or add control as precedence edge
1315 void ensure_control_or_add_prec(Node* c);
1316 void add_prec_from(Node* n);
1317
1318 // Visit boundary uses of the node and apply a callback function for each.
1319 // Recursively traverse uses, stopping and applying the callback when
1320 // reaching a boundary node, defined by is_boundary. Note: the function
1321 // definition appears after the complete type definition of Node_List.
1322 template <typename Callback, typename Check>
1323 void visit_uses(Callback callback, Check is_boundary) const;
1324
1325 //----------------- Code Generation
1326
1327 // Ideal register class for Matching. Zero means unmatched instruction
1328 // (these are cloned instead of converted to machine nodes).
1329 virtual uint ideal_reg() const;
1330
1331 static const uint NotAMachineReg; // must be > max. machine register
1332
1333 // Do we Match on this edge index or not? Generally false for Control
1334 // and true for everything else. Weird for calls & returns.
1335 virtual uint match_edge(uint idx) const;
1336
1337 // Register class output is returned in
1338 virtual const RegMask &out_RegMask() const;
1339 // Register class input is expected in
1340 virtual const RegMask &in_RegMask(uint) const;
1341 // Should we clone rather than spill this instruction?
1342 bool rematerialize() const;
1343
1344 // Return JVM State Object if this Node carries debug info, or null otherwise
1345 virtual JVMState* jvms() const;
1346
1347 // Print as assembly
1348 virtual void format( PhaseRegAlloc *, outputStream* st = tty ) const;
1349 // Emit bytes using C2_MacroAssembler
1350 virtual void emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const;
1351 // Size of instruction in bytes
1352 virtual uint size(PhaseRegAlloc *ra_) const;
1353
1354 // Convenience function to extract an integer constant from a node.
1355 // If it is not an integer constant (either Con, CastII, or Mach),
1356 // return value_if_unknown.
1357 jint find_int_con(jint value_if_unknown) const {
1358 const TypeInt* t = find_int_type();
1359 return (t != nullptr && t->is_con()) ? t->get_con() : value_if_unknown;
1360 }
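// e.g. (a sketch): read a shift count only if it is a compile-time constant,
// with -1 serving as the "unknown" sentinel:
//   jint shift = n->in(2)->find_int_con(-1);
//   if (shift >= 0) { ... }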
1361 // Return the constant, knowing it is an integer constant already
1362 jint get_int() const {
1363 const TypeInt* t = find_int_type();
1364 guarantee(t != nullptr, "must be con");
1365 return t->get_con();
1366 }
1367 // Here's where the work is done. Can produce non-constant int types too.
1368 const TypeInt* find_int_type() const;
1369 const TypeInteger* find_integer_type(BasicType bt) const;
1370
1371 // Same thing for long (and intptr_t, via type.hpp):
1372 jlong get_long() const {
1373 const TypeLong* t = find_long_type();
1374 guarantee(t != nullptr, "must be con");
1375 return t->get_con();
1376 }
1377 jlong find_long_con(jint value_if_unknown) const {
1378 const TypeLong* t = find_long_type();
1379 return (t != nullptr && t->is_con()) ? t->get_con() : value_if_unknown;
1380 }
1381 const TypeLong* find_long_type() const;
1382
1383 jlong get_integer_as_long(BasicType bt) const {
1384 const TypeInteger* t = find_integer_type(bt);
1385 guarantee(t != nullptr && t->is_con(), "must be con");
1386 return t->get_con_as_long(bt);
1387 }
1388 jlong find_integer_as_long(BasicType bt, jlong value_if_unknown) const {
1389 const TypeInteger* t = find_integer_type(bt);
1390 if (t == nullptr || !t->is_con()) return value_if_unknown;
1391 return t->get_con_as_long(bt);
1392 }
1393 const TypePtr* get_ptr_type() const;
1394
1395 // These guys are called by code generated by ADLC:
1396 intptr_t get_ptr() const;
1397 intptr_t get_narrowcon() const;
1398 jdouble getd() const;
1399 jfloat getf() const;
1400 jshort geth() const;
1401
1402 // Nodes which are pinned into basic blocks
1403 virtual bool pinned() const { return false; }
1404
1405 // Nodes which use memory without consuming it, hence need antidependences
1406 // More specifically, needs_anti_dependence_check returns true iff the node
1407 // (a) does a load, and (b) does not perform a store (except perhaps to a
1408 // stack slot or some other unaliased location).
1409 bool needs_anti_dependence_check() const;
1410
1411 // Return which operand this instruction may cisc-spill. In other words,
1412 // return operand position that can convert from reg to memory access
1413 virtual int cisc_operand() const { return AdlcVMDeps::Not_cisc_spillable; }
1414 bool is_cisc_alternate() const { return (_flags & Flag_is_cisc_alternate) != 0; }
1415
1416 // Whether this is a memory-writing machine node.
1417 bool is_memory_writer() const { return is_Mach() && bottom_type()->has_memory(); }
1418
1419 // Whether this is a memory phi node
1420 bool is_memory_phi() const { return is_Phi() && bottom_type() == Type::MEMORY; }
1421
1422 bool is_div_or_mod(BasicType bt) const;
1423
1424 bool is_data_proj_of_pure_function(const Node* maybe_pure_function) const;
1425
1426 //----------------- Printing, etc
1427 #ifndef PRODUCT
1428 public:
1429 Node* find(int idx, bool only_ctrl = false); // Search the graph for the given idx.
1430 Node* find_ctrl(int idx); // Search control ancestors for the given idx.
1431 void dump_bfs(const int max_distance, Node* target, const char* options, outputStream* st, const frame* fr = nullptr) const;
1432 void dump_bfs(const int max_distance, Node* target, const char* options) const; // directly to tty
1433 void dump_bfs(const int max_distance) const; // dump_bfs(max_distance, nullptr, nullptr)
1434 void dump_bfs(const int max_distance, Node* target, const char* options, void* sp, void* fp, void* pc) const;
1435 class DumpConfig {
1436 public:
1437 // overridden to implement coloring of node idx
1438 virtual void pre_dump(outputStream *st, const Node* n) = 0;
1439 virtual void post_dump(outputStream *st) = 0;
1440 };
1441 void dump_idx(bool align = false, outputStream* st = tty, DumpConfig* dc = nullptr) const;
1442 void dump_name(outputStream* st = tty, DumpConfig* dc = nullptr) const;
1443 void dump() const; // print node with newline
1444 void dump(const char* suffix, bool mark = false, outputStream* st = tty, DumpConfig* dc = nullptr) const; // Print this node.
1445 void dump(int depth) const; // Print this node, recursively to depth d
1446 void dump_ctrl(int depth) const; // Print control nodes, to depth d
1447 void dump_comp() const; // Print this node in compact representation.
1448 // Print this node in compact representation.
1449 void dump_comp(const char* suffix, outputStream *st = tty) const;
1450 private:
1451 virtual void dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const; // Print required-edge info
1452 virtual void dump_prec(outputStream* st = tty, DumpConfig* dc = nullptr) const; // Print precedence-edge info
1453 virtual void dump_out(outputStream* st = tty, DumpConfig* dc = nullptr) const; // Print the output edge info
1454 public:
1455 virtual void dump_spec(outputStream *st) const {}; // Print per-node info
1456 // Print compact per-node info
1457 virtual void dump_compact_spec(outputStream *st) const { dump_spec(st); }
1458
1459 static void verify(int verify_depth, VectorSet& visited, Node_List& worklist);
1460
  // Returns a class-unique string used to identify instances of this node class
1462 virtual const char *Name() const;
1463
1464 void dump_format(PhaseRegAlloc *ra) const; // debug access to MachNode::format(...)
1465 static bool in_dump() { return Compile::current()->_in_dump_cnt > 0; } // check if we are in a dump call
1466 #endif
1467 #ifdef ASSERT
1468 void verify_construction();
1469 bool verify_jvms(const JVMState* jvms) const;
1470
1471 Node* _debug_orig; // Original version of this, if any.
1472 Node* debug_orig() const { return _debug_orig; }
1473 void set_debug_orig(Node* orig); // _debug_orig = orig
1474 void dump_orig(outputStream *st, bool print_key = true) const;
1475
1476 uint64_t _debug_idx; // Unique value assigned to every node.
1477 uint64_t debug_idx() const { return _debug_idx; }
1478 void set_debug_idx(uint64_t debug_idx) { _debug_idx = debug_idx; }
1479
1480 int _hash_lock; // Barrier to modifications of nodes in the hash table
1481 void enter_hash_lock() { ++_hash_lock; assert(_hash_lock < 99, "in too many hash tables?"); }
1482 void exit_hash_lock() { --_hash_lock; assert(_hash_lock >= 0, "mispaired hash locks"); }
1483
1484 static void init_NodeProperty();
1485
1486 #if OPTO_DU_ITERATOR_ASSERT
1487 const Node* _last_del; // The last deleted node.
  uint _del_tick;          // Bumped when a deletion happens.
1489 #endif
1490 #endif
1491 };
1492
1493 inline bool not_a_node(const Node* n) {
1494 if (n == nullptr) return true;
1495 if (((intptr_t)n & 1) != 0) return true; // uninitialized, etc.
1496 if (*(address*)n == badAddress) return true; // kill by Node::destruct
1497 return false;
1498 }
1499
1500 //-----------------------------------------------------------------------------
1501 // Iterators over DU info, and associated Node functions.
1502
1503 #if OPTO_DU_ITERATOR_ASSERT
1504
1505 // Common code for assertion checking on DU iterators.
1506 class DUIterator_Common {
1507 #ifdef ASSERT
1508 protected:
1509 bool _vdui; // cached value of VerifyDUIterators
1510 const Node* _node; // the node containing the _out array
1511 uint _outcnt; // cached node->_outcnt
1512 uint _del_tick; // cached node->_del_tick
1513 Node* _last; // last value produced by the iterator
1514
1515 void sample(const Node* node); // used by c'tor to set up for verifies
1516 void verify(const Node* node, bool at_end_ok = false);
1517 void verify_resync();
1518 void reset(const DUIterator_Common& that);
1519
  // The I_VDUI_ONLY and VDUI_ONLY macros protect code conditionalized on VerifyDUIterators
1521 #define I_VDUI_ONLY(i,x) { if ((i)._vdui) { x; } }
1522 #else
1523 #define I_VDUI_ONLY(i,x) { }
1524 #endif //ASSERT
1525 };
1526
1527 #define VDUI_ONLY(x) I_VDUI_ONLY(*this, x)
1528
1529 // Default DU iterator. Allows appends onto the out array.
1530 // Allows deletion from the out array only at the current point.
1531 // Usage:
1532 // for (DUIterator i = x->outs(); x->has_out(i); i++) {
1533 // Node* y = x->out(i);
1534 // ...
1535 // }
// Compiles in product mode to an unsigned integer index, which indexes
// into a repeatedly reloaded base pointer of x->_out.  The loop predicate
1538 // also reloads x->_outcnt. If you delete, you must perform "--i" just
1539 // before continuing the loop. You must delete only the last-produced
1540 // edge. You must delete only a single copy of the last-produced edge,
1541 // or else you must delete all copies at once (the first time the edge
1542 // is produced by the iterator).
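//
// A minimal deletion sketch (illustrative only; 'z' and the condition are
// hypothetical), using replace_edge to remove the just-produced edge:
//
//   for (DUIterator i = x->outs(); x->has_out(i); i++) {
//     Node* y = x->out(i);
//     if (/* y should now use z instead of x */) {
//       y->replace_edge(x, z);  // removes y from x's out array (all copies at once)
//       --i;                    // back up over the deleted edge before continuing
//     }
//   }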
1543 class DUIterator : public DUIterator_Common {
1544 friend class Node;
1545
1546 // This is the index which provides the product-mode behavior.
1547 // Whatever the product-mode version of the system does to the
1548 // DUI index is done to this index. All other fields in
1549 // this class are used only for assertion checking.
1550 uint _idx;
1551
1552 #ifdef ASSERT
1553 uint _refresh_tick; // Records the refresh activity.
1554
1555 void sample(const Node* node); // Initialize _refresh_tick etc.
1556 void verify(const Node* node, bool at_end_ok = false);
1557 void verify_increment(); // Verify an increment operation.
1558 void verify_resync(); // Verify that we can back up over a deletion.
1559 void verify_finish(); // Verify that the loop terminated properly.
1560 void refresh(); // Resample verification info.
1561 void reset(const DUIterator& that); // Resample after assignment.
1562 #endif
1563
1564 DUIterator(const Node* node, int dummy_to_avoid_conversion)
1565 { _idx = 0; DEBUG_ONLY(sample(node)); }
1566
1567 public:
1568 // initialize to garbage; clear _vdui to disable asserts
1569 DUIterator()
1570 { /*initialize to garbage*/ DEBUG_ONLY(_vdui = false); }
1571
1572 DUIterator(const DUIterator& that)
1573 { _idx = that._idx; DEBUG_ONLY(_vdui = false; reset(that)); }
1574
1575 void operator++(int dummy_to_specify_postfix_op)
1576 { _idx++; VDUI_ONLY(verify_increment()); }
1577
1578 void operator--()
1579 { VDUI_ONLY(verify_resync()); --_idx; }
1580
1581 ~DUIterator()
1582 { VDUI_ONLY(verify_finish()); }
1583
1584 void operator=(const DUIterator& that)
1585 { _idx = that._idx; DEBUG_ONLY(reset(that)); }
1586 };
1587
1588 DUIterator Node::outs() const
1589 { return DUIterator(this, 0); }
1590 DUIterator& Node::refresh_out_pos(DUIterator& i) const
1591 { I_VDUI_ONLY(i, i.refresh()); return i; }
1592 bool Node::has_out(DUIterator& i) const
1593 { I_VDUI_ONLY(i, i.verify(this,true));return i._idx < _outcnt; }
1594 Node* Node::out(DUIterator& i) const
1595 { I_VDUI_ONLY(i, i.verify(this)); return DEBUG_ONLY(i._last=) _out[i._idx]; }
1596
1597
1598 // Faster DU iterator. Disallows insertions into the out array.
1599 // Allows deletion from the out array only at the current point.
1600 // Usage:
1601 // for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) {
1602 // Node* y = x->fast_out(i);
1603 // ...
1604 // }
1605 // Compiles in product mode to raw Node** pointer arithmetic, with
1606 // no reloading of pointers from the original node x. If you delete,
1607 // you must perform "--i; --imax" just before continuing the loop.
1608 // If you delete multiple copies of the same edge, you must decrement
1609 // imax, but not i, multiple times: "--i, imax -= num_edges".
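//
// A minimal deletion sketch (illustrative only; 'z' and the condition are
// hypothetical), using replace_edge to remove the just-produced edge:
//
//   for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) {
//     Node* y = x->fast_out(i);
//     if (/* y should now use z instead of x */) {
//       int num_edges = y->replace_edge(x, z);  // may remove several copies of the edge
//       --i; imax -= num_edges;                 // compensate for the removed out-edges
//     }
//   }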
1610 class DUIterator_Fast : public DUIterator_Common {
1611 friend class Node;
1612 friend class DUIterator_Last;
1613
1614 // This is the pointer which provides the product-mode behavior.
1615 // Whatever the product-mode version of the system does to the
1616 // DUI pointer is done to this pointer. All other fields in
1617 // this class are used only for assertion checking.
1618 Node** _outp;
1619
1620 #ifdef ASSERT
1621 void verify(const Node* node, bool at_end_ok = false);
1622 void verify_limit();
1623 void verify_resync();
1624 void verify_relimit(uint n);
1625 void reset(const DUIterator_Fast& that);
1626 #endif
1627
1628 // Note: offset must be signed, since -1 is sometimes passed
1629 DUIterator_Fast(const Node* node, ptrdiff_t offset)
1630 { _outp = node->_out + offset; DEBUG_ONLY(sample(node)); }
1631
1632 public:
1633 // initialize to garbage; clear _vdui to disable asserts
1634 DUIterator_Fast()
1635 { /*initialize to garbage*/ DEBUG_ONLY(_vdui = false); }
1636
1637 DUIterator_Fast(const DUIterator_Fast& that)
1638 { _outp = that._outp; DEBUG_ONLY(_vdui = false; reset(that)); }
1639
1640 void operator++(int dummy_to_specify_postfix_op)
1641 { _outp++; VDUI_ONLY(verify(_node, true)); }
1642
1643 void operator--()
1644 { VDUI_ONLY(verify_resync()); --_outp; }
1645
1646 void operator-=(uint n) // applied to the limit only
1647 { _outp -= n; VDUI_ONLY(verify_relimit(n)); }
1648
1649 bool operator<(DUIterator_Fast& limit) {
1650 I_VDUI_ONLY(*this, this->verify(_node, true));
1651 I_VDUI_ONLY(limit, limit.verify_limit());
1652 return _outp < limit._outp;
1653 }
1654
1655 void operator=(const DUIterator_Fast& that)
1656 { _outp = that._outp; DEBUG_ONLY(reset(that)); }
1657 };
1658
1659 DUIterator_Fast Node::fast_outs(DUIterator_Fast& imax) const {
1660 // Assign a limit pointer to the reference argument:
1661 imax = DUIterator_Fast(this, (ptrdiff_t)_outcnt);
1662 // Return the base pointer:
1663 return DUIterator_Fast(this, 0);
1664 }
1665 Node* Node::fast_out(DUIterator_Fast& i) const {
1666 I_VDUI_ONLY(i, i.verify(this));
1667 return DEBUG_ONLY(i._last=) *i._outp;
1668 }
1669
1670
1671 // Faster DU iterator. Requires each successive edge to be removed.
1672 // Does not allow insertion of any edges.
1673 // Usage:
1674 // for (DUIterator_Last imin, i = x->last_outs(imin); i >= imin; i -= num_edges) {
1675 // Node* y = x->last_out(i);
1676 // ...
1677 // }
1678 // Compiles in product mode to raw Node** pointer arithmetic, with
1679 // no reloading of pointers from the original node x.
1680 class DUIterator_Last : private DUIterator_Fast {
1681 friend class Node;
1682
1683 #ifdef ASSERT
1684 void verify(const Node* node, bool at_end_ok = false);
1685 void verify_limit();
1686 void verify_step(uint num_edges);
1687 #endif
1688
1689 // Note: offset must be signed, since -1 is sometimes passed
1690 DUIterator_Last(const Node* node, ptrdiff_t offset)
1691 : DUIterator_Fast(node, offset) { }
1692
1693 void operator++(int dummy_to_specify_postfix_op) {} // do not use
1694 void operator<(int) {} // do not use
1695
1696 public:
  DUIterator_Last() { }                 // initialize to garbage
1699
1700 DUIterator_Last(const DUIterator_Last& that) = default;
1701
1702 void operator--()
1703 { _outp--; VDUI_ONLY(verify_step(1)); }
1704
1705 void operator-=(uint n)
1706 { _outp -= n; VDUI_ONLY(verify_step(n)); }
1707
1708 bool operator>=(DUIterator_Last& limit) {
1709 I_VDUI_ONLY(*this, this->verify(_node, true));
1710 I_VDUI_ONLY(limit, limit.verify_limit());
1711 return _outp >= limit._outp;
1712 }
1713
1714 DUIterator_Last& operator=(const DUIterator_Last& that) = default;
1715 };
1716
1717 DUIterator_Last Node::last_outs(DUIterator_Last& imin) const {
1718 // Assign a limit pointer to the reference argument:
1719 imin = DUIterator_Last(this, 0);
1720 // Return the initial pointer:
1721 return DUIterator_Last(this, (ptrdiff_t)_outcnt - 1);
1722 }
1723 Node* Node::last_out(DUIterator_Last& i) const {
1724 I_VDUI_ONLY(i, i.verify(this));
1725 return DEBUG_ONLY(i._last=) *i._outp;
1726 }
1727
1728 #endif //OPTO_DU_ITERATOR_ASSERT
1729
1730 #undef I_VDUI_ONLY
1731 #undef VDUI_ONLY
1732
1733 // An Iterator that truly follows the iterator pattern. Doesn't
1734 // support deletion but could be made to.
1735 //
// for (SimpleDUIterator i(n); i.has_next(); i.next()) {
//   Node* m = i.get();
//   ...
// }
1739 class SimpleDUIterator : public StackObj {
1740 private:
1741 Node* node;
1742 DUIterator_Fast imax;
1743 DUIterator_Fast i;
1744 public:
1745 SimpleDUIterator(Node* n): node(n), i(n->fast_outs(imax)) {}
1746 bool has_next() { return i < imax; }
1747 void next() { i++; }
1748 Node* get() { return node->fast_out(i); }
1749 };
1750
1751
1752 //-----------------------------------------------------------------------------
1753 // Map dense integer indices to Nodes. Uses classic doubling-array trick.
1754 // Abstractly provides an infinite array of Node*'s, initialized to null.
// Note that the constructor just zeros things. Since Arena allocation is
// used, no destructor is needed to reclaim storage.
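//
// A minimal usage sketch (illustrative only; 'arena' and 'n' are hypothetical):
//
//   Node_Array map(arena);
//   map.map(n->_idx, n);        // grows the backing array as needed
//   Node* m = map[n->_idx];     // lookup; returns null for unmapped indices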
1757 class Node_Array : public AnyObj {
1758 protected:
1759 Arena* _a; // Arena to allocate in
1760 uint _max;
1761 Node** _nodes;
1762 ReallocMark _nesting; // Safety checks for arena reallocation
1763
1764 // Grow array to required capacity
1765 void maybe_grow(uint i) {
1766 _nesting.check(_a); // Check if a potential reallocation in the arena is safe
1767 if (i >= _max) {
1768 grow(i);
1769 }
1770 }
1771 void grow(uint i);
1772
1773 public:
1774 Node_Array(Arena* a, uint max = OptoNodeListSize) : _a(a), _max(max) {
1775 _nodes = NEW_ARENA_ARRAY(a, Node*, max);
1776 clear();
1777 }
1778 Node_Array() : Node_Array(Thread::current()->resource_area()) {}
1779
1780 NONCOPYABLE(Node_Array);
1781 Node_Array& operator=(Node_Array&&) = delete;
  // Allow move constructor for && (e.g., to capture the return value of a function)
1783 Node_Array(Node_Array&&) = default;
1784
1785 Node *operator[] ( uint i ) const // Lookup, or null for not mapped
1786 { return (i<_max) ? _nodes[i] : (Node*)nullptr; }
1787 Node* at(uint i) const { assert(i<_max,"oob"); return _nodes[i]; }
1788 Node** adr() { return _nodes; }
1789 // Extend the mapping: index i maps to Node *n.
1790 void map( uint i, Node *n ) { maybe_grow(i); _nodes[i] = n; }
1791 void insert( uint i, Node *n );
1792 void remove( uint i ); // Remove, preserving order
1793 // Clear all entries in _nodes to null but keep storage
1794 void clear() {
1795 Copy::zero_to_bytes(_nodes, _max * sizeof(Node*));
1796 }
1797
1798 uint max() const { return _max; }
1799 void dump() const;
1800 };
1801
1802 class Node_List : public Node_Array {
1803 uint _cnt;
1804 public:
1805 Node_List(uint max = OptoNodeListSize) : Node_Array(Thread::current()->resource_area(), max), _cnt(0) {}
1806 Node_List(Arena *a, uint max = OptoNodeListSize) : Node_Array(a, max), _cnt(0) {}
1807
1808 NONCOPYABLE(Node_List);
1809 Node_List& operator=(Node_List&&) = delete;
  // Allow move constructor for && (e.g., to capture the return value of a function)
1811 Node_List(Node_List&&) = default;
1812
1813 bool contains(const Node* n) const {
1814 for (uint e = 0; e < size(); e++) {
1815 if (at(e) == n) return true;
1816 }
1817 return false;
1818 }
1819 void insert( uint i, Node *n ) { Node_Array::insert(i,n); _cnt++; }
1820 void remove( uint i ) { Node_Array::remove(i); _cnt--; }
1821 void push( Node *b ) { map(_cnt++,b); }
1822 void yank( Node *n ); // Find and remove
1823 Node *pop() { return _nodes[--_cnt]; }
1824 void clear() { _cnt = 0; Node_Array::clear(); } // retain storage
1825 void copy(const Node_List& from) {
1826 if (from._max > _max) {
1827 grow(from._max);
1828 }
1829 _cnt = from._cnt;
1830 Copy::conjoint_words_to_higher((HeapWord*)&from._nodes[0], (HeapWord*)&_nodes[0], from._max * sizeof(Node*));
1831 }
1832
1833 uint size() const { return _cnt; }
1834 void dump() const;
1835 void dump_simple() const;
1836 };
1837
// Definition must appear after the complete type definition of Node_List
1839 template <typename Callback, typename Check>
1840 void Node::visit_uses(Callback callback, Check is_boundary) const {
1841 ResourceMark rm;
1842 VectorSet visited;
1843 Node_List worklist;
1844
1845 // The initial worklist consists of the direct uses
1846 for (DUIterator_Fast kmax, k = fast_outs(kmax); k < kmax; k++) {
1847 Node* out = fast_out(k);
1848 if (!visited.test_set(out->_idx)) { worklist.push(out); }
1849 }
1850
1851 while (worklist.size() > 0) {
1852 Node* use = worklist.pop();
1853 // Apply callback on boundary nodes
1854 if (is_boundary(use)) {
1855 callback(use);
1856 } else {
1857 // Not a boundary node, continue search
1858 for (DUIterator_Fast kmax, k = use->fast_outs(kmax); k < kmax; k++) {
1859 Node* out = use->fast_out(k);
1860 if (!visited.test_set(out->_idx)) { worklist.push(out); }
1861 }
1862 }
1863 }
1864 }
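
// A minimal usage sketch for visit_uses (illustrative only; the lambdas and
// 'worklist' are hypothetical): apply a callback to the nearest transitive
// uses that are CFG nodes, without walking past them.
//
//   n->visit_uses([&](Node* use) { worklist.push(use); },
//                 [] (Node* use) { return use->is_CFG(); });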
1865
1866
1867 //------------------------------Unique_Node_List-------------------------------
1868 class Unique_Node_List : public Node_List {
1869 VectorSet _in_worklist;
  uint _clock_index;            // Index in the list to pop from next
1871 public:
1872 Unique_Node_List() : Node_List(), _clock_index(0) {}
1873 Unique_Node_List(Arena *a) : Node_List(a), _in_worklist(a), _clock_index(0) {}
1874
1875 NONCOPYABLE(Unique_Node_List);
1876 Unique_Node_List& operator=(Unique_Node_List&&) = delete;
  // Allow move constructor for && (e.g., to capture the return value of a function)
1878 Unique_Node_List(Unique_Node_List&&) = default;
1879
1880 void remove( Node *n );
1881 bool member(const Node* n) const { return _in_worklist.test(n->_idx) != 0; }
1882 VectorSet& member_set(){ return _in_worklist; }
1883
1884 void push(Node* b) {
1885 if( !_in_worklist.test_set(b->_idx) )
1886 Node_List::push(b);
1887 }
1888 void push_non_cfg_inputs_of(const Node* node) {
1889 for (uint i = 1; i < node->req(); i++) {
1890 Node* input = node->in(i);
1891 if (input != nullptr && !input->is_CFG()) {
1892 push(input);
1893 }
1894 }
1895 }
1896
1897 void push_outputs_of(const Node* node) {
1898 for (DUIterator_Fast imax, i = node->fast_outs(imax); i < imax; i++) {
1899 Node* output = node->fast_out(i);
1900 push(output);
1901 }
1902 }
1903
1904 Node *pop() {
1905 if( _clock_index >= size() ) _clock_index = 0;
1906 Node *b = at(_clock_index);
1907 map( _clock_index, Node_List::pop());
    if (size() != 0) _clock_index++; // Don't advance the clock index past an emptied list
1909 _in_worklist.remove(b->_idx);
1910 return b;
1911 }
1912 Node *remove(uint i) {
1913 Node *b = Node_List::at(i);
1914 _in_worklist.remove(b->_idx);
1915 map(i,Node_List::pop());
1916 return b;
1917 }
1918 void yank(Node *n) {
1919 _in_worklist.remove(n->_idx);
1920 Node_List::yank(n);
1921 }
1922 void clear() {
1923 _in_worklist.clear(); // Discards storage but grows automatically
1924 Node_List::clear();
1925 _clock_index = 0;
1926 }
1927 void ensure_empty() {
1928 assert(size() == 0, "must be empty");
1929 clear(); // just in case
1930 }
1931
1932 // Used after parsing to remove useless nodes before Iterative GVN
1933 void remove_useless_nodes(VectorSet& useful);
1934
  // If the idx values of the Nodes change, we must recompute the VectorSet
1936 void recompute_idx_set() {
1937 _in_worklist.clear();
1938 for (uint i = 0; i < size(); i++) {
1939 Node* n = at(i);
1940 _in_worklist.set(n->_idx);
1941 }
1942 }
1943
1944 #ifdef ASSERT
1945 bool is_subset_of(Unique_Node_List& other) {
1946 for (uint i = 0; i < size(); i++) {
1947 Node* n = at(i);
1948 if (!other.member(n)) {
1949 return false;
1950 }
1951 }
1952 return true;
1953 }
1954 #endif
1955
1956 bool contains(const Node* n) const {
1957 fatal("use faster member() instead");
1958 return false;
1959 }
1960
1961 #ifndef PRODUCT
1962 void print_set() const { _in_worklist.print(); }
1963 #endif
1964 };
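
// A typical worklist pattern with Unique_Node_List (illustrative only; 'start'
// and the processing step are hypothetical). Pushing a node that is already a
// member is a no-op, so every node is processed at most once:
//
//   Unique_Node_List worklist;
//   worklist.push(start);
//   for (uint i = 0; i < worklist.size(); i++) {
//     Node* n = worklist.at(i);
//     // ... process n ...
//     worklist.push_outputs_of(n);
//   }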
1965
1966 // Unique_Mixed_Node_List
1967 // unique: nodes are added only once
1968 // mixed: allow new and old nodes
1969 class Unique_Mixed_Node_List : public ResourceObj {
1970 public:
1971 Unique_Mixed_Node_List() : _visited_set(cmpkey, hashkey) {}
1972
1973 void add(Node* node) {
1974 if (not_a_node(node)) {
1975 return; // Gracefully handle null, -1, 0xabababab, etc.
1976 }
1977 if (_visited_set[node] == nullptr) {
1978 _visited_set.Insert(node, node);
1979 _worklist.push(node);
1980 }
1981 }
1982
1983 Node* operator[] (uint i) const {
1984 return _worklist[i];
1985 }
1986
1987 size_t size() {
1988 return _worklist.size();
1989 }
1990
1991 private:
1992 Dict _visited_set;
1993 Node_List _worklist;
1994 };
1995
1996 // Inline definition of Compile::record_for_igvn must be deferred to this point.
1997 inline void Compile::record_for_igvn(Node* n) {
1998 _igvn_worklist->push(n);
1999 }
2000
2001 // Inline definition of Compile::remove_for_igvn must be deferred to this point.
2002 inline void Compile::remove_for_igvn(Node* n) {
2003 _igvn_worklist->remove(n);
2004 }
2005
2006 //------------------------------Node_Stack-------------------------------------
2007 class Node_Stack {
2008 protected:
2009 struct INode {
2010 Node *node; // Processed node
2011 uint indx; // Index of next node's child
2012 };
2013 INode *_inode_top; // tos, stack grows up
  INode *_inode_max; // One past the end of the _inodes array
2015 INode *_inodes; // Array storage for the stack
2016 Arena *_a; // Arena to allocate in
2017 ReallocMark _nesting; // Safety checks for arena reallocation
2018
2019 void maybe_grow() {
2020 _nesting.check(_a); // Check if a potential reallocation in the arena is safe
2021 if (_inode_top >= _inode_max) {
2022 grow();
2023 }
2024 }
2025 void grow();
2026
2027 public:
2028 Node_Stack(int size) {
2029 size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
2030 _a = Thread::current()->resource_area();
2031 _inodes = NEW_ARENA_ARRAY( _a, INode, max );
2032 _inode_max = _inodes + max;
2033 _inode_top = _inodes - 1; // stack is empty
2034 }
2035
2036 Node_Stack(Arena *a, int size) : _a(a) {
2037 size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
2038 _inodes = NEW_ARENA_ARRAY( _a, INode, max );
2039 _inode_max = _inodes + max;
2040 _inode_top = _inodes - 1; // stack is empty
2041 }
2042
2043 void pop() {
2044 assert(_inode_top >= _inodes, "node stack underflow");
2045 --_inode_top;
2046 }
2047 void push(Node *n, uint i) {
2048 ++_inode_top;
2049 maybe_grow();
2050 INode *top = _inode_top; // optimization
2051 top->node = n;
2052 top->indx = i;
2053 }
2054 Node *node() const {
2055 return _inode_top->node;
2056 }
2057 Node* node_at(uint i) const {
2058 assert(_inodes + i <= _inode_top, "in range");
2059 return _inodes[i].node;
2060 }
2061 uint index() const {
2062 return _inode_top->indx;
2063 }
2064 uint index_at(uint i) const {
2065 assert(_inodes + i <= _inode_top, "in range");
2066 return _inodes[i].indx;
2067 }
2068 void set_node(Node *n) {
2069 _inode_top->node = n;
2070 }
2071 void set_index(uint i) {
2072 _inode_top->indx = i;
2073 }
2074 uint size_max() const { return (uint)pointer_delta(_inode_max, _inodes, sizeof(INode)); } // Max size
2075 uint size() const { return (uint)pointer_delta((_inode_top+1), _inodes, sizeof(INode)); } // Current size
2076 bool is_nonempty() const { return (_inode_top >= _inodes); }
2077 bool is_empty() const { return (_inode_top < _inodes); }
2078 void clear() { _inode_top = _inodes - 1; } // retain storage
2079
  // Node_Stack is also used as a map: find() returns the node on the stack
  // whose _idx matches the given idx, or null if there is none.
2081 Node* find(uint idx) const;
2082
2083 NONCOPYABLE(Node_Stack);
2084 };
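
// A minimal usage sketch for Node_Stack as an iterative depth-first walk over
// the inputs (illustrative only; 'root' and the visit logic are hypothetical):
//
//   ResourceMark rm;
//   VectorSet visited;
//   Node_Stack stack(16);
//   stack.push(root, 0);
//   visited.set(root->_idx);
//   while (stack.is_nonempty()) {
//     Node* n = stack.node();
//     uint  i = stack.index();
//     if (i < n->req()) {
//       stack.set_index(i + 1);                 // resume with the next input later
//       Node* in = n->in(i);
//       if (in != nullptr && !visited.test_set(in->_idx)) {
//         stack.push(in, 0);
//       }
//     } else {
//       stack.pop();                            // all inputs of n have been handled
//     }
//   }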
2085
2086
2087 //-----------------------------Node_Notes--------------------------------------
2088 // Debugging or profiling annotations loosely and sparsely associated
2089 // with some nodes. See Compile::node_notes_at for the accessor.
2090 class Node_Notes {
2091 JVMState* _jvms;
2092
2093 public:
2094 Node_Notes(JVMState* jvms = nullptr) {
2095 _jvms = jvms;
2096 }
2097
2098 JVMState* jvms() { return _jvms; }
2099 void set_jvms(JVMState* x) { _jvms = x; }
2100
2101 // True if there is nothing here.
2102 bool is_clear() {
2103 return (_jvms == nullptr);
2104 }
2105
2106 // Make there be nothing here.
2107 void clear() {
2108 _jvms = nullptr;
2109 }
2110
2111 // Make a new, clean node notes.
2112 static Node_Notes* make(Compile* C) {
2113 Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
2114 nn->clear();
2115 return nn;
2116 }
2117
2118 Node_Notes* clone(Compile* C) {
2119 Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
2120 (*nn) = (*this);
2121 return nn;
2122 }
2123
2124 // Absorb any information from source.
2125 bool update_from(Node_Notes* source) {
2126 bool changed = false;
2127 if (source != nullptr) {
2128 if (source->jvms() != nullptr) {
2129 set_jvms(source->jvms());
2130 changed = true;
2131 }
2132 }
2133 return changed;
2134 }
2135 };
2136
// Inlined accessors for Compile::node_notes that require the preceding class:
2138 inline Node_Notes*
2139 Compile::locate_node_notes(GrowableArray<Node_Notes*>* arr,
2140 int idx, bool can_grow) {
2141 assert(idx >= 0, "oob");
2142 int block_idx = (idx >> _log2_node_notes_block_size);
2143 int grow_by = (block_idx - (arr == nullptr? 0: arr->length()));
2144 if (grow_by >= 0) {
2145 if (!can_grow) return nullptr;
2146 grow_node_notes(arr, grow_by + 1);
2147 }
2148 if (arr == nullptr) return nullptr;
2149 // (Every element of arr is a sub-array of length _node_notes_block_size.)
2150 return arr->at(block_idx) + (idx & (_node_notes_block_size-1));
2151 }
2152
2153 inline Node_Notes* Compile::node_notes_at(int idx) {
2154 return locate_node_notes(_node_note_array, idx, false);
2155 }
2156
2157 inline bool
2158 Compile::set_node_notes_at(int idx, Node_Notes* value) {
2159 if (value == nullptr || value->is_clear())
2160 return false; // nothing to write => write nothing
2161 Node_Notes* loc = locate_node_notes(_node_note_array, idx, true);
2162 assert(loc != nullptr, "");
2163 return loc->update_from(value);
2164 }
2165
2166
2167 //------------------------------TypeNode---------------------------------------
2168 // Node with a Type constant.
2169 class TypeNode : public Node {
2170 protected:
2171 virtual uint hash() const; // Check the type
2172 virtual bool cmp( const Node &n ) const;
2173 virtual uint size_of() const; // Size is bigger
2174 const Type* const _type;
2175 public:
2176 void set_type(const Type* t) {
2177 assert(t != nullptr, "sanity");
2178 DEBUG_ONLY(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
2179 *(const Type**)&_type = t; // cast away const-ness
2180 // If this node is in the hash table, make sure it doesn't need a rehash.
2181 assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
2182 }
2183 const Type* type() const { assert(_type != nullptr, "sanity"); return _type; };
2184 TypeNode( const Type *t, uint required ) : Node(required), _type(t) {
2185 init_class_id(Class_Type);
2186 }
2187 virtual const Type* Value(PhaseGVN* phase) const;
2188 virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
2189 virtual const Type *bottom_type() const;
2190 virtual uint ideal_reg() const;
2191
2192 void make_path_dead(PhaseIterGVN* igvn, PhaseIdealLoop* loop, Node* ctrl_use, uint j, const char* phase_str);
2193 #ifndef PRODUCT
2194 virtual void dump_spec(outputStream *st) const;
2195 virtual void dump_compact_spec(outputStream *st) const;
2196 #endif
2197 void make_paths_from_here_dead(PhaseIterGVN* igvn, PhaseIdealLoop* loop, const char* phase_str);
2198 void create_halt_path(PhaseIterGVN* igvn, Node* c, PhaseIdealLoop* loop, const char* phase_str) const;
2199 };
2200
2201 #include "opto/opcodes.hpp"
2202
2203 #define Op_IL(op) \
2204 inline int Op_ ## op(BasicType bt) { \
2205 assert(bt == T_INT || bt == T_LONG, "only for int or longs"); \
2206 if (bt == T_INT) { \
2207 return Op_## op ## I; \
2208 } \
2209 return Op_## op ## L; \
2210 }
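
// For example, Op_IL(Add) defines Op_Add(BasicType bt), which returns Op_AddI
// for T_INT and Op_AddL for T_LONG.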
2211
2212 Op_IL(Add)
2213 Op_IL(And)
2214 Op_IL(Sub)
2215 Op_IL(Mul)
2216 Op_IL(URShift)
2217 Op_IL(LShift)
2218 Op_IL(RShift)
2219 Op_IL(Xor)
2220 Op_IL(Cmp)
2221 Op_IL(Div)
2222 Op_IL(Mod)
2223 Op_IL(UDiv)
2224 Op_IL(UMod)
2225
2226 inline int Op_ConIL(BasicType bt) {
2227 assert(bt == T_INT || bt == T_LONG, "only for int or longs");
2228 if (bt == T_INT) {
2229 return Op_ConI;
2230 }
2231 return Op_ConL;
2232 }
2233
2234 inline int Op_Cmp_unsigned(BasicType bt) {
2235 assert(bt == T_INT || bt == T_LONG, "only for int or longs");
2236 if (bt == T_INT) {
2237 return Op_CmpU;
2238 }
2239 return Op_CmpUL;
2240 }
2241
2242 inline int Op_Cast(BasicType bt) {
2243 assert(bt == T_INT || bt == T_LONG, "only for int or longs");
2244 if (bt == T_INT) {
2245 return Op_CastII;
2246 }
2247 return Op_CastLL;
2248 }
2249
2250 inline int Op_DivIL(BasicType bt, bool is_unsigned) {
2251 assert(bt == T_INT || bt == T_LONG, "only for int or longs");
2252 if (bt == T_INT) {
2253 if (is_unsigned) {
2254 return Op_UDivI;
2255 } else {
2256 return Op_DivI;
2257 }
2258 }
2259 if (is_unsigned) {
2260 return Op_UDivL;
2261 } else {
2262 return Op_DivL;
2263 }
2264 }
2265
2266 inline int Op_DivModIL(BasicType bt, bool is_unsigned) {
2267 assert(bt == T_INT || bt == T_LONG, "only for int or longs");
2268 if (bt == T_INT) {
2269 if (is_unsigned) {
2270 return Op_UDivModI;
2271 } else {
2272 return Op_DivModI;
2273 }
2274 }
2275 if (is_unsigned) {
2276 return Op_UDivModL;
2277 } else {
2278 return Op_DivModL;
2279 }
2280 }
2281
// Interface to define actions that should be taken when running DataNodeBFS. Each use case can extend this class to
// specify a customized BFS.
2284 class BFSActions : public StackObj {
2285 public:
  // Should a node's inputs be visited further in the BFS traversal? By default, we visit all data inputs. Override this
  // method to provide a custom filter.
2288 virtual bool should_visit(Node* node) const {
2289 // By default, visit all inputs.
2290 return true;
2291 };
2292
  // Is the visited node a target node that we are looking for in the BFS traversal? We do not visit its inputs further,
  // but the BFS will continue to visit all unvisited nodes in the queue.
2295 virtual bool is_target_node(Node* node) const = 0;
2296
2297 // Defines an action that should be taken when we visit a target node in the BFS traversal.
  // To give more freedom, we pass the direct child of the target node such that
  // child->in(i) == target node. This also allows the caller to directly replace the target
  // node instead of only updating its inputs.
2301 virtual void target_node_action(Node* child, uint i) = 0;
2302 };
2303
// Class to perform a BFS traversal on the data nodes from a given start node. The provided BFSActions guides which
// data nodes' inputs should be visited further, which data nodes are target nodes, and what to do with the target nodes.
2306 class DataNodeBFS : public StackObj {
2307 BFSActions& _bfs_actions;
2308
2309 public:
2310 explicit DataNodeBFS(BFSActions& bfs_action) : _bfs_actions(bfs_action) {}
2311
2312 // Run the BFS starting from 'start_node' and apply the actions provided to this class.
2313 void run(Node* start_node) {
2314 ResourceMark rm;
2315 Unique_Node_List _nodes_to_visit;
2316 _nodes_to_visit.push(start_node);
2317 for (uint i = 0; i < _nodes_to_visit.size(); i++) {
2318 Node* next = _nodes_to_visit[i];
2319 for (uint j = 1; j < next->req(); j++) {
2320 Node* input = next->in(j);
2321 if (_bfs_actions.is_target_node(input)) {
2322 assert(_bfs_actions.should_visit(input), "must also pass node filter");
2323 _bfs_actions.target_node_action(next, j);
2324 } else if (_bfs_actions.should_visit(input)) {
2325 _nodes_to_visit.push(input);
2326 }
2327 }
2328 }
2329 }
2330 };
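
// A minimal usage sketch (illustrative only; 'FindConstants', 'start_node',
// and the printed action are hypothetical):
//
//   class FindConstants : public BFSActions {
//    public:
//     virtual bool should_visit(Node* node) const {
//       return node != nullptr && !node->is_CFG();
//     }
//     virtual bool is_target_node(Node* node) const {
//       return node != nullptr && node->is_Con();
//     }
//     virtual void target_node_action(Node* child, uint i) {
//       // child->in(i) is a constant found by the traversal
//       tty->print_cr("constant input %u of node %u", i, child->_idx);
//     }
//   };
//
//   FindConstants find_constants;
//   DataNodeBFS bfs(find_constants);
//   bfs.run(start_node);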
2331
2332 #endif // SHARE_OPTO_NODE_HPP