1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2024, 2025, Alibaba Group Holding Limited. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #ifndef SHARE_OPTO_NODE_HPP
27 #define SHARE_OPTO_NODE_HPP
28
29 #include "libadt/vectset.hpp"
30 #include "opto/compile.hpp"
31 #include "opto/type.hpp"
32 #include "utilities/copy.hpp"
33
34 // Portions of code courtesy of Clifford Click
35
36 // Optimization - Graph Style
37
38
39 class AbstractLockNode;
40 class AddNode;
41 class AddPNode;
42 class AliasInfo;
43 class AllocateArrayNode;
44 class AllocateNode;
45 class ArrayCopyNode;
46 class BaseCountedLoopNode;
47 class BaseCountedLoopEndNode;
48 class BlackholeNode;
49 class Block;
50 class BoolNode;
51 class BoxLockNode;
52 class CMoveNode;
53 class CallDynamicJavaNode;
54 class CallJavaNode;
55 class CallLeafNode;
56 class CallLeafNoFPNode;
57 class CallLeafPureNode;
58 class CallNode;
59 class CallRuntimeNode;
60 class CallStaticJavaNode;
61 class CastFFNode;
62 class CastHHNode;
63 class CastDDNode;
64 class CastVVNode;
65 class CastIINode;
66 class CastLLNode;
67 class CastPPNode;
68 class CatchNode;
69 class CatchProjNode;
70 class CheckCastPPNode;
71 class ClearArrayNode;
72 class CmpNode;
73 class CodeBuffer;
74 class ConstraintCastNode;
75 class ConNode;
76 class ConINode;
77 class ConvertNode;
78 class CompareAndSwapNode;
79 class CompareAndExchangeNode;
80 class CountedLoopNode;
81 class CountedLoopEndNode;
82 class DecodeNarrowPtrNode;
83 class DecodeNNode;
84 class DecodeNKlassNode;
85 class EncodeNarrowPtrNode;
86 class EncodePNode;
87 class EncodePKlassNode;
88 class FastLockNode;
89 class FastUnlockNode;
90 class FlatArrayCheckNode;
91 class HaltNode;
92 class IfNode;
93 class IfProjNode;
94 class IfFalseNode;
95 class IfTrueNode;
96 class InitializeNode;
97 class JVMState;
98 class JumpNode;
99 class JumpProjNode;
100 class LoadNode;
101 class LoadStoreNode;
102 class LoadStoreConditionalNode;
103 class LockNode;
104 class LongCountedLoopNode;
105 class LongCountedLoopEndNode;
106 class LoopNode;
107 class LShiftNode;
108 class MachBranchNode;
109 class MachCallDynamicJavaNode;
110 class MachCallJavaNode;
111 class MachCallLeafNode;
112 class MachCallNode;
113 class MachCallRuntimeNode;
114 class MachCallStaticJavaNode;
115 class MachConstantBaseNode;
116 class MachConstantNode;
117 class MachGotoNode;
118 class MachIfNode;
119 class MachJumpNode;
120 class MachNode;
121 class MachNullCheckNode;
122 class MachProjNode;
123 class MachPrologNode;
124 class MachReturnNode;
125 class MachSafePointNode;
126 class MachSpillCopyNode;
127 class MachTempNode;
128 class MachMergeNode;
129 class MachMemBarNode;
130 class MachVEPNode;
131 class Matcher;
132 class MemBarNode;
133 class MemBarStoreStoreNode;
134 class MemNode;
135 class MergeMemNode;
136 class MoveNode;
137 class MulNode;
138 class MultiNode;
139 class MultiBranchNode;
140 class NegNode;
141 class NegVNode;
142 class NeverBranchNode;
143 class Opaque1Node;
144 class OpaqueLoopInitNode;
145 class OpaqueLoopStrideNode;
146 class OpaqueMultiversioningNode;
147 class OpaqueNotNullNode;
148 class OpaqueInitializedAssertionPredicateNode;
149 class OpaqueTemplateAssertionPredicateNode;
150 class OuterStripMinedLoopNode;
151 class OuterStripMinedLoopEndNode;
152 class Node;
153 class Node_Array;
154 class Node_List;
155 class Node_Stack;
156 class OopMap;
157 class ParmNode;
158 class ParsePredicateNode;
159 class PCTableNode;
160 class PhaseCCP;
161 class PhaseGVN;
162 class PhaseIdealLoop;
163 class PhaseIterGVN;
164 class PhaseRegAlloc;
165 class PhaseTransform;
166 class PhaseValues;
167 class PhiNode;
168 class Pipeline;
169 class PopulateIndexNode;
170 class ProjNode;
171 class RangeCheckNode;
172 class ReductionNode;
173 class RegMask;
174 class RegionNode;
175 class RootNode;
176 class SafePointNode;
177 class SafePointScalarObjectNode;
178 class SafePointScalarMergeNode;
179 class SaturatingVectorNode;
180 class StartNode;
181 class State;
182 class StoreNode;
183 class SubNode;
184 class SubTypeCheckNode;
185 class Type;
186 class TypeNode;
187 class UnlockNode;
188 class InlineTypeNode;
189 class LoadFlatNode;
190 class StoreFlatNode;
191 class VectorNode;
192 class LoadVectorNode;
193 class LoadVectorMaskedNode;
194 class StoreVectorMaskedNode;
195 class LoadVectorGatherNode;
196 class LoadVectorGatherMaskedNode;
197 class StoreVectorNode;
198 class StoreVectorScatterNode;
199 class StoreVectorScatterMaskedNode;
200 class VerifyVectorAlignmentNode;
201 class VectorMaskCmpNode;
202 class VectorUnboxNode;
203 class VectorSet;
204 class VectorReinterpretNode;
205 class ShiftVNode;
206 class MulVLNode;
207 class ExpandVNode;
208 class CompressVNode;
209 class CompressMNode;
210 class C2_MacroAssembler;
211
212
213 #ifndef OPTO_DU_ITERATOR_ASSERT
214 #ifdef ASSERT
215 #define OPTO_DU_ITERATOR_ASSERT 1
216 #else
217 #define OPTO_DU_ITERATOR_ASSERT 0
218 #endif
219 #endif //OPTO_DU_ITERATOR_ASSERT
220
221 #if OPTO_DU_ITERATOR_ASSERT
222 class DUIterator;
223 class DUIterator_Fast;
224 class DUIterator_Last;
225 #else
226 typedef uint DUIterator;
227 typedef Node** DUIterator_Fast;
228 typedef Node** DUIterator_Last;
229 #endif
230
231 typedef ResizeableHashTable<Node*, Node*, AnyObj::RESOURCE_AREA, mtCompiler> OrigToNewHashtable;
232
233 // Node Sentinel
234 #define NodeSentinel (Node*)-1
235
236 // Unknown count frequency
237 #define COUNT_UNKNOWN (-1.0f)
238
239 //------------------------------Node-------------------------------------------
240 // Nodes define actions in the program. They create values, which have types.
241 // They are both vertices in a directed graph and program primitives. Nodes
242 // are labeled; the label is the "opcode", the primitive function in the lambda
243 // calculus sense that gives meaning to the Node. Node inputs are ordered (so
244 // that "a-b" is different from "b-a"). The inputs to a Node are the inputs to
245 // the Node's function. These inputs also define a Type equation for the Node.
246 // Solving these Type equations amounts to doing dataflow analysis.
247 // Control and data are uniformly represented in the graph. Finally, Nodes
248 // have a unique dense integer index which is used to index into side arrays
249 // whenever I have phase-specific information.
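// For illustration only (a sketch; AddINode is declared elsewhere and the side
// table below is hypothetical):
//   Node* add = new AddINode(a, b);          // two ordered data inputs: in(1)=a, in(2)=b
//   side_table.at_put_grow(add->_idx, ...);  // _idx indexes phase-specific side arrays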
250
251 class Node {
252
253 // Lots of restrictions on cloning Nodes
254 NONCOPYABLE(Node);
255
256 public:
257 friend class Compile;
258 #if OPTO_DU_ITERATOR_ASSERT
259 friend class DUIterator_Common;
260 friend class DUIterator;
261 friend class DUIterator_Fast;
262 friend class DUIterator_Last;
263 #endif
264
265 // Because Nodes come and go, I define an Arena of Node structures to pull
266 // from. This should allow fast access to node creation & deletion. This
267 // field is a local cache of a value defined in some "program fragment" of
268 // which these Nodes are just a part.
269
270 inline void* operator new(size_t x) throw() {
271 Compile* C = Compile::current();
272 Node* n = (Node*)C->node_arena()->AmallocWords(x);
273 return (void*)n;
274 }
275
276 // Delete is a NOP
277 void operator delete( void *ptr ) {}
278 // Fancy destructor; eagerly attempt to reclaim Node numberings and storage
279 void destruct(PhaseValues* phase);
280
281 // Create a new Node. Required is the number of inputs required for
282 // semantic correctness.
283 Node( uint required );
284
285 // Create a new Node with given input edges.
286 // This version requires use of the "edge-count" new.
287 // E.g. new (C,3) FooNode( C, nullptr, left, right );
288 Node( Node *n0 );
289 Node( Node *n0, Node *n1 );
290 Node( Node *n0, Node *n1, Node *n2 );
291 Node( Node *n0, Node *n1, Node *n2, Node *n3 );
292 Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4 );
293 Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4, Node *n5 );
294 Node( Node *n0, Node *n1, Node *n2, Node *n3,
295 Node *n4, Node *n5, Node *n6 );
296
297 // Clone an inherited Node given only the base Node type.
298 Node* clone() const;
299
300 // Clone a Node, immediately supplying one or two new edges.
301 // The first and second arguments, if non-null, replace in(1) and in(2),
302 // respectively.
303 Node* clone_with_data_edge(Node* in1, Node* in2 = nullptr) const {
304 Node* nn = clone();
305 if (in1 != nullptr) nn->set_req(1, in1);
306 if (in2 != nullptr) nn->set_req(2, in2);
307 return nn;
308 }
309
310 private:
311 // Shared setup for the above constructors.
312 // Handles all interactions with Compile::current.
313 // Puts initial values in all Node fields except _idx.
314 // Returns the initial value for _idx, which cannot
315 // be initialized by assignment.
316 inline int Init(int req);
317
318 //----------------- input edge handling
319 protected:
320 friend class PhaseCFG; // Access to address of _in array elements
321 Node **_in; // Array of use-def references to Nodes
322 Node **_out; // Array of def-use references to Nodes
323
324 // Input edges are split into two categories. Required edges are required
325 // for semantic correctness; order is important and nulls are allowed.
326 // Precedence edges are used to help determine execution order and are
327 // added, e.g., for scheduling purposes. They are unordered and not
328 // duplicated; they have no embedded nulls. Edges from 0 to _cnt-1
329 // are required, from _cnt to _max-1 are precedence edges.
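// For illustration only, a minimal sketch using the accessors declared further
// below (req(), len(), in()); the precedence region may end with a null tail:
//   for (uint i = 0;        i < n->req(); i++) { Node* def = n->in(i); ... }             // required (may be null)
//   for (uint i = n->req(); i < n->len(); i++) { if (n->in(i) == nullptr) break; ... }   // precedence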
330 node_idx_t _cnt; // Total number of required Node inputs.
331
332 node_idx_t _max; // Actual length of input array.
333
334 // Output edges are an unordered list of def-use edges which exactly
335 // correspond to required input edges which point from other nodes
336 // to this one. Thus the count of the output edges is the number of
337 // users of this node.
338 node_idx_t _outcnt; // Total number of Node outputs.
339
340 node_idx_t _outmax; // Actual length of output array.
341
342 // Grow the actual input array to the next larger power-of-2 bigger than len.
343 void grow( uint len );
344 // Grow the output array to the next larger power-of-2 bigger than len.
345 void out_grow( uint len );
346 // Resize input or output array to grow it to the next larger power-of-2
347 // bigger than len.
348 void resize_array(Node**& array, node_idx_t& max_size, uint len, bool needs_clearing);
349
350 public:
351 // Each Node is assigned a unique small/dense number. This number is used
352 // to index into auxiliary arrays of data and bit vectors.
353 // The value of _idx can be changed using the set_idx() method.
354 //
355 // The PhaseRenumberLive phase renumbers nodes based on liveness information.
356 // Therefore, it updates the value of the _idx field. The parse-time _idx is
357 // preserved in _parse_idx.
358 node_idx_t _idx;
359 DEBUG_ONLY(const node_idx_t _parse_idx;)
360 // IGV node identifier. Two nodes, possibly in different compilation phases,
361 // have the same IGV identifier if (and only if) they are the very same node
362 // (same memory address) or one is "derived" from the other (by e.g.
363 // renumbering or matching). This identifier makes it possible to follow the
364 // entire lifetime of a node in IGV even if its C2 identifier (_idx) changes.
365 NOT_PRODUCT(node_idx_t _igv_idx;)
366
367 // Get the (read-only) number of input edges
368 uint req() const { return _cnt; }
369 uint len() const { return _max; }
370 // Get the (read-only) number of output edges
371 uint outcnt() const { return _outcnt; }
372
373 #if OPTO_DU_ITERATOR_ASSERT
374 // Iterate over the out-edges of this node. Deletions are illegal.
375 inline DUIterator outs() const;
376 // Use this when the out array might have changed to suppress asserts.
377 inline DUIterator& refresh_out_pos(DUIterator& i) const;
378 // Does the node have an out at this position? (Used for iteration.)
379 inline bool has_out(DUIterator& i) const;
380 inline Node* out(DUIterator& i) const;
381 // Iterate over the out-edges of this node. All changes are illegal.
382 inline DUIterator_Fast fast_outs(DUIterator_Fast& max) const;
383 inline Node* fast_out(DUIterator_Fast& i) const;
384 // Iterate over the out-edges of this node, deleting one at a time.
385 inline DUIterator_Last last_outs(DUIterator_Last& min) const;
386 inline Node* last_out(DUIterator_Last& i) const;
387 // The inline bodies of all these methods are after the iterator definitions.
388 #else
389 // Iterate over the out-edges of this node. Deletions are illegal.
390 // This iteration uses integral indexes, to decouple from array reallocations.
391 DUIterator outs() const { return 0; }
392 // Use this when the out array might have changed to suppress asserts.
393 DUIterator refresh_out_pos(DUIterator i) const { return i; }
394
395 // Reference to the i'th output Node. Error if out of bounds.
396 Node* out(DUIterator i) const { assert(i < _outcnt, "oob"); return _out[i]; }
397 // Does the node have an out at this position? (Used for iteration.)
398 bool has_out(DUIterator i) const { return i < _outcnt; }
399
400 // Iterate over the out-edges of this node. All changes are illegal.
401 // This iteration uses a pointer internal to the out array.
402 DUIterator_Fast fast_outs(DUIterator_Fast& max) const {
403 Node** out = _out;
404 // Assign a limit pointer to the reference argument:
405 max = out + (ptrdiff_t)_outcnt;
406 // Return the base pointer:
407 return out;
408 }
409 Node* fast_out(DUIterator_Fast i) const { return *i; }
410 // Iterate over the out-edges of this node, deleting one at a time.
411 // This iteration uses a pointer internal to the out array.
412 DUIterator_Last last_outs(DUIterator_Last& min) const {
413 Node** out = _out;
414 // Assign a limit pointer to the reference argument:
415 min = out;
416 // Return the pointer to the start of the iteration:
417 return out + (ptrdiff_t)_outcnt - 1;
418 }
419 Node* last_out(DUIterator_Last i) const { return *i; }
420 #endif
421
422 // Reference to the i'th input Node. Error if out of bounds.
423 Node* in(uint i) const { assert(i < _max, "oob: i=%d, _max=%d", i, _max); return _in[i]; }
424 // Reference to the i'th input Node. null if out of bounds.
425 Node* lookup(uint i) const { return ((i < _max) ? _in[i] : nullptr); }
426 // Reference to the i'th output Node. Error if out of bounds.
427 // Use this accessor sparingly. We are trying to use iterators instead.
428 Node* raw_out(uint i) const { assert(i < _outcnt,"oob"); return _out[i]; }
429 // Return the unique out edge.
430 Node* unique_out() const { assert(_outcnt==1,"not unique"); return _out[0]; }
431 // Delete out edge at position 'i' by moving last out edge to position 'i'
432 void raw_del_out(uint i) {
433 assert(i < _outcnt,"oob");
434 assert(_outcnt > 0,"oob");
435 #if OPTO_DU_ITERATOR_ASSERT
436 // Record that a change happened here.
437 DEBUG_ONLY(_last_del = _out[i]; ++_del_tick);
438 #endif
439 _out[i] = _out[--_outcnt];
440 // Smash the old edge so it can't be used accidentally.
441 DEBUG_ONLY(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
442 }
443
444 #ifdef ASSERT
445 bool is_dead() const;
446 static bool is_not_dead(const Node* n);
447 bool is_reachable_from_root() const;
448 #endif
449 // Check whether node has become unreachable
450 bool is_unreachable(PhaseIterGVN &igvn) const;
451
452 // Set a required input edge, also updates corresponding output edge
453 void add_req( Node *n ); // Append a NEW required input
454 void add_req( Node *n0, Node *n1 ) {
455 add_req(n0); add_req(n1); }
456 void add_req( Node *n0, Node *n1, Node *n2 ) {
457 add_req(n0); add_req(n1); add_req(n2); }
458 void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n).
459 void del_req( uint idx ); // Delete required edge & compact
460 void del_req_ordered( uint idx ); // Delete required edge & compact with preserved order
461 void ins_req( uint i, Node *n ); // Insert a NEW required input
462 void set_req( uint i, Node *n ) {
463 assert( is_not_dead(n), "can not use dead node");
464 assert( i < _cnt, "oob: i=%d, _cnt=%d", i, _cnt);
465 assert( !VerifyHashTableKeys || _hash_lock == 0,
466 "remove node from hash table before modifying it");
467 Node** p = &_in[i]; // cache this._in, across the del_out call
468 if (*p != nullptr) (*p)->del_out((Node *)this);
469 (*p) = n;
470 if (n != nullptr) n->add_out((Node *)this);
471 Compile::current()->record_modified_node(this);
472 }
473 // Light version of set_req() to init inputs after node creation.
474 void init_req( uint i, Node *n ) {
475 assert( (i == 0 && this == n) ||
476 is_not_dead(n), "can not use dead node");
477 assert( i < _cnt, "oob");
478 assert( !VerifyHashTableKeys || _hash_lock == 0,
479 "remove node from hash table before modifying it");
480 assert( _in[i] == nullptr, "sanity");
481 _in[i] = n;
482 if (n != nullptr) n->add_out((Node *)this);
483 Compile::current()->record_modified_node(this);
484 }
485 // Find first occurrence of n among my edges:
486 int find_edge(Node* n);
487 int find_prec_edge(Node* n) {
488 for (uint i = req(); i < len(); i++) {
489 if (_in[i] == n) return i;
490 if (_in[i] == nullptr) {
491 DEBUG_ONLY( while ((++i) < len()) assert(_in[i] == nullptr, "Gap in prec edges!"); )
492 break;
493 }
494 }
495 return -1;
496 }
497 int replace_edge(Node* old, Node* neww, PhaseGVN* gvn = nullptr);
498 int replace_edges_in_range(Node* old, Node* neww, int start, int end, PhaseGVN* gvn);
499 // null out all inputs to eliminate incoming Def-Use edges.
500 void disconnect_inputs(Compile* C);
501
502 // Quickly, return true if and only if I am Compile::current()->top().
503 bool is_top() const {
504 assert((this == (Node*) Compile::current()->top()) == (_out == nullptr), "");
505 return (_out == nullptr);
506 }
507 // Reaffirm invariants for is_top. (Only from Compile::set_cached_top_node.)
508 void setup_is_top();
509
510 // Strip away casting. (It is depth-limited.)
511 Node* uncast(bool keep_deps = false) const;
512 // Return whether two Nodes are equivalent, after stripping casting.
513 bool eqv_uncast(const Node* n, bool keep_deps = false) const {
514 return (this->uncast(keep_deps) == n->uncast(keep_deps));
515 }
516
517 // Find the first out (use) of the current node that matches the given opcode.
518 Node* find_out_with(int opcode);
519 // Return true if the current node has an out that matches opcode.
520 bool has_out_with(int opcode);
521 // Return true if the current node has an out that matches any of the opcodes.
522 bool has_out_with(int opcode1, int opcode2, int opcode3, int opcode4);
523
524 private:
525 static Node* uncast_helper(const Node* n, bool keep_deps);
526
527 // Add an output edge to the end of the list
528 void add_out( Node *n ) {
529 if (is_top()) return;
530 if( _outcnt == _outmax ) out_grow(_outcnt);
531 _out[_outcnt++] = n;
532 }
533 // Delete an output edge
534 void del_out( Node *n ) {
535 if (is_top()) return;
536 Node** outp = &_out[_outcnt];
537 // Find and remove n
538 do {
539 assert(outp > _out, "Missing Def-Use edge");
540 } while (*--outp != n);
541 *outp = _out[--_outcnt];
542 // Smash the old edge so it can't be used accidentally.
543 DEBUG_ONLY(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
544 // Record that a change happened here.
545 #if OPTO_DU_ITERATOR_ASSERT
546 DEBUG_ONLY(_last_del = n; ++_del_tick);
547 #endif
548 }
549 // Close gap after removing edge.
550 void close_prec_gap_at(uint gap) {
551 assert(_cnt <= gap && gap < _max, "no valid prec edge");
552 uint i = gap;
553 Node *last = nullptr;
554 for (; i < _max-1; ++i) {
555 Node *next = _in[i+1];
556 if (next == nullptr) break;
557 last = next;
558 }
559 _in[gap] = last; // Move last slot to empty one.
560 _in[i] = nullptr; // null out last slot.
561 }
562
563 public:
564 // Globally replace this node by a given new node, updating all uses.
565 void replace_by(Node* new_node);
566 // Globally replace this node by a given new node, updating all uses
567 // and cutting input edges of old node.
568 void subsume_by(Node* new_node, Compile* c) {
569 replace_by(new_node);
570 disconnect_inputs(c);
571 }
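// For example (a sketch; 'old_node' and 'better' are hypothetical locals):
//   old_node->subsume_by(better, C);  // every user of old_node now uses 'better',
//                                     // and old_node's own inputs are cut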
572 void set_req_X(uint i, Node *n, PhaseIterGVN *igvn);
573 void set_req_X(uint i, Node *n, PhaseGVN *gvn);
574 // Find the one non-null required input. RegionNode only
575 Node *nonnull_req() const;
576 // Add or remove precedence edges
577 void add_prec( Node *n );
578 void rm_prec( uint i );
579
580 // Note: prec(i) will not necessarily point to n if edge already exists.
581 void set_prec( uint i, Node *n ) {
582 assert(i < _max, "oob: i=%d, _max=%d", i, _max);
583 assert(is_not_dead(n), "can not use dead node");
584 assert(i >= _cnt, "not a precedence edge");
585 // Avoid spec violation: duplicated prec edge.
586 if (_in[i] == n) return;
587 if (n == nullptr || find_prec_edge(n) != -1) {
588 rm_prec(i);
589 return;
590 }
591 if (_in[i] != nullptr) _in[i]->del_out((Node *)this);
592 _in[i] = n;
593 n->add_out((Node *)this);
594 Compile::current()->record_modified_node(this);
595 }
596
597 // Set this node's index, used by cisc_version to replace current node
598 void set_idx(uint new_idx) {
599 _idx = new_idx;
600 }
601 // Swap input edge order. (Edge indexes i1 and i2 are usually 1 and 2.)
602 void swap_edges(uint i1, uint i2) {
603 DEBUG_ONLY(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
604 // Def-Use info is unchanged
605 Node* n1 = in(i1);
606 Node* n2 = in(i2);
607 _in[i1] = n2;
608 _in[i2] = n1;
609 // If this node is in the hash table, make sure it doesn't need a rehash.
610 assert(check_hash == NO_HASH || check_hash == hash(), "edge swap must preserve hash code");
611 // Flip swapped edges flag.
612 if (has_swapped_edges()) {
613 remove_flag(Node::Flag_has_swapped_edges);
614 } else {
615 add_flag(Node::Flag_has_swapped_edges);
616 }
617 }
618
619 // Iterators over input Nodes for a Node X are written as:
620 // for( i = 0; i < X.req(); i++ ) ... X[i] ...
621 // NOTE: Required edges can contain embedded null pointers.
622
623 //----------------- Other Node Properties
624
625 // Generate class IDs for (some) ideal nodes so that it is possible to determine
626 // the type of a node using a non-virtual method call (the method is_<Node>() below).
627 //
628 // A class ID of an ideal node is a set of bits. In a class ID, a single bit determines
629 // the type of the node the ID represents; another subset of an ID's bits is reserved
630 // for the superclasses of the node represented by the ID.
631 //
632 // By design, if A is a supertype of B, B.is_A() returns true and A.is_B()
633 // returns false. A.is_A() returns true.
634 //
635 // If two classes, A and B, have the same superclass, a different bit of A's class id
636 // is reserved for A's type than for B's type. That bit is specified by the third
637 // parameter in the macro DEFINE_CLASS_ID.
638 //
639 // By convention, classes with deeper hierarchy are declared first. Moreover,
640 // classes with the same hierarchy depth are sorted by usage frequency.
641 //
642 // The query method masks the bits to cut off bits of subclasses and then compares
643 // the result with the class id (see the macro DEFINE_CLASS_QUERY below).
644 //
645 // Class_MachCall=30, ClassMask_MachCall=31
646 // 12 8 4 0
647 // 0 0 0 0 0 0 0 0 1 1 1 1 0
648 // | | | |
649 // | | | Bit_Mach=2
650 // | | Bit_MachReturn=4
651 // | Bit_MachSafePoint=8
652 // Bit_MachCall=16
653 //
654 // Class_CountedLoop=56, ClassMask_CountedLoop=63
655 // 12 8 4 0
656 // 0 0 0 0 0 0 0 1 1 1 0 0 0
657 // | | |
658 // | | Bit_Region=8
659 // | Bit_Loop=16
660 // Bit_CountedLoop=32
661
662 #define DEFINE_CLASS_ID(cl, supcl, subn) \
663 Bit_##cl = (Class_##supcl == 0) ? 1 << subn : (Bit_##supcl) << (1 + subn) , \
664 Class_##cl = Class_##supcl + Bit_##cl , \
665 ClassMask_##cl = ((Bit_##cl << 1) - 1) ,
666
667 // This enum is used only for C2 ideal and mach nodes with is_<node>() methods
668 // so that its values fit into 32 bits.
669 enum NodeClasses {
670 Bit_Node = 0x00000000,
671 Class_Node = 0x00000000,
672 ClassMask_Node = 0xFFFFFFFF,
673
674 DEFINE_CLASS_ID(Multi, Node, 0)
675 DEFINE_CLASS_ID(SafePoint, Multi, 0)
676 DEFINE_CLASS_ID(Call, SafePoint, 0)
677 DEFINE_CLASS_ID(CallJava, Call, 0)
678 DEFINE_CLASS_ID(CallStaticJava, CallJava, 0)
679 DEFINE_CLASS_ID(CallDynamicJava, CallJava, 1)
680 DEFINE_CLASS_ID(CallRuntime, Call, 1)
681 DEFINE_CLASS_ID(CallLeaf, CallRuntime, 0)
682 DEFINE_CLASS_ID(CallLeafNoFP, CallLeaf, 0)
683 DEFINE_CLASS_ID(CallLeafPure, CallLeaf, 1)
684 DEFINE_CLASS_ID(Allocate, Call, 2)
685 DEFINE_CLASS_ID(AllocateArray, Allocate, 0)
686 DEFINE_CLASS_ID(AbstractLock, Call, 3)
687 DEFINE_CLASS_ID(Lock, AbstractLock, 0)
688 DEFINE_CLASS_ID(Unlock, AbstractLock, 1)
689 DEFINE_CLASS_ID(ArrayCopy, Call, 4)
690 DEFINE_CLASS_ID(LoadFlat, SafePoint, 1)
691 DEFINE_CLASS_ID(StoreFlat, SafePoint, 2)
692 DEFINE_CLASS_ID(MultiBranch, Multi, 1)
693 DEFINE_CLASS_ID(PCTable, MultiBranch, 0)
694 DEFINE_CLASS_ID(Catch, PCTable, 0)
695 DEFINE_CLASS_ID(Jump, PCTable, 1)
696 DEFINE_CLASS_ID(If, MultiBranch, 1)
697 DEFINE_CLASS_ID(BaseCountedLoopEnd, If, 0)
698 DEFINE_CLASS_ID(CountedLoopEnd, BaseCountedLoopEnd, 0)
699 DEFINE_CLASS_ID(LongCountedLoopEnd, BaseCountedLoopEnd, 1)
700 DEFINE_CLASS_ID(RangeCheck, If, 1)
701 DEFINE_CLASS_ID(OuterStripMinedLoopEnd, If, 2)
702 DEFINE_CLASS_ID(ParsePredicate, If, 3)
703 DEFINE_CLASS_ID(NeverBranch, MultiBranch, 2)
704 DEFINE_CLASS_ID(Start, Multi, 2)
705 DEFINE_CLASS_ID(MemBar, Multi, 3)
706 DEFINE_CLASS_ID(Initialize, MemBar, 0)
707 DEFINE_CLASS_ID(MemBarStoreStore, MemBar, 1)
708 DEFINE_CLASS_ID(Blackhole, MemBar, 2)
709
710 DEFINE_CLASS_ID(Mach, Node, 1)
711 DEFINE_CLASS_ID(MachReturn, Mach, 0)
712 DEFINE_CLASS_ID(MachSafePoint, MachReturn, 0)
713 DEFINE_CLASS_ID(MachCall, MachSafePoint, 0)
714 DEFINE_CLASS_ID(MachCallJava, MachCall, 0)
715 DEFINE_CLASS_ID(MachCallStaticJava, MachCallJava, 0)
716 DEFINE_CLASS_ID(MachCallDynamicJava, MachCallJava, 1)
717 DEFINE_CLASS_ID(MachCallRuntime, MachCall, 1)
718 DEFINE_CLASS_ID(MachCallLeaf, MachCallRuntime, 0)
719 DEFINE_CLASS_ID(MachBranch, Mach, 1)
720 DEFINE_CLASS_ID(MachIf, MachBranch, 0)
721 DEFINE_CLASS_ID(MachGoto, MachBranch, 1)
722 DEFINE_CLASS_ID(MachNullCheck, MachBranch, 2)
723 DEFINE_CLASS_ID(MachSpillCopy, Mach, 2)
724 DEFINE_CLASS_ID(MachTemp, Mach, 3)
725 DEFINE_CLASS_ID(MachConstantBase, Mach, 4)
726 DEFINE_CLASS_ID(MachConstant, Mach, 5)
727 DEFINE_CLASS_ID(MachJump, MachConstant, 0)
728 DEFINE_CLASS_ID(MachMerge, Mach, 6)
729 DEFINE_CLASS_ID(MachMemBar, Mach, 7)
730 DEFINE_CLASS_ID(MachProlog, Mach, 8)
731 DEFINE_CLASS_ID(MachVEP, Mach, 9)
732
733 DEFINE_CLASS_ID(Type, Node, 2)
734 DEFINE_CLASS_ID(Phi, Type, 0)
735 DEFINE_CLASS_ID(ConstraintCast, Type, 1)
736 DEFINE_CLASS_ID(CastII, ConstraintCast, 0)
737 DEFINE_CLASS_ID(CheckCastPP, ConstraintCast, 1)
738 DEFINE_CLASS_ID(CastLL, ConstraintCast, 2)
739 DEFINE_CLASS_ID(CastFF, ConstraintCast, 3)
740 DEFINE_CLASS_ID(CastDD, ConstraintCast, 4)
741 DEFINE_CLASS_ID(CastVV, ConstraintCast, 5)
742 DEFINE_CLASS_ID(CastPP, ConstraintCast, 6)
743 DEFINE_CLASS_ID(CastHH, ConstraintCast, 7)
744 DEFINE_CLASS_ID(CMove, Type, 3)
745 DEFINE_CLASS_ID(SafePointScalarObject, Type, 4)
746 DEFINE_CLASS_ID(DecodeNarrowPtr, Type, 5)
747 DEFINE_CLASS_ID(DecodeN, DecodeNarrowPtr, 0)
748 DEFINE_CLASS_ID(DecodeNKlass, DecodeNarrowPtr, 1)
749 DEFINE_CLASS_ID(EncodeNarrowPtr, Type, 6)
750 DEFINE_CLASS_ID(EncodeP, EncodeNarrowPtr, 0)
751 DEFINE_CLASS_ID(EncodePKlass, EncodeNarrowPtr, 1)
752 DEFINE_CLASS_ID(Vector, Type, 7)
753 DEFINE_CLASS_ID(VectorMaskCmp, Vector, 0)
754 DEFINE_CLASS_ID(VectorUnbox, Vector, 1)
755 DEFINE_CLASS_ID(VectorReinterpret, Vector, 2)
756 DEFINE_CLASS_ID(ShiftV, Vector, 3)
757 DEFINE_CLASS_ID(CompressV, Vector, 4)
758 DEFINE_CLASS_ID(ExpandV, Vector, 5)
759 DEFINE_CLASS_ID(CompressM, Vector, 6)
760 DEFINE_CLASS_ID(Reduction, Vector, 7)
761 DEFINE_CLASS_ID(NegV, Vector, 8)
762 DEFINE_CLASS_ID(SaturatingVector, Vector, 9)
763 DEFINE_CLASS_ID(MulVL, Vector, 10)
764 DEFINE_CLASS_ID(InlineType, Type, 8)
765 DEFINE_CLASS_ID(Con, Type, 9)
766 DEFINE_CLASS_ID(ConI, Con, 0)
767 DEFINE_CLASS_ID(SafePointScalarMerge, Type, 10)
768 DEFINE_CLASS_ID(Convert, Type, 11)
769
770
771 DEFINE_CLASS_ID(Proj, Node, 3)
772 DEFINE_CLASS_ID(CatchProj, Proj, 0)
773 DEFINE_CLASS_ID(JumpProj, Proj, 1)
774 DEFINE_CLASS_ID(IfProj, Proj, 2)
775 DEFINE_CLASS_ID(IfTrue, IfProj, 0)
776 DEFINE_CLASS_ID(IfFalse, IfProj, 1)
777 DEFINE_CLASS_ID(Parm, Proj, 4)
778 DEFINE_CLASS_ID(MachProj, Proj, 5)
779
780 DEFINE_CLASS_ID(Mem, Node, 4)
781 DEFINE_CLASS_ID(Load, Mem, 0)
782 DEFINE_CLASS_ID(LoadVector, Load, 0)
783 DEFINE_CLASS_ID(LoadVectorGather, LoadVector, 0)
784 DEFINE_CLASS_ID(LoadVectorGatherMasked, LoadVector, 1)
785 DEFINE_CLASS_ID(LoadVectorMasked, LoadVector, 2)
786 DEFINE_CLASS_ID(Store, Mem, 1)
787 DEFINE_CLASS_ID(StoreVector, Store, 0)
788 DEFINE_CLASS_ID(StoreVectorScatter, StoreVector, 0)
789 DEFINE_CLASS_ID(StoreVectorScatterMasked, StoreVector, 1)
790 DEFINE_CLASS_ID(StoreVectorMasked, StoreVector, 2)
791 DEFINE_CLASS_ID(LoadStore, Mem, 2)
792 DEFINE_CLASS_ID(LoadStoreConditional, LoadStore, 0)
793 DEFINE_CLASS_ID(CompareAndSwap, LoadStoreConditional, 0)
794 DEFINE_CLASS_ID(CompareAndExchangeNode, LoadStore, 1)
795
796 DEFINE_CLASS_ID(Region, Node, 5)
797 DEFINE_CLASS_ID(Loop, Region, 0)
798 DEFINE_CLASS_ID(Root, Loop, 0)
799 DEFINE_CLASS_ID(BaseCountedLoop, Loop, 1)
800 DEFINE_CLASS_ID(CountedLoop, BaseCountedLoop, 0)
801 DEFINE_CLASS_ID(LongCountedLoop, BaseCountedLoop, 1)
802 DEFINE_CLASS_ID(OuterStripMinedLoop, Loop, 2)
803
804 DEFINE_CLASS_ID(Sub, Node, 6)
805 DEFINE_CLASS_ID(Cmp, Sub, 0)
806 DEFINE_CLASS_ID(FastLock, Cmp, 0)
807 DEFINE_CLASS_ID(FastUnlock, Cmp, 1)
808 DEFINE_CLASS_ID(SubTypeCheck, Cmp, 2)
809 DEFINE_CLASS_ID(FlatArrayCheck, Cmp, 3)
810
811 DEFINE_CLASS_ID(MergeMem, Node, 7)
812 DEFINE_CLASS_ID(Bool, Node, 8)
813 DEFINE_CLASS_ID(AddP, Node, 9)
814 DEFINE_CLASS_ID(BoxLock, Node, 10)
815 DEFINE_CLASS_ID(Add, Node, 11)
816 DEFINE_CLASS_ID(Mul, Node, 12)
817 DEFINE_CLASS_ID(ClearArray, Node, 14)
818 DEFINE_CLASS_ID(Halt, Node, 15)
819 DEFINE_CLASS_ID(Opaque1, Node, 16)
820 DEFINE_CLASS_ID(OpaqueLoopInit, Opaque1, 0)
821 DEFINE_CLASS_ID(OpaqueLoopStride, Opaque1, 1)
822 DEFINE_CLASS_ID(OpaqueMultiversioning, Opaque1, 2)
823 DEFINE_CLASS_ID(OpaqueNotNull, Node, 17)
824 DEFINE_CLASS_ID(OpaqueInitializedAssertionPredicate, Node, 18)
825 DEFINE_CLASS_ID(OpaqueTemplateAssertionPredicate, Node, 19)
826 DEFINE_CLASS_ID(Move, Node, 20)
827 DEFINE_CLASS_ID(LShift, Node, 21)
828 DEFINE_CLASS_ID(Neg, Node, 22)
829
830 _max_classes = ClassMask_Neg
831 };
832 #undef DEFINE_CLASS_ID
833
834 // Flags are sorted by usage frequency.
835 enum NodeFlags {
836 Flag_is_Copy = 1 << 0, // should be first bit to avoid shift
837 Flag_rematerialize = 1 << 1,
838 Flag_needs_anti_dependence_check = 1 << 2,
839 Flag_is_macro = 1 << 3,
840 Flag_is_Con = 1 << 4,
841 Flag_is_cisc_alternate = 1 << 5,
842 Flag_is_dead_loop_safe = 1 << 6,
843 Flag_may_be_short_branch = 1 << 7,
844 Flag_avoid_back_to_back_before = 1 << 8,
845 Flag_avoid_back_to_back_after = 1 << 9,
846 Flag_has_call = 1 << 10,
847 Flag_has_swapped_edges = 1 << 11,
848 Flag_is_scheduled = 1 << 12,
849 Flag_is_expensive = 1 << 13,
850 Flag_is_predicated_vector = 1 << 14,
851 Flag_for_post_loop_opts_igvn = 1 << 15,
852 Flag_for_merge_stores_igvn = 1 << 16,
853 Flag_is_removed_by_peephole = 1 << 17,
854 Flag_is_predicated_using_blend = 1 << 18,
855 _last_flag = Flag_is_predicated_using_blend
856 };
857
858 class PD;
859
860 private:
861 juint _class_id;
862 juint _flags;
863
864 #ifdef ASSERT
865 static juint max_flags();
866 #endif
867
868 protected:
869 // These methods should be called from constructors only.
870 void init_class_id(juint c) {
871 _class_id = c; // cast out const
872 }
873 void init_flags(uint fl) {
874 assert(fl <= max_flags(), "invalid node flag");
875 _flags |= fl;
876 }
877 void clear_flag(uint fl) {
878 assert(fl <= max_flags(), "invalid node flag");
879 _flags &= ~fl;
880 }
881
882 public:
883 juint class_id() const { return _class_id; }
884
885 juint flags() const { return _flags; }
886
887 void add_flag(juint fl) { init_flags(fl); }
888
889 void remove_flag(juint fl) { clear_flag(fl); }
890
891 // Return a dense integer opcode number
892 virtual int Opcode() const;
893
894 // Virtual inherited Node size
895 virtual uint size_of() const;
896
897 // Other interesting Node properties
898 #define DEFINE_CLASS_QUERY(type) \
899 bool is_##type() const { \
900 return ((_class_id & ClassMask_##type) == Class_##type); \
901 } \
902 type##Node *as_##type() const { \
903 assert(is_##type(), "invalid node class: %s", Name()); \
904 return (type##Node*)this; \
905 } \
906 type##Node* isa_##type() const { \
907 return (is_##type()) ? as_##type() : nullptr; \
908 }
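// For example (a sketch), the three query flavors this macro generates for LoadNode:
//   if (n->is_Load()) { ... }        // cheap, non-virtual class test
//   LoadNode* ld  = n->as_Load();    // checked cast; asserts if n is not a Load
//   LoadNode* ld2 = n->isa_Load();   // cast-or-null; no assert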
909
910 DEFINE_CLASS_QUERY(AbstractLock)
911 DEFINE_CLASS_QUERY(Add)
912 DEFINE_CLASS_QUERY(AddP)
913 DEFINE_CLASS_QUERY(Allocate)
914 DEFINE_CLASS_QUERY(AllocateArray)
915 DEFINE_CLASS_QUERY(ArrayCopy)
916 DEFINE_CLASS_QUERY(BaseCountedLoop)
917 DEFINE_CLASS_QUERY(BaseCountedLoopEnd)
918 DEFINE_CLASS_QUERY(Blackhole)
919 DEFINE_CLASS_QUERY(Bool)
920 DEFINE_CLASS_QUERY(BoxLock)
921 DEFINE_CLASS_QUERY(Call)
922 DEFINE_CLASS_QUERY(CallDynamicJava)
923 DEFINE_CLASS_QUERY(CallJava)
924 DEFINE_CLASS_QUERY(CallLeaf)
925 DEFINE_CLASS_QUERY(CallLeafNoFP)
926 DEFINE_CLASS_QUERY(CallLeafPure)
927 DEFINE_CLASS_QUERY(CallRuntime)
928 DEFINE_CLASS_QUERY(CallStaticJava)
929 DEFINE_CLASS_QUERY(Catch)
930 DEFINE_CLASS_QUERY(CatchProj)
931 DEFINE_CLASS_QUERY(CheckCastPP)
932 DEFINE_CLASS_QUERY(CastII)
933 DEFINE_CLASS_QUERY(CastLL)
934 DEFINE_CLASS_QUERY(CastFF)
935 DEFINE_CLASS_QUERY(ConI)
936 DEFINE_CLASS_QUERY(CastPP)
937 DEFINE_CLASS_QUERY(ConstraintCast)
938 DEFINE_CLASS_QUERY(ClearArray)
939 DEFINE_CLASS_QUERY(CMove)
940 DEFINE_CLASS_QUERY(Cmp)
941 DEFINE_CLASS_QUERY(Convert)
942 DEFINE_CLASS_QUERY(CountedLoop)
943 DEFINE_CLASS_QUERY(CountedLoopEnd)
944 DEFINE_CLASS_QUERY(DecodeNarrowPtr)
945 DEFINE_CLASS_QUERY(DecodeN)
946 DEFINE_CLASS_QUERY(DecodeNKlass)
947 DEFINE_CLASS_QUERY(EncodeNarrowPtr)
948 DEFINE_CLASS_QUERY(EncodeP)
949 DEFINE_CLASS_QUERY(EncodePKlass)
950 DEFINE_CLASS_QUERY(FastLock)
951 DEFINE_CLASS_QUERY(FastUnlock)
952 DEFINE_CLASS_QUERY(FlatArrayCheck)
953 DEFINE_CLASS_QUERY(Halt)
954 DEFINE_CLASS_QUERY(If)
955 DEFINE_CLASS_QUERY(RangeCheck)
956 DEFINE_CLASS_QUERY(IfProj)
957 DEFINE_CLASS_QUERY(IfFalse)
958 DEFINE_CLASS_QUERY(IfTrue)
959 DEFINE_CLASS_QUERY(Initialize)
960 DEFINE_CLASS_QUERY(Jump)
961 DEFINE_CLASS_QUERY(JumpProj)
962 DEFINE_CLASS_QUERY(LongCountedLoop)
963 DEFINE_CLASS_QUERY(LongCountedLoopEnd)
964 DEFINE_CLASS_QUERY(Load)
965 DEFINE_CLASS_QUERY(LoadStore)
966 DEFINE_CLASS_QUERY(LoadStoreConditional)
967 DEFINE_CLASS_QUERY(Lock)
968 DEFINE_CLASS_QUERY(Loop)
969 DEFINE_CLASS_QUERY(LShift)
970 DEFINE_CLASS_QUERY(Mach)
971 DEFINE_CLASS_QUERY(MachBranch)
972 DEFINE_CLASS_QUERY(MachCall)
973 DEFINE_CLASS_QUERY(MachCallDynamicJava)
974 DEFINE_CLASS_QUERY(MachCallJava)
975 DEFINE_CLASS_QUERY(MachCallLeaf)
976 DEFINE_CLASS_QUERY(MachCallRuntime)
977 DEFINE_CLASS_QUERY(MachCallStaticJava)
978 DEFINE_CLASS_QUERY(MachConstantBase)
979 DEFINE_CLASS_QUERY(MachConstant)
980 DEFINE_CLASS_QUERY(MachGoto)
981 DEFINE_CLASS_QUERY(MachIf)
982 DEFINE_CLASS_QUERY(MachJump)
983 DEFINE_CLASS_QUERY(MachNullCheck)
984 DEFINE_CLASS_QUERY(MachProj)
985 DEFINE_CLASS_QUERY(MachProlog)
986 DEFINE_CLASS_QUERY(MachReturn)
987 DEFINE_CLASS_QUERY(MachSafePoint)
988 DEFINE_CLASS_QUERY(MachSpillCopy)
989 DEFINE_CLASS_QUERY(MachTemp)
990 DEFINE_CLASS_QUERY(MachMemBar)
991 DEFINE_CLASS_QUERY(MachMerge)
992 DEFINE_CLASS_QUERY(MachVEP)
993 DEFINE_CLASS_QUERY(Mem)
994 DEFINE_CLASS_QUERY(MemBar)
995 DEFINE_CLASS_QUERY(MemBarStoreStore)
996 DEFINE_CLASS_QUERY(MergeMem)
997 DEFINE_CLASS_QUERY(Move)
998 DEFINE_CLASS_QUERY(Mul)
999 DEFINE_CLASS_QUERY(Multi)
1000 DEFINE_CLASS_QUERY(MultiBranch)
1001 DEFINE_CLASS_QUERY(MulVL)
1002 DEFINE_CLASS_QUERY(Neg)
1003 DEFINE_CLASS_QUERY(NegV)
1004 DEFINE_CLASS_QUERY(NeverBranch)
1005 DEFINE_CLASS_QUERY(Opaque1)
1006 DEFINE_CLASS_QUERY(OpaqueNotNull)
1007 DEFINE_CLASS_QUERY(OpaqueInitializedAssertionPredicate)
1008 DEFINE_CLASS_QUERY(OpaqueTemplateAssertionPredicate)
1009 DEFINE_CLASS_QUERY(OpaqueLoopInit)
1010 DEFINE_CLASS_QUERY(OpaqueLoopStride)
1011 DEFINE_CLASS_QUERY(OpaqueMultiversioning)
1012 DEFINE_CLASS_QUERY(OuterStripMinedLoop)
1013 DEFINE_CLASS_QUERY(OuterStripMinedLoopEnd)
1014 DEFINE_CLASS_QUERY(Parm)
1015 DEFINE_CLASS_QUERY(ParsePredicate)
1016 DEFINE_CLASS_QUERY(PCTable)
1017 DEFINE_CLASS_QUERY(Phi)
1018 DEFINE_CLASS_QUERY(Proj)
1019 DEFINE_CLASS_QUERY(Reduction)
1020 DEFINE_CLASS_QUERY(Region)
1021 DEFINE_CLASS_QUERY(Root)
1022 DEFINE_CLASS_QUERY(SafePoint)
1023 DEFINE_CLASS_QUERY(SafePointScalarObject)
1024 DEFINE_CLASS_QUERY(SafePointScalarMerge)
1025 DEFINE_CLASS_QUERY(Start)
1026 DEFINE_CLASS_QUERY(Store)
1027 DEFINE_CLASS_QUERY(Sub)
1028 DEFINE_CLASS_QUERY(SubTypeCheck)
1029 DEFINE_CLASS_QUERY(Type)
1030 DEFINE_CLASS_QUERY(InlineType)
1031 DEFINE_CLASS_QUERY(LoadFlat)
1032 DEFINE_CLASS_QUERY(StoreFlat)
1033 DEFINE_CLASS_QUERY(Vector)
1034 DEFINE_CLASS_QUERY(VectorMaskCmp)
1035 DEFINE_CLASS_QUERY(VectorUnbox)
1036 DEFINE_CLASS_QUERY(VectorReinterpret)
1037 DEFINE_CLASS_QUERY(CompressV)
1038 DEFINE_CLASS_QUERY(ExpandV)
1039 DEFINE_CLASS_QUERY(CompressM)
1040 DEFINE_CLASS_QUERY(LoadVector)
1041 DEFINE_CLASS_QUERY(LoadVectorGather)
1042 DEFINE_CLASS_QUERY(LoadVectorMasked)
1043 DEFINE_CLASS_QUERY(LoadVectorGatherMasked)
1044 DEFINE_CLASS_QUERY(StoreVector)
1045 DEFINE_CLASS_QUERY(StoreVectorScatter)
1046 DEFINE_CLASS_QUERY(StoreVectorMasked)
1047 DEFINE_CLASS_QUERY(StoreVectorScatterMasked)
1048 DEFINE_CLASS_QUERY(SaturatingVector)
1049 DEFINE_CLASS_QUERY(ShiftV)
1050 DEFINE_CLASS_QUERY(Unlock)
1051
1052 #undef DEFINE_CLASS_QUERY
1053
1054 // duplicate of is_MachSpillCopy()
1055 bool is_SpillCopy () const {
1056 return ((_class_id & ClassMask_MachSpillCopy) == Class_MachSpillCopy);
1057 }
1058
1059 bool is_Con () const { return (_flags & Flag_is_Con) != 0; }
1060 // Whether this data node is safe to leave in a dead loop during IGVN optimization.
1061 bool is_dead_loop_safe() const;
1062
1063 // is_Copy() returns copied edge index (0 or 1)
1064 uint is_Copy() const { return (_flags & Flag_is_Copy); }
1065
1066 virtual bool is_CFG() const { return false; }
1067
1068 // If this node is control-dependent on a test, can it be
1069 // rerouted to a dominating equivalent test? This is usually
1070 // true of non-CFG nodes, but can be false for operations which
1071 // depend for their correct sequencing on more than one test.
1072 // (In that case, hoisting to a dominating test may silently
1073 // skip some other important test.)
1074 virtual bool depends_only_on_test() const { assert(!is_CFG(), ""); return true; };
1075
1076 // When building basic blocks, I need to have a notion of block beginning
1077 // Nodes, next block selector Nodes (block enders), and next block
1078 // projections. These calls need to work on their machine equivalents. The
1079 // Ideal beginning Nodes are RootNode, RegionNode and StartNode.
1080 bool is_block_start() const {
1081 if ( is_Region() )
1082 return this == (const Node*)in(0);
1083 else
1084 return is_Start();
1085 }
1086
1087 // The Ideal control projection Nodes are IfTrue/IfFalse, JumpProjNode, Root,
1088 // Goto and Return. This call also returns the block ending Node.
1089 virtual const Node *is_block_proj() const;
1090
1091 // The node is a "macro" node which needs to be expanded before matching
1092 bool is_macro() const { return (_flags & Flag_is_macro) != 0; }
1093 // The node is expensive: the best control is set during loop opts
1094 bool is_expensive() const { return (_flags & Flag_is_expensive) != 0 && in(0) != nullptr; }
1095 // The node's original edge position is swapped.
1096 bool has_swapped_edges() const { return (_flags & Flag_has_swapped_edges) != 0; }
1097
1098 bool is_predicated_vector() const { return (_flags & Flag_is_predicated_vector) != 0; }
1099
1100 bool is_predicated_using_blend() const { return (_flags & Flag_is_predicated_using_blend) != 0; }
1101
1102 // Used in lcm to mark nodes that have been scheduled
1103 bool is_scheduled() const { return (_flags & Flag_is_scheduled) != 0; }
1104
1105 bool for_post_loop_opts_igvn() const { return (_flags & Flag_for_post_loop_opts_igvn) != 0; }
1106 bool for_merge_stores_igvn() const { return (_flags & Flag_for_merge_stores_igvn) != 0; }
1107
1108 // Is 'n' possibly a loop entry (i.e. a Parse Predicate projection)?
1109 static bool may_be_loop_entry(Node* n) {
1110 return n != nullptr && n->is_IfProj() && n->in(0)->is_ParsePredicate();
1111 }
1112
1113 //----------------- Optimization
1114
1115 // Get the worst-case Type output for this Node.
1116 virtual const class Type *bottom_type() const;
1117
1118 // If we find a better type for a node, try to record it permanently.
1119 // Return true if this node actually changed.
1120 // Be sure to do the hash_delete game in the "rehash" variant.
1121 void raise_bottom_type(const Type* new_type);
1122
1123 // Get the address type with which this node uses and/or defs memory,
1124 // or null if none. The address type is conservatively wide.
1125 // Returns non-null for calls, membars, loads, stores, etc.
1126 // Returns TypePtr::BOTTOM if the node touches memory "broadly".
1127 virtual const class TypePtr *adr_type() const { return nullptr; }
1128
1129 // Return an existing node which computes the same function as this node.
1130 // The optimistic combined algorithm requires this to return a Node which
1131 // is a small number of steps away (e.g., one of my inputs).
1132 virtual Node* Identity(PhaseGVN* phase);
1133
1134 // Return the set of values this Node can take on at runtime.
1135 virtual const Type* Value(PhaseGVN* phase) const;
1136
1137 // Return a node which is more "ideal" than the current node.
1138 // The invariants on this call are subtle. If in doubt, read the
1139 // treatise in node.cpp above the default implementation AND TEST WITH
1140 // -XX:VerifyIterativeGVN=1
1141 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1142
1143 // Some nodes have specific Ideal subgraph transformations only if they are
1144 // unique users of specific nodes. Such nodes should be put on IGVN worklist
1145 // for the transformations to happen.
1146 bool has_special_unique_user() const;
1147
1148 // Skip Proj and CatchProj node chains. Check for Null and Top.
1149 Node* find_exact_control(Node* ctrl);
1150
1151 // Results of the dominance analysis.
1152 enum class DomResult {
1153 NotDominate, // 'this' node does not dominate 'sub'.
1154 Dominate, // 'this' node dominates or is equal to 'sub'.
1155 EncounteredDeadCode // Result is undefined due to encountering dead code.
1156 };
1157 // Check if 'this' node dominates or is equal to 'sub'.
1158 DomResult dominates(Node* sub, Node_List &nlist);
1159
1160 bool remove_dead_region(PhaseGVN *phase, bool can_reshape);
1161 public:
1162
1163 // See if there is valid pipeline info
1164 static const Pipeline *pipeline_class();
1165 virtual const Pipeline *pipeline() const;
1166
1167 // Compute the latency from the def to this instruction of the ith input node
1168 uint latency(uint i);
1169
1170 // Hash & compare functions, for pessimistic value numbering
1171
1172 // If the hash function returns the special sentinel value NO_HASH,
1173 // the node is guaranteed never to compare equal to any other node.
1174 // If we accidentally generate a hash with value NO_HASH the node
1175 // won't go into the table and we'll lose a little optimization.
1176 static const uint NO_HASH = 0;
1177 virtual uint hash() const;
1178 virtual bool cmp( const Node &n ) const;
1179
1180 // Operation appears to be iteratively computed (such as an induction variable).
1181 // It is possible for this operation to return false for a loop-varying
1182 // value, if it appears (by local graph inspection) to be computed by a simple conditional.
1183 bool is_iteratively_computed();
1184
1185 // Determine if a node is a counted loop induction variable.
1186 // NOTE: The method is defined in "loopnode.cpp".
1187 bool is_cloop_ind_var() const;
1188
1189 // Return a node with opcode "opc" and same inputs as "this" if one can
1190 // be found; otherwise return null.
1191 Node* find_similar(int opc);
1192
1193 // Return the unique control out if only one. Null if none or more than one.
1194 Node* unique_ctrl_out_or_null() const;
1195 // Return the unique control out. Asserts if none or more than one control out.
1196 Node* unique_ctrl_out() const;
1197
1198 // Set control or add control as precedence edge
1199 void ensure_control_or_add_prec(Node* c);
1200 void add_prec_from(Node* n);
1201
1202 // Visit boundary uses of the node and apply a callback function for each.
1203 // Recursively traverse uses, stopping and applying the callback when
1204 // reaching a boundary node, defined by is_boundary. Note: the function
1205 // definition appears after the complete type definition of Node_List.
1206 template <typename Callback, typename Check>
1207 void visit_uses(Callback callback, Check is_boundary) const;
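// For example (a sketch; the two lambdas and 'worklist' are hypothetical):
//   n->visit_uses([&](Node* use) { worklist.push(use); },      // applied at each boundary use
//                 [] (Node* use) { return use->is_CFG(); });   // boundary predicate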
1208
1209 // Returns a clone of the current node that's pinned (if the current node is not) for nodes found in array accesses
1210 // (Load and range check CastII nodes).
1211 // This is used when an array access is made dependent on 2 or more range checks (range check smearing or Loop Predication).
1212 virtual Node* pin_array_access_node() const {
1213 return nullptr;
1214 }
1215
1216 //----------------- Code Generation
1217
1218 // Ideal register class for Matching. Zero means unmatched instruction
1219 // (these are cloned instead of converted to machine nodes).
1220 virtual uint ideal_reg() const;
1221
1222 static const uint NotAMachineReg; // must be > max. machine register
1223
1224 // Do we Match on this edge index or not? Generally false for Control
1225 // and true for everything else. Weird for calls & returns.
1226 virtual uint match_edge(uint idx) const;
1227
1228 // Register class output is returned in
1229 virtual const RegMask &out_RegMask() const;
1230 // Register class input is expected in
1231 virtual const RegMask &in_RegMask(uint) const;
1232 // Should we clone rather than spill this instruction?
1233 bool rematerialize() const;
1234
1235 // Return JVM State Object if this Node carries debug info, or null otherwise
1236 virtual JVMState* jvms() const;
1237
1238 // Print as assembly
1239 virtual void format( PhaseRegAlloc *, outputStream* st = tty ) const;
1240 // Emit bytes using C2_MacroAssembler
1241 virtual void emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const;
1242 // Size of instruction in bytes
1243 virtual uint size(PhaseRegAlloc *ra_) const;
1244
1245 // Convenience function to extract an integer constant from a node.
1246 // If it is not an integer constant (either Con, CastII, or Mach),
1247 // return value_if_unknown.
1248 jint find_int_con(jint value_if_unknown) const {
1249 const TypeInt* t = find_int_type();
1250 return (t != nullptr && t->is_con()) ? t->get_con() : value_if_unknown;
1251 }
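// For example (a sketch; 'shift_node' is a hypothetical local):
//   jint shift = shift_node->find_int_con(-1);
//   if (shift >= 0) { /* the shift amount is a compile-time constant */ }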
1252 // Return the constant, knowing it is an integer constant already
1253 jint get_int() const {
1254 const TypeInt* t = find_int_type();
1255 guarantee(t != nullptr, "must be con");
1256 return t->get_con();
1257 }
1258 // Here's where the work is done. Can produce non-constant int types too.
1259 const TypeInt* find_int_type() const;
1260 const TypeInteger* find_integer_type(BasicType bt) const;
1261
1262 // Same thing for long (and intptr_t, via type.hpp):
1263 jlong get_long() const {
1264 const TypeLong* t = find_long_type();
1265 guarantee(t != nullptr, "must be con");
1266 return t->get_con();
1267 }
1268 jlong find_long_con(jint value_if_unknown) const {
1269 const TypeLong* t = find_long_type();
1270 return (t != nullptr && t->is_con()) ? t->get_con() : value_if_unknown;
1271 }
1272 const TypeLong* find_long_type() const;
1273
1274 jlong get_integer_as_long(BasicType bt) const {
1275 const TypeInteger* t = find_integer_type(bt);
1276 guarantee(t != nullptr && t->is_con(), "must be con");
1277 return t->get_con_as_long(bt);
1278 }
1279 jlong find_integer_as_long(BasicType bt, jlong value_if_unknown) const {
1280 const TypeInteger* t = find_integer_type(bt);
1281 if (t == nullptr || !t->is_con()) return value_if_unknown;
1282 return t->get_con_as_long(bt);
1283 }
1284 const TypePtr* get_ptr_type() const;
1285
1286 // These guys are called by code generated by ADLC:
1287 intptr_t get_ptr() const;
1288 intptr_t get_narrowcon() const;
1289 jdouble getd() const;
1290 jfloat getf() const;
1291 jshort geth() const;
1292
1293 // Nodes which are pinned into basic blocks
1294 virtual bool pinned() const { return false; }
1295
1296 // Nodes which use memory without consuming it, hence need antidependences
1297 // More specifically, needs_anti_dependence_check returns true iff the node
1298 // (a) does a load, and (b) does not perform a store (except perhaps to a
1299 // stack slot or some other unaliased location).
1300 bool needs_anti_dependence_check() const;
1301
1302 // Return which operand this instruction may cisc-spill. In other words,
1303 // return operand position that can convert from reg to memory access
1304 virtual int cisc_operand() const { return AdlcVMDeps::Not_cisc_spillable; }
1305 bool is_cisc_alternate() const { return (_flags & Flag_is_cisc_alternate) != 0; }
1306
1307 // Whether this is a memory-writing machine node.
1308 bool is_memory_writer() const { return is_Mach() && bottom_type()->has_memory(); }
1309
1310 // Whether this is a memory phi node
1311 bool is_memory_phi() const { return is_Phi() && bottom_type() == Type::MEMORY; }
1312
1313 bool is_div_or_mod(BasicType bt) const;
1314
1315 bool is_data_proj_of_pure_function(const Node* maybe_pure_function) const;
1316
1317 //----------------- Printing, etc
1318 #ifndef PRODUCT
1319 public:
1320 Node* find(int idx, bool only_ctrl = false); // Search the graph for the given idx.
1321 Node* find_ctrl(int idx); // Search control ancestors for the given idx.
1322 void dump_bfs(const int max_distance, Node* target, const char* options, outputStream* st, const frame* fr = nullptr) const;
1323 void dump_bfs(const int max_distance, Node* target, const char* options) const; // directly to tty
1324 void dump_bfs(const int max_distance) const; // dump_bfs(max_distance, nullptr, nullptr)
1325 void dump_bfs(const int max_distance, Node* target, const char* options, void* sp, void* fp, void* pc) const;
1326 class DumpConfig {
1327 public:
1328 // overridden to implement coloring of node idx
1329 virtual void pre_dump(outputStream *st, const Node* n) = 0;
1330 virtual void post_dump(outputStream *st) = 0;
1331 };
1332 void dump_idx(bool align = false, outputStream* st = tty, DumpConfig* dc = nullptr) const;
1333 void dump_name(outputStream* st = tty, DumpConfig* dc = nullptr) const;
1334 void dump() const; // print node with newline
1335 void dump(const char* suffix, bool mark = false, outputStream* st = tty, DumpConfig* dc = nullptr) const; // Print this node.
1336 void dump(int depth) const; // Print this node, recursively to depth d
1337 void dump_ctrl(int depth) const; // Print control nodes, to depth d
1338 void dump_comp() const; // Print this node in compact representation.
1339 // Print this node in compact representation.
1340 void dump_comp(const char* suffix, outputStream *st = tty) const;
1341 private:
1342 virtual void dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const; // Print required-edge info
1343 virtual void dump_prec(outputStream* st = tty, DumpConfig* dc = nullptr) const; // Print precedence-edge info
1344 virtual void dump_out(outputStream* st = tty, DumpConfig* dc = nullptr) const; // Print the output edge info
1345 public:
1346 virtual void dump_spec(outputStream *st) const {}; // Print per-node info
1347 // Print compact per-node info
1348 virtual void dump_compact_spec(outputStream *st) const { dump_spec(st); }
1349
1350 static void verify(int verify_depth, VectorSet& visited, Node_List& worklist);
1351
1352 // This call defines a class-unique string used to identify class instances
1353 virtual const char *Name() const;
1354
1355 void dump_format(PhaseRegAlloc *ra) const; // debug access to MachNode::format(...)
1356 static bool in_dump() { return Compile::current()->_in_dump_cnt > 0; } // check if we are in a dump call
1357 #endif
1358 #ifdef ASSERT
1359 void verify_construction();
1360 bool verify_jvms(const JVMState* jvms) const;
1361
1362 Node* _debug_orig; // Original version of this, if any.
1363 Node* debug_orig() const { return _debug_orig; }
1364 void set_debug_orig(Node* orig); // _debug_orig = orig
1365 void dump_orig(outputStream *st, bool print_key = true) const;
1366
1367 uint64_t _debug_idx; // Unique value assigned to every node.
1368 uint64_t debug_idx() const { return _debug_idx; }
1369 void set_debug_idx(uint64_t debug_idx) { _debug_idx = debug_idx; }
1370
1371 int _hash_lock; // Barrier to modifications of nodes in the hash table
1372 void enter_hash_lock() { ++_hash_lock; assert(_hash_lock < 99, "in too many hash tables?"); }
1373 void exit_hash_lock() { --_hash_lock; assert(_hash_lock >= 0, "mispaired hash locks"); }
1374
1375 static void init_NodeProperty();
1376
1377 #if OPTO_DU_ITERATOR_ASSERT
1378 const Node* _last_del; // The last deleted node.
1379 uint _del_tick; // Bumped when a deletion happens.
1380 #endif
1381 #endif
1382 };
1383
1384 inline bool not_a_node(const Node* n) {
1385 if (n == nullptr) return true;
1386 if (((intptr_t)n & 1) != 0) return true; // uninitialized, etc.
1387 if (*(address*)n == badAddress) return true; // kill by Node::destruct
1388 return false;
1389 }
1390
1391 //-----------------------------------------------------------------------------
1392 // Iterators over DU info, and associated Node functions.
1393
1394 #if OPTO_DU_ITERATOR_ASSERT
1395
1396 // Common code for assertion checking on DU iterators.
1397 class DUIterator_Common {
1398 #ifdef ASSERT
1399 protected:
1400 bool _vdui; // cached value of VerifyDUIterators
1401 const Node* _node; // the node containing the _out array
1402 uint _outcnt; // cached node->_outcnt
1403 uint _del_tick; // cached node->_del_tick
1404 Node* _last; // last value produced by the iterator
1405
1406 void sample(const Node* node); // used by c'tor to set up for verifies
1407 void verify(const Node* node, bool at_end_ok = false);
1408 void verify_resync();
1409 void reset(const DUIterator_Common& that);
1410
1411 // The VDUI_ONLY macro protects code conditionalized on VerifyDUIterators
1412 #define I_VDUI_ONLY(i,x) { if ((i)._vdui) { x; } }
1413 #else
1414 #define I_VDUI_ONLY(i,x) { }
1415 #endif //ASSERT
1416 };
1417
1418 #define VDUI_ONLY(x) I_VDUI_ONLY(*this, x)
1419
1420 // Default DU iterator. Allows appends onto the out array.
1421 // Allows deletion from the out array only at the current point.
1422 // Usage:
1423 // for (DUIterator i = x->outs(); x->has_out(i); i++) {
1424 // Node* y = x->out(i);
1425 // ...
1426 // }
1427 // Compiles in product mode to an unsigned integer index, which indexes
1428 // onto a repeatedly reloaded base pointer of x->_out. The loop predicate
1429 // also reloads x->_outcnt. If you delete, you must perform "--i" just
1430 // before continuing the loop. You must delete only the last-produced
1431 // edge. You must delete only a single copy of the last-produced edge,
1432 // or else you must delete all copies at once (the first time the edge
1433 // is produced by the iterator).
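// A deletion sketch (the edge x->y is removed inside the loop body, e.g. by a
// set_req on y; 'j' and 'other' are hypothetical):
//   for (DUIterator i = x->outs(); x->has_out(i); i++) {
//     Node* y = x->out(i);
//     if (...) { y->set_req(j, other); --i; }  // back up over the deleted edge
//   }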
1434 class DUIterator : public DUIterator_Common {
1435 friend class Node;
1436
1437 // This is the index which provides the product-mode behavior.
1438 // Whatever the product-mode version of the system does to the
1439 // DUI index is done to this index. All other fields in
1440 // this class are used only for assertion checking.
1441 uint _idx;
1442
1443 #ifdef ASSERT
1444 uint _refresh_tick; // Records the refresh activity.
1445
1446 void sample(const Node* node); // Initialize _refresh_tick etc.
1447 void verify(const Node* node, bool at_end_ok = false);
1448 void verify_increment(); // Verify an increment operation.
1449 void verify_resync(); // Verify that we can back up over a deletion.
1450 void verify_finish(); // Verify that the loop terminated properly.
1451 void refresh(); // Resample verification info.
1452 void reset(const DUIterator& that); // Resample after assignment.
1453 #endif
1454
1455 DUIterator(const Node* node, int dummy_to_avoid_conversion)
1456 { _idx = 0; DEBUG_ONLY(sample(node)); }
1457
1458 public:
1459 // initialize to garbage; clear _vdui to disable asserts
1460 DUIterator()
1461 { /*initialize to garbage*/ DEBUG_ONLY(_vdui = false); }
1462
1463 DUIterator(const DUIterator& that)
1464 { _idx = that._idx; DEBUG_ONLY(_vdui = false; reset(that)); }
1465
1466 void operator++(int dummy_to_specify_postfix_op)
1467 { _idx++; VDUI_ONLY(verify_increment()); }
1468
1469 void operator--()
1470 { VDUI_ONLY(verify_resync()); --_idx; }
1471
1472 ~DUIterator()
1473 { VDUI_ONLY(verify_finish()); }
1474
1475 void operator=(const DUIterator& that)
1476 { _idx = that._idx; DEBUG_ONLY(reset(that)); }
1477 };
1478
1479 DUIterator Node::outs() const
1480 { return DUIterator(this, 0); }
1481 DUIterator& Node::refresh_out_pos(DUIterator& i) const
1482 { I_VDUI_ONLY(i, i.refresh()); return i; }
1483 bool Node::has_out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this, true)); return i._idx < _outcnt; }
1485 Node* Node::out(DUIterator& i) const
1486 { I_VDUI_ONLY(i, i.verify(this)); return DEBUG_ONLY(i._last=) _out[i._idx]; }
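
// A minimal sketch of the DUIterator deletion protocol described above.  The
// input index 'j' and the replacement node 'z' are placeholders; any operation
// that removes the edge from x to y behaves the same way:
//
//   for (DUIterator i = x->outs(); x->has_out(i); i++) {
//     Node* y = x->out(i);
//     if (/* y should stop using x */) {
//       y->set_req(j, z);   // drops x from y's inputs, shrinking x->_out
//       --i;                // back up over the just-deleted edge
//     }
//   }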
1487
1488
1489 // Faster DU iterator. Disallows insertions into the out array.
1490 // Allows deletion from the out array only at the current point.
1491 // Usage:
1492 // for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) {
1493 // Node* y = x->fast_out(i);
1494 // ...
1495 // }
1496 // Compiles in product mode to raw Node** pointer arithmetic, with
1497 // no reloading of pointers from the original node x. If you delete,
1498 // you must perform "--i; --imax" just before continuing the loop.
1499 // If you delete multiple copies of the same edge, you must decrement
1500 // imax, but not i, multiple times: "--i, imax -= num_edges".
1501 class DUIterator_Fast : public DUIterator_Common {
1502 friend class Node;
1503 friend class DUIterator_Last;
1504
1505 // This is the pointer which provides the product-mode behavior.
1506 // Whatever the product-mode version of the system does to the
1507 // DUI pointer is done to this pointer. All other fields in
1508 // this class are used only for assertion checking.
1509 Node** _outp;
1510
1511 #ifdef ASSERT
1512 void verify(const Node* node, bool at_end_ok = false);
1513 void verify_limit();
1514 void verify_resync();
1515 void verify_relimit(uint n);
1516 void reset(const DUIterator_Fast& that);
1517 #endif
1518
1519 // Note: offset must be signed, since -1 is sometimes passed
1520 DUIterator_Fast(const Node* node, ptrdiff_t offset)
1521 { _outp = node->_out + offset; DEBUG_ONLY(sample(node)); }
1522
1523 public:
1524 // initialize to garbage; clear _vdui to disable asserts
1525 DUIterator_Fast()
1526 { /*initialize to garbage*/ DEBUG_ONLY(_vdui = false); }
1527
1528 DUIterator_Fast(const DUIterator_Fast& that)
1529 { _outp = that._outp; DEBUG_ONLY(_vdui = false; reset(that)); }
1530
1531 void operator++(int dummy_to_specify_postfix_op)
1532 { _outp++; VDUI_ONLY(verify(_node, true)); }
1533
1534 void operator--()
1535 { VDUI_ONLY(verify_resync()); --_outp; }
1536
1537 void operator-=(uint n) // applied to the limit only
1538 { _outp -= n; VDUI_ONLY(verify_relimit(n)); }
1539
1540 bool operator<(DUIterator_Fast& limit) {
1541 I_VDUI_ONLY(*this, this->verify(_node, true));
1542 I_VDUI_ONLY(limit, limit.verify_limit());
1543 return _outp < limit._outp;
1544 }
1545
1546 void operator=(const DUIterator_Fast& that)
1547 { _outp = that._outp; DEBUG_ONLY(reset(that)); }
1548 };
1549
1550 DUIterator_Fast Node::fast_outs(DUIterator_Fast& imax) const {
1551 // Assign a limit pointer to the reference argument:
1552 imax = DUIterator_Fast(this, (ptrdiff_t)_outcnt);
1553 // Return the base pointer:
1554 return DUIterator_Fast(this, 0);
1555 }
1556 Node* Node::fast_out(DUIterator_Fast& i) const {
1557 I_VDUI_ONLY(i, i.verify(this));
1558 return DEBUG_ONLY(i._last=) *i._outp;
1559 }
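
// A minimal sketch of the DUIterator_Fast deletion protocol described above;
// again 'j' and 'z' are placeholders for whatever removes the edge from x to y:
//
//   for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) {
//     Node* y = x->fast_out(i);
//     if (/* y should stop using x */) {
//       y->set_req(j, z);   // removes one copy of the edge x->y
//       --i; --imax;        // back up and shrink the cached limit
//     }
//   }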
1560
1561
1562 // Faster DU iterator. Requires each successive edge to be removed.
1563 // Does not allow insertion of any edges.
1564 // Usage:
1565 // for (DUIterator_Last imin, i = x->last_outs(imin); i >= imin; i -= num_edges) {
1566 // Node* y = x->last_out(i);
1567 // ...
1568 // }
1569 // Compiles in product mode to raw Node** pointer arithmetic, with
1570 // no reloading of pointers from the original node x.
1571 class DUIterator_Last : private DUIterator_Fast {
1572 friend class Node;
1573
1574 #ifdef ASSERT
1575 void verify(const Node* node, bool at_end_ok = false);
1576 void verify_limit();
1577 void verify_step(uint num_edges);
1578 #endif
1579
1580 // Note: offset must be signed, since -1 is sometimes passed
1581 DUIterator_Last(const Node* node, ptrdiff_t offset)
1582 : DUIterator_Fast(node, offset) { }
1583
1584 void operator++(int dummy_to_specify_postfix_op) {} // do not use
1585 void operator<(int) {} // do not use
1586
1587 public:
1588 DUIterator_Last() { }
1589 // initialize to garbage
1590
1591 DUIterator_Last(const DUIterator_Last& that) = default;
1592
1593 void operator--()
1594 { _outp--; VDUI_ONLY(verify_step(1)); }
1595
1596 void operator-=(uint n)
1597 { _outp -= n; VDUI_ONLY(verify_step(n)); }
1598
1599 bool operator>=(DUIterator_Last& limit) {
1600 I_VDUI_ONLY(*this, this->verify(_node, true));
1601 I_VDUI_ONLY(limit, limit.verify_limit());
1602 return _outp >= limit._outp;
1603 }
1604
1605 DUIterator_Last& operator=(const DUIterator_Last& that) = default;
1606 };
1607
1608 DUIterator_Last Node::last_outs(DUIterator_Last& imin) const {
1609 // Assign a limit pointer to the reference argument:
1610 imin = DUIterator_Last(this, 0);
1611 // Return the initial pointer:
1612 return DUIterator_Last(this, (ptrdiff_t)_outcnt - 1);
1613 }
1614 Node* Node::last_out(DUIterator_Last& i) const {
1615 I_VDUI_ONLY(i, i.verify(this));
1616 return DEBUG_ONLY(i._last=) *i._outp;
1617 }
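
// A minimal sketch of DUIterator_Last usage.  Every produced edge must be
// removed before stepping; here each iteration is assumed to remove exactly
// one copy of the edge from x to y ('j' and 'z' are placeholders), so the
// step width is 1:
//
//   for (DUIterator_Last imin, i = x->last_outs(imin); i >= imin; i -= 1) {
//     Node* y = x->last_out(i);
//     y->set_req(j, z);   // removes x from y's inputs
//   }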
1618
1619 #endif //OPTO_DU_ITERATOR_ASSERT
1620
1621 #undef I_VDUI_ONLY
1622 #undef VDUI_ONLY
1623
1624 // An Iterator that truly follows the iterator pattern. Doesn't
1625 // support deletion but could be made to.
1626 //
1627 // for (SimpleDUIterator i(n); i.has_next(); i.next()) {
//     Node* m = i.get();
//     ...
//   }
//
1630 class SimpleDUIterator : public StackObj {
1631 private:
1632 Node* node;
1633 DUIterator_Fast imax;
1634 DUIterator_Fast i;
1635 public:
1636 SimpleDUIterator(Node* n): node(n), i(n->fast_outs(imax)) {}
1637 bool has_next() { return i < imax; }
1638 void next() { i++; }
1639 Node* get() { return node->fast_out(i); }
1640 };
1641
1642
1643 //-----------------------------------------------------------------------------
1644 // Map dense integer indices to Nodes. Uses classic doubling-array trick.
1645 // Abstractly provides an infinite array of Node*'s, initialized to null.
// Note that the constructor just zeros things, and since the storage is
// Arena-allocated, no destructor is needed to reclaim it.
1648 class Node_Array : public AnyObj {
1649 protected:
1650 Arena* _a; // Arena to allocate in
1651 uint _max;
1652 Node** _nodes;
1653 ReallocMark _nesting; // Safety checks for arena reallocation
1654
1655 // Grow array to required capacity
1656 void maybe_grow(uint i) {
1657 _nesting.check(_a); // Check if a potential reallocation in the arena is safe
1658 if (i >= _max) {
1659 grow(i);
1660 }
1661 }
1662 void grow(uint i);
1663
1664 public:
1665 Node_Array(Arena* a, uint max = OptoNodeListSize) : _a(a), _max(max) {
1666 _nodes = NEW_ARENA_ARRAY(a, Node*, max);
1667 clear();
1668 }
1669 Node_Array() : Node_Array(Thread::current()->resource_area()) {}
1670
1671 NONCOPYABLE(Node_Array);
1672 Node_Array& operator=(Node_Array&&) = delete;
  // Allow move constructor for && (e.g. capturing the return value of a function)
1674 Node_Array(Node_Array&&) = default;
1675
1676 Node *operator[] ( uint i ) const // Lookup, or null for not mapped
1677 { return (i<_max) ? _nodes[i] : (Node*)nullptr; }
1678 Node* at(uint i) const { assert(i<_max,"oob"); return _nodes[i]; }
1679 Node** adr() { return _nodes; }
1680 // Extend the mapping: index i maps to Node *n.
1681 void map( uint i, Node *n ) { maybe_grow(i); _nodes[i] = n; }
1682 void insert( uint i, Node *n );
1683 void remove( uint i ); // Remove, preserving order
1684 // Clear all entries in _nodes to null but keep storage
1685 void clear() {
1686 Copy::zero_to_bytes(_nodes, _max * sizeof(Node*));
1687 }
1688
1689 uint max() const { return _max; }
1690 void dump() const;
1691 };
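
// A minimal usage sketch (the arena and the node 'n' are arbitrary):
//
//   Node_Array map(Thread::current()->resource_area());
//   map.map(n->_idx, n);        // grows the backing array on demand
//   Node* same = map[n->_idx];  // operator[] returns null for unmapped indices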
1692
1693 class Node_List : public Node_Array {
1694 uint _cnt;
1695 public:
1696 Node_List(uint max = OptoNodeListSize) : Node_Array(Thread::current()->resource_area(), max), _cnt(0) {}
1697 Node_List(Arena *a, uint max = OptoNodeListSize) : Node_Array(a, max), _cnt(0) {}
1698
1699 NONCOPYABLE(Node_List);
1700 Node_List& operator=(Node_List&&) = delete;
  // Allow move constructor for && (e.g. capturing the return value of a function)
1702 Node_List(Node_List&&) = default;
1703
1704 bool contains(const Node* n) const {
1705 for (uint e = 0; e < size(); e++) {
1706 if (at(e) == n) return true;
1707 }
1708 return false;
1709 }
1710 void insert( uint i, Node *n ) { Node_Array::insert(i,n); _cnt++; }
1711 void remove( uint i ) { Node_Array::remove(i); _cnt--; }
1712 void push( Node *b ) { map(_cnt++,b); }
1713 void yank( Node *n ); // Find and remove
1714 Node *pop() { return _nodes[--_cnt]; }
1715 void clear() { _cnt = 0; Node_Array::clear(); } // retain storage
1716 void copy(const Node_List& from) {
1717 if (from._max > _max) {
1718 grow(from._max);
1719 }
1720 _cnt = from._cnt;
1721 Copy::conjoint_words_to_higher((HeapWord*)&from._nodes[0], (HeapWord*)&_nodes[0], from._max * sizeof(Node*));
1722 }
1723
1724 uint size() const { return _cnt; }
1725 void dump() const;
1726 void dump_simple() const;
1727 };
1728
1729 // Definition must appear after complete type definition of Node_List
1730 template <typename Callback, typename Check>
1731 void Node::visit_uses(Callback callback, Check is_boundary) const {
1732 ResourceMark rm;
1733 VectorSet visited;
1734 Node_List worklist;
1735
1736 // The initial worklist consists of the direct uses
1737 for (DUIterator_Fast kmax, k = fast_outs(kmax); k < kmax; k++) {
1738 Node* out = fast_out(k);
1739 if (!visited.test_set(out->_idx)) { worklist.push(out); }
1740 }
1741
1742 while (worklist.size() > 0) {
1743 Node* use = worklist.pop();
1744 // Apply callback on boundary nodes
1745 if (is_boundary(use)) {
1746 callback(use);
1747 } else {
1748 // Not a boundary node, continue search
1749 for (DUIterator_Fast kmax, k = use->fast_outs(kmax); k < kmax; k++) {
1750 Node* out = use->fast_out(k);
1751 if (!visited.test_set(out->_idx)) { worklist.push(out); }
1752 }
1753 }
1754 }
1755 }
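
// A minimal usage sketch of visit_uses; the two lambdas are arbitrary callables
// taking a Node* (here the boundary is "any CFG node"):
//
//   n->visit_uses([](Node* use) { /* act on each boundary use */ },
//                 [](Node* use) { return use->is_CFG(); });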
1756
1757
1758 //------------------------------Unique_Node_List-------------------------------
1759 class Unique_Node_List : public Node_List {
1760 VectorSet _in_worklist;
  uint _clock_index;            // Index in the list from which to pop next
1762 public:
1763 Unique_Node_List() : Node_List(), _clock_index(0) {}
1764 Unique_Node_List(Arena *a) : Node_List(a), _in_worklist(a), _clock_index(0) {}
1765
1766 NONCOPYABLE(Unique_Node_List);
1767 Unique_Node_List& operator=(Unique_Node_List&&) = delete;
  // Allow move constructor for && (e.g. capturing the return value of a function)
1769 Unique_Node_List(Unique_Node_List&&) = default;
1770
1771 void remove( Node *n );
1772 bool member(const Node* n) const { return _in_worklist.test(n->_idx) != 0; }
1773 VectorSet& member_set(){ return _in_worklist; }
1774
1775 void push(Node* b) {
1776 if( !_in_worklist.test_set(b->_idx) )
1777 Node_List::push(b);
1778 }
1779 void push_non_cfg_inputs_of(const Node* node) {
1780 for (uint i = 1; i < node->req(); i++) {
1781 Node* input = node->in(i);
1782 if (input != nullptr && !input->is_CFG()) {
1783 push(input);
1784 }
1785 }
1786 }
1787
1788 void push_outputs_of(const Node* node) {
1789 for (DUIterator_Fast imax, i = node->fast_outs(imax); i < imax; i++) {
1790 Node* output = node->fast_out(i);
1791 push(output);
1792 }
1793 }
1794
1795 Node *pop() {
1796 if( _clock_index >= size() ) _clock_index = 0;
1797 Node *b = at(_clock_index);
1798 map( _clock_index, Node_List::pop());
    if (size() != 0) _clock_index++; // Don't advance past an emptied list; refills start from 0
1800 _in_worklist.remove(b->_idx);
1801 return b;
1802 }
1803 Node *remove(uint i) {
1804 Node *b = Node_List::at(i);
1805 _in_worklist.remove(b->_idx);
1806 map(i,Node_List::pop());
1807 return b;
1808 }
1809 void yank(Node *n) {
1810 _in_worklist.remove(n->_idx);
1811 Node_List::yank(n);
1812 }
1813 void clear() {
1814 _in_worklist.clear(); // Discards storage but grows automatically
1815 Node_List::clear();
1816 _clock_index = 0;
1817 }
1818 void ensure_empty() {
1819 assert(size() == 0, "must be empty");
1820 clear(); // just in case
1821 }
1822
1823 // Used after parsing to remove useless nodes before Iterative GVN
1824 void remove_useless_nodes(VectorSet& useful);
1825
1826 // If the idx of the Nodes change, we must recompute the VectorSet
1827 void recompute_idx_set() {
1828 _in_worklist.clear();
1829 for (uint i = 0; i < size(); i++) {
1830 Node* n = at(i);
1831 _in_worklist.set(n->_idx);
1832 }
1833 }
1834
1835 #ifdef ASSERT
1836 bool is_subset_of(Unique_Node_List& other) {
1837 for (uint i = 0; i < size(); i++) {
1838 Node* n = at(i);
1839 if (!other.member(n)) {
1840 return false;
1841 }
1842 }
1843 return true;
1844 }
1845 #endif
1846
1847 bool contains(const Node* n) const {
1848 fatal("use faster member() instead");
1849 return false;
1850 }
1851
1852 #ifndef PRODUCT
1853 void print_set() const { _in_worklist.print(); }
1854 #endif
1855 };
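
// A minimal worklist sketch (iterate to a fixpoint over the transitive outputs
// of 'root'; nodes pushed more than once are ignored):
//
//   ResourceMark rm;
//   Unique_Node_List worklist;
//   worklist.push(root);
//   for (uint i = 0; i < worklist.size(); i++) {
//     Node* n = worklist[i];
//     worklist.push_outputs_of(n);
//   }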
1856
1857 // Unique_Mixed_Node_List
1858 // unique: nodes are added only once
1859 // mixed: allow new and old nodes
1860 class Unique_Mixed_Node_List : public ResourceObj {
1861 public:
1862 Unique_Mixed_Node_List() : _visited_set(cmpkey, hashkey) {}
1863
1864 void add(Node* node) {
1865 if (not_a_node(node)) {
1866 return; // Gracefully handle null, -1, 0xabababab, etc.
1867 }
1868 if (_visited_set[node] == nullptr) {
1869 _visited_set.Insert(node, node);
1870 _worklist.push(node);
1871 }
1872 }
1873
1874 Node* operator[] (uint i) const {
1875 return _worklist[i];
1876 }
1877
1878 size_t size() {
1879 return _worklist.size();
1880 }
1881
1882 private:
1883 Dict _visited_set;
1884 Node_List _worklist;
1885 };
1886
1887 // Inline definition of Compile::record_for_igvn must be deferred to this point.
1888 inline void Compile::record_for_igvn(Node* n) {
1889 _igvn_worklist->push(n);
1890 }
1891
1892 // Inline definition of Compile::remove_for_igvn must be deferred to this point.
1893 inline void Compile::remove_for_igvn(Node* n) {
1894 _igvn_worklist->remove(n);
1895 }
1896
1897 //------------------------------Node_Stack-------------------------------------
1898 class Node_Stack {
1899 protected:
1900 struct INode {
1901 Node *node; // Processed node
1902 uint indx; // Index of next node's child
1903 };
1904 INode *_inode_top; // tos, stack grows up
1905 INode *_inode_max; // End of _inodes == _inodes + _max
1906 INode *_inodes; // Array storage for the stack
1907 Arena *_a; // Arena to allocate in
1908 ReallocMark _nesting; // Safety checks for arena reallocation
1909
1910 void maybe_grow() {
1911 _nesting.check(_a); // Check if a potential reallocation in the arena is safe
1912 if (_inode_top >= _inode_max) {
1913 grow();
1914 }
1915 }
1916 void grow();
1917
1918 public:
1919 Node_Stack(int size) {
1920 size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
1921 _a = Thread::current()->resource_area();
1922 _inodes = NEW_ARENA_ARRAY( _a, INode, max );
1923 _inode_max = _inodes + max;
1924 _inode_top = _inodes - 1; // stack is empty
1925 }
1926
1927 Node_Stack(Arena *a, int size) : _a(a) {
1928 size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
1929 _inodes = NEW_ARENA_ARRAY( _a, INode, max );
1930 _inode_max = _inodes + max;
1931 _inode_top = _inodes - 1; // stack is empty
1932 }
1933
1934 void pop() {
1935 assert(_inode_top >= _inodes, "node stack underflow");
1936 --_inode_top;
1937 }
1938 void push(Node *n, uint i) {
1939 ++_inode_top;
1940 maybe_grow();
1941 INode *top = _inode_top; // optimization
1942 top->node = n;
1943 top->indx = i;
1944 }
1945 Node *node() const {
1946 return _inode_top->node;
1947 }
1948 Node* node_at(uint i) const {
1949 assert(_inodes + i <= _inode_top, "in range");
1950 return _inodes[i].node;
1951 }
1952 uint index() const {
1953 return _inode_top->indx;
1954 }
1955 uint index_at(uint i) const {
1956 assert(_inodes + i <= _inode_top, "in range");
1957 return _inodes[i].indx;
1958 }
1959 void set_node(Node *n) {
1960 _inode_top->node = n;
1961 }
1962 void set_index(uint i) {
1963 _inode_top->indx = i;
1964 }
1965 uint size_max() const { return (uint)pointer_delta(_inode_max, _inodes, sizeof(INode)); } // Max size
1966 uint size() const { return (uint)pointer_delta((_inode_top+1), _inodes, sizeof(INode)); } // Current size
1967 bool is_nonempty() const { return (_inode_top >= _inodes); }
1968 bool is_empty() const { return (_inode_top < _inodes); }
1969 void clear() { _inode_top = _inodes - 1; } // retain storage
1970
1971 // Node_Stack is used to map nodes.
1972 Node* find(uint idx) const;
1973
1974 NONCOPYABLE(Node_Stack);
1975 };
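
// A minimal sketch of the usual (node, child-index) depth-first walk built on
// Node_Stack; the start node is arbitrary and visited-set handling is omitted:
//
//   ResourceMark rm;
//   Node_Stack stack(16);
//   stack.push(root, 0);
//   while (stack.is_nonempty()) {
//     Node* n = stack.node();
//     uint  i = stack.index();
//     if (i < n->req()) {
//       stack.set_index(i + 1);
//       Node* in = n->in(i);
//       if (in != nullptr) {
//         stack.push(in, 0);   // descend into the next input
//       }
//     } else {
//       stack.pop();           // all inputs of n have been handled
//     }
//   }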
1976
1977
1978 //-----------------------------Node_Notes--------------------------------------
1979 // Debugging or profiling annotations loosely and sparsely associated
1980 // with some nodes. See Compile::node_notes_at for the accessor.
1981 class Node_Notes {
1982 JVMState* _jvms;
1983
1984 public:
1985 Node_Notes(JVMState* jvms = nullptr) {
1986 _jvms = jvms;
1987 }
1988
1989 JVMState* jvms() { return _jvms; }
1990 void set_jvms(JVMState* x) { _jvms = x; }
1991
1992 // True if there is nothing here.
1993 bool is_clear() {
1994 return (_jvms == nullptr);
1995 }
1996
1997 // Make there be nothing here.
1998 void clear() {
1999 _jvms = nullptr;
2000 }
2001
2002 // Make a new, clean node notes.
2003 static Node_Notes* make(Compile* C) {
2004 Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
2005 nn->clear();
2006 return nn;
2007 }
2008
2009 Node_Notes* clone(Compile* C) {
2010 Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
2011 (*nn) = (*this);
2012 return nn;
2013 }
2014
2015 // Absorb any information from source.
2016 bool update_from(Node_Notes* source) {
2017 bool changed = false;
2018 if (source != nullptr) {
2019 if (source->jvms() != nullptr) {
2020 set_jvms(source->jvms());
2021 changed = true;
2022 }
2023 }
2024 return changed;
2025 }
2026 };
2027
// Inlined accessors for Compile::node_notes that require the preceding class:
2029 inline Node_Notes*
2030 Compile::locate_node_notes(GrowableArray<Node_Notes*>* arr,
2031 int idx, bool can_grow) {
2032 assert(idx >= 0, "oob");
2033 int block_idx = (idx >> _log2_node_notes_block_size);
2034 int grow_by = (block_idx - (arr == nullptr? 0: arr->length()));
2035 if (grow_by >= 0) {
2036 if (!can_grow) return nullptr;
2037 grow_node_notes(arr, grow_by + 1);
2038 }
2039 if (arr == nullptr) return nullptr;
2040 // (Every element of arr is a sub-array of length _node_notes_block_size.)
2041 return arr->at(block_idx) + (idx & (_node_notes_block_size-1));
2042 }
2043
2044 inline Node_Notes* Compile::node_notes_at(int idx) {
2045 return locate_node_notes(_node_note_array, idx, false);
2046 }
2047
2048 inline bool
2049 Compile::set_node_notes_at(int idx, Node_Notes* value) {
2050 if (value == nullptr || value->is_clear())
2051 return false; // nothing to write => write nothing
2052 Node_Notes* loc = locate_node_notes(_node_note_array, idx, true);
2053 assert(loc != nullptr, "");
2054 return loc->update_from(value);
2055 }
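
// A minimal usage sketch (assuming 'C' is the current Compile, 'n' a node, and
// 'jvms' the JVMState to record for it):
//
//   Node_Notes* nn = Node_Notes::make(C);
//   nn->set_jvms(jvms);
//   C->set_node_notes_at(n->_idx, nn);
//   Node_Notes* notes = C->node_notes_at(n->_idx);  // read back; null if never grown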
2056
2057
2058 //------------------------------TypeNode---------------------------------------
2059 // Node with a Type constant.
2060 class TypeNode : public Node {
2061 protected:
2062 virtual uint hash() const; // Check the type
2063 virtual bool cmp( const Node &n ) const;
2064 virtual uint size_of() const; // Size is bigger
2065 const Type* const _type;
2066 public:
2067 void set_type(const Type* t) {
2068 assert(t != nullptr, "sanity");
2069 DEBUG_ONLY(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
2070 *(const Type**)&_type = t; // cast away const-ness
2071 // If this node is in the hash table, make sure it doesn't need a rehash.
2072 assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
2073 }
  const Type* type() const { assert(_type != nullptr, "sanity"); return _type; }
2075 TypeNode( const Type *t, uint required ) : Node(required), _type(t) {
2076 init_class_id(Class_Type);
2077 }
2078 virtual const Type* Value(PhaseGVN* phase) const;
2079 virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
2080 virtual const Type *bottom_type() const;
2081 virtual uint ideal_reg() const;
2082
2083 void make_path_dead(PhaseIterGVN* igvn, PhaseIdealLoop* loop, Node* ctrl_use, uint j, const char* phase_str);
2084 #ifndef PRODUCT
2085 virtual void dump_spec(outputStream *st) const;
2086 virtual void dump_compact_spec(outputStream *st) const;
2087 #endif
2088 void make_paths_from_here_dead(PhaseIterGVN* igvn, PhaseIdealLoop* loop, const char* phase_str);
2089 void create_halt_path(PhaseIterGVN* igvn, Node* c, PhaseIdealLoop* loop, const char* phase_str) const;
2090 };
2091
2092 #include "opto/opcodes.hpp"
2093
2094 #define Op_IL(op) \
2095 inline int Op_ ## op(BasicType bt) { \
2096 assert(bt == T_INT || bt == T_LONG, "only for int or longs"); \
2097 if (bt == T_INT) { \
2098 return Op_## op ## I; \
2099 } \
2100 return Op_## op ## L; \
2101 }
2102
2103 Op_IL(Add)
2104 Op_IL(And)
2105 Op_IL(Sub)
2106 Op_IL(Mul)
2107 Op_IL(URShift)
2108 Op_IL(LShift)
2109 Op_IL(RShift)
2110 Op_IL(Xor)
2111 Op_IL(Cmp)
2112 Op_IL(Div)
2113 Op_IL(Mod)
2114 Op_IL(UDiv)
2115 Op_IL(UMod)
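
// For example, the Op_IL(Add) instantiation above expands to:
//
//   inline int Op_Add(BasicType bt) {
//     assert(bt == T_INT || bt == T_LONG, "only for int or longs");
//     if (bt == T_INT) {
//       return Op_AddI;
//     }
//     return Op_AddL;
//   }
//
// so callers can pick the int or long flavor of an opcode with Op_Add(bt)
// instead of branching on the basic type themselves.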
2116
2117 inline int Op_ConIL(BasicType bt) {
2118 assert(bt == T_INT || bt == T_LONG, "only for int or longs");
2119 if (bt == T_INT) {
2120 return Op_ConI;
2121 }
2122 return Op_ConL;
2123 }
2124
2125 inline int Op_Cmp_unsigned(BasicType bt) {
2126 assert(bt == T_INT || bt == T_LONG, "only for int or longs");
2127 if (bt == T_INT) {
2128 return Op_CmpU;
2129 }
2130 return Op_CmpUL;
2131 }
2132
2133 inline int Op_Cast(BasicType bt) {
2134 assert(bt == T_INT || bt == T_LONG, "only for int or longs");
2135 if (bt == T_INT) {
2136 return Op_CastII;
2137 }
2138 return Op_CastLL;
2139 }
2140
2141 inline int Op_DivIL(BasicType bt, bool is_unsigned) {
2142 assert(bt == T_INT || bt == T_LONG, "only for int or longs");
2143 if (bt == T_INT) {
2144 if (is_unsigned) {
2145 return Op_UDivI;
2146 } else {
2147 return Op_DivI;
2148 }
2149 }
2150 if (is_unsigned) {
2151 return Op_UDivL;
2152 } else {
2153 return Op_DivL;
2154 }
2155 }
2156
2157 inline int Op_DivModIL(BasicType bt, bool is_unsigned) {
2158 assert(bt == T_INT || bt == T_LONG, "only for int or longs");
2159 if (bt == T_INT) {
2160 if (is_unsigned) {
2161 return Op_UDivModI;
2162 } else {
2163 return Op_DivModI;
2164 }
2165 }
2166 if (is_unsigned) {
2167 return Op_UDivModL;
2168 } else {
2169 return Op_DivModL;
2170 }
2171 }
2172
// Interface to define actions that should be taken when running DataNodeBFS. Each user can extend this class to
// specify a customized BFS.
2175 class BFSActions : public StackObj {
2176 public:
2177 // Should a node's inputs further be visited in the BFS traversal? By default, we visit all data inputs. Override this
2178 // method to provide a custom filter.
2179 virtual bool should_visit(Node* node) const {
2180 // By default, visit all inputs.
2181 return true;
  }
2183
2184 // Is the visited node a target node that we are looking for in the BFS traversal? We do not visit its inputs further
2185 // but the BFS will continue to visit all unvisited nodes in the queue.
2186 virtual bool is_target_node(Node* node) const = 0;
2187
2188 // Defines an action that should be taken when we visit a target node in the BFS traversal.
2189 virtual void target_node_action(Node* target_node) = 0;
2190 };
2191
// Class to perform a BFS traversal on the data nodes from a given start node. The provided BFSActions guide which
// data nodes' inputs should be visited further, which data nodes are target nodes, and what to do with the target nodes.
2194 class DataNodeBFS : public StackObj {
2195 BFSActions& _bfs_actions;
2196
2197 public:
2198 explicit DataNodeBFS(BFSActions& bfs_action) : _bfs_actions(bfs_action) {}
2199
2200 // Run the BFS starting from 'start_node' and apply the actions provided to this class.
2201 void run(Node* start_node) {
2202 ResourceMark rm;
2203 Unique_Node_List _nodes_to_visit;
2204 _nodes_to_visit.push(start_node);
2205 for (uint i = 0; i < _nodes_to_visit.size(); i++) {
2206 Node* next = _nodes_to_visit[i];
2207 for (uint j = 1; j < next->req(); j++) {
2208 Node* input = next->in(j);
2209 if (_bfs_actions.is_target_node(input)) {
2210 assert(_bfs_actions.should_visit(input), "must also pass node filter");
2211 _bfs_actions.target_node_action(input);
2212 } else if (_bfs_actions.should_visit(input)) {
2213 _nodes_to_visit.push(input);
2214 }
2215 }
2216 }
2217 }
2218 };
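
// A minimal sketch of a customized BFS.  The class name CollectConstants and
// its members are hypothetical; they only illustrate how BFSActions plugs into
// DataNodeBFS:
//
//   class CollectConstants : public BFSActions {
//     Unique_Node_List& _constants;
//
//    public:
//     explicit CollectConstants(Unique_Node_List& constants) : _constants(constants) {}
//
//     // Only walk through data inputs (and skip null inputs).
//     bool should_visit(Node* node) const override {
//       return node != nullptr && !node->is_CFG();
//     }
//
//     // Constants are the targets; their inputs are not visited further.
//     bool is_target_node(Node* node) const override {
//       return node != nullptr && node->is_Con();
//     }
//
//     void target_node_action(Node* target_node) override {
//       _constants.push(target_node);
//     }
//   };
//
//   CollectConstants collect(constants);
//   DataNodeBFS bfs(collect);
//   bfs.run(start_node);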
2219
2220 #endif // SHARE_OPTO_NODE_HPP