1 /*
2 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2024, 2025, Alibaba Group Holding Limited. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "gc/shared/barrierSet.hpp"
27 #include "gc/shared/c2/barrierSetC2.hpp"
28 #include "libadt/vectset.hpp"
29 #include "memory/allocation.inline.hpp"
30 #include "memory/resourceArea.hpp"
31 #include "opto/ad.hpp"
32 #include "opto/callGenerator.hpp"
33 #include "opto/castnode.hpp"
34 #include "opto/cfgnode.hpp"
35 #include "opto/connode.hpp"
36 #include "opto/inlinetypenode.hpp"
37 #include "opto/loopnode.hpp"
38 #include "opto/machnode.hpp"
39 #include "opto/matcher.hpp"
40 #include "opto/node.hpp"
41 #include "opto/opcodes.hpp"
42 #include "opto/reachability.hpp"
43 #include "opto/regmask.hpp"
44 #include "opto/rootnode.hpp"
45 #include "opto/type.hpp"
46 #include "utilities/copy.hpp"
47 #include "utilities/macros.hpp"
48 #include "utilities/powerOfTwo.hpp"
49 #include "utilities/stringUtils.hpp"
50
// Forward declarations to keep header coupling minimal.
class RegMask;
// #include "phase.hpp"
class PhaseTransform;
class PhaseGVN;

// Sentinel register number: values at or above this do not name a real
// machine register.
const uint Node::NotAMachineReg = 0xffff0000;

#ifndef PRODUCT
extern uint nodes_created;  // running tally of Nodes allocated (debug statistics)
#endif
// The Node constructors below read fields that the in-place Init() call in the
// initializer list has just set up; silence clang's -Wuninitialized for them.
// (clang also honors GCC-style diagnostic pragmas.)
#ifdef __clang__
#pragma clang diagnostic push
#pragma GCC diagnostic ignored "-Wuninitialized"
#endif
66
67 #ifdef ASSERT
68
//-------------------------- construct_node------------------------------------
// Set a breakpoint here to identify where a particular node index is built.
// Debug-only: assigns the compile-qualified _debug_idx, checks the live-node
// budget, honors the BreakAtNode flag, and resets per-node debug bookkeeping.
void Node::verify_construction() {
  _debug_orig = nullptr;
  // The decimal digits of _debug_idx are <compile_id> followed by 10 digits of <_idx>
  Compile* C = Compile::current();
  assert(C->unique() < (INT_MAX - 1), "Node limit exceeded INT_MAX");
  uint64_t new_debug_idx = (uint64_t)C->compile_id() * 10000000000 + _idx;
  set_debug_idx(new_debug_idx);
  if (!C->phase_optimize_finished()) {
    // Only check assert during parsing and optimization phase. Skip it while generating code.
    assert(C->live_nodes() <= C->max_node_limit(), "Live Node limit exceeded limit");
  }
  // BreakAtNode matches either the compile-qualified debug index or the
  // plain node index, so either form can be given on the command line.
  if (BreakAtNode != 0 && (_debug_idx == BreakAtNode || (uint64_t)_idx == BreakAtNode)) {
    tty->print_cr("BreakAtNode: _idx=%d _debug_idx=" UINT64_FORMAT, _idx, _debug_idx);
    BREAKPOINT;
  }
#if OPTO_DU_ITERATOR_ASSERT
  _last_del = nullptr;  // no out-edge deletions observed yet
  _del_tick = 0;
#endif
  _hash_lock = 0;  // new node is not keyed in any hash table
}
92
93
94 // #ifdef ASSERT ...
95
96 #if OPTO_DU_ITERATOR_ASSERT
97 void DUIterator_Common::sample(const Node* node) {
98 _vdui = VerifyDUIterators;
99 _node = node;
100 _outcnt = node->_outcnt;
101 _del_tick = node->_del_tick;
102 _last = nullptr;
103 }
104
105 void DUIterator_Common::verify(const Node* node, bool at_end_ok) {
106 assert(_node == node, "consistent iterator source");
107 assert(_del_tick == node->_del_tick, "no unexpected deletions allowed");
108 }
109
110 void DUIterator_Common::verify_resync() {
111 // Ensure that the loop body has just deleted the last guy produced.
112 const Node* node = _node;
113 // Ensure that at least one copy of the last-seen edge was deleted.
114 // Note: It is OK to delete multiple copies of the last-seen edge.
115 // Unfortunately, we have no way to verify that all the deletions delete
116 // that same edge. On this point we must use the Honor System.
117 assert(node->_del_tick >= _del_tick+1, "must have deleted an edge");
118 assert(node->_last_del == _last, "must have deleted the edge just produced");
119 // We liked this deletion, so accept the resulting outcnt and tick.
120 _outcnt = node->_outcnt;
121 _del_tick = node->_del_tick;
122 }
123
124 void DUIterator_Common::reset(const DUIterator_Common& that) {
125 if (this == &that) return; // ignore assignment to self
126 if (!_vdui) {
127 // We need to initialize everything, overwriting garbage values.
128 _last = that._last;
129 _vdui = that._vdui;
130 }
131 // Note: It is legal (though odd) for an iterator over some node x
132 // to be reassigned to iterate over another node y. Some doubly-nested
133 // progress loops depend on being able to do this.
134 const Node* node = that._node;
135 // Re-initialize everything, except _last.
136 _node = node;
137 _outcnt = node->_outcnt;
138 _del_tick = node->_del_tick;
139 }
140
// Begin iterating over 'node': record baseline assertion data and clear the
// refresh counter.
void DUIterator::sample(const Node* node) {
  DUIterator_Common::sample(node);      // Initialize the assertion data.
  _refresh_tick = 0;                    // No refreshes have happened, as yet.
}
145
void DUIterator::verify(const Node* node, bool at_end_ok) {
  DUIterator_Common::verify(node, at_end_ok);
  // _idx may equal _outcnt only when the iterator sits one-past-the-end.
  assert(_idx < node->_outcnt + (uint)at_end_ok, "idx in range");
}
150
151 void DUIterator::verify_increment() {
152 if (_refresh_tick & 1) {
153 // We have refreshed the index during this loop.
154 // Fix up _idx to meet asserts.
155 if (_idx > _outcnt) _idx = _outcnt;
156 }
157 verify(_node, true);
158 }
159
// Re-baseline after the loop body deleted the edge just produced.
void DUIterator::verify_resync() {
  // Note: We do not assert on _outcnt, because insertions are OK here.
  DUIterator_Common::verify_resync();
  // Make sure we are still in sync, possibly with no more out-edges:
  verify(_node, true);
}
166
167 void DUIterator::reset(const DUIterator& that) {
168 if (this == &that) return; // self assignment is always a no-op
169 assert(that._refresh_tick == 0, "assign only the result of Node::outs()");
170 assert(that._idx == 0, "assign only the result of Node::outs()");
171 assert(_idx == that._idx, "already assigned _idx");
172 if (!_vdui) {
173 // We need to initialize everything, overwriting garbage values.
174 sample(that._node);
175 } else {
176 DUIterator_Common::reset(that);
177 if (_refresh_tick & 1) {
178 _refresh_tick++; // Clear the "was refreshed" flag.
179 }
180 assert(_refresh_tick < 2*100000, "DU iteration must converge quickly");
181 }
182 }
183
// Re-synchronize with the node after an arbitrary structural change, and mark
// the iterator as refreshed (forcing the loop to run once more cleanly; see
// verify_finish).
void DUIterator::refresh() {
  DUIterator_Common::sample(_node);     // Re-fetch assertion data.
  _refresh_tick |= 1;                   // Set the "was refreshed" flag.
}
188
189 void DUIterator::verify_finish() {
190 // If the loop has killed the node, do not require it to re-run.
191 if (_node->_outcnt == 0) _refresh_tick &= ~1;
192 // If this assert triggers, it means that a loop used refresh_out_pos
193 // to re-synch an iteration index, but the loop did not correctly
194 // re-run itself, using a "while (progress)" construct.
195 // This iterator enforces the rule that you must keep trying the loop
196 // until it "runs clean" without any need for refreshing.
197 assert(!(_refresh_tick & 1), "the loop must run once with no refreshing");
198 }
199
200
void DUIterator_Fast::verify(const Node* node, bool at_end_ok) {
  DUIterator_Common::verify(node, at_end_ok);
  Node** out = node->_out;
  uint cnt = node->_outcnt;
  // Fast iterators cache the count up front, so insertion invalidates them.
  assert(cnt == _outcnt, "no insertions allowed");
  // _outp must point at a real edge, or (when at_end_ok) one past the last.
  assert(_outp >= out && _outp <= out + cnt - !at_end_ok, "outp in range");
  // This last check is carefully designed to work for NO_OUT_ARRAY.
}
209
// Verify a limit pointer ("imax"): it must sit exactly one past the last edge.
void DUIterator_Fast::verify_limit() {
  const Node* node = _node;
  verify(node, true);
  assert(_outp == node->_out + node->_outcnt, "limit still correct");
}
215
// Re-baseline after deletion(s). Distinguishes the limit pointer ("imax"),
// which absorbs the exact deletion count, from a normal interior pointer.
void DUIterator_Fast::verify_resync() {
  const Node* node = _node;
  if (_outp == node->_out + _outcnt) {
    // Note that the limit imax, not the pointer i, gets updated with the
    // exact count of deletions. (For the pointer it's always "--i".)
    assert(node->_outcnt+node->_del_tick == _outcnt+_del_tick, "no insertions allowed with deletion(s)");
    // This is a limit pointer, with a name like "imax".
    // Fudge the _last field so that the common assert will be happy.
    _last = (Node*) node->_last_del;
    DUIterator_Common::verify_resync();
  } else {
    assert(node->_outcnt < _outcnt, "no insertions allowed with deletion(s)");
    // A normal internal pointer.
    DUIterator_Common::verify_resync();
    // Make sure we are still in sync, possibly with no more out-edges:
    verify(node, true);
  }
}
234
235 void DUIterator_Fast::verify_relimit(uint n) {
236 const Node* node = _node;
237 assert((int)n > 0, "use imax -= n only with a positive count");
238 // This must be a limit pointer, with a name like "imax".
239 assert(_outp == node->_out + node->_outcnt, "apply -= only to a limit (imax)");
240 // The reported number of deletions must match what the node saw.
241 assert(node->_del_tick == _del_tick + n, "must have deleted n edges");
242 // Fudge the _last field so that the common assert will be happy.
243 _last = (Node*) node->_last_del;
244 DUIterator_Common::verify_resync();
245 }
246
// Adopt another fast iterator's state; _outp itself must already match.
void DUIterator_Fast::reset(const DUIterator_Fast& that) {
  assert(_outp == that._outp, "already assigned _outp");
  DUIterator_Common::reset(that);
}
251
void DUIterator_Last::verify(const Node* node, bool at_end_ok) {
  // at_end_ok means the _outp is allowed to underflow by 1
  // (temporarily bump _outp so the Fast range check accepts that position).
  _outp += at_end_ok;
  DUIterator_Fast::verify(node, at_end_ok);  // check _del_tick, etc.
  _outp -= at_end_ok;
  // Last-iterators walk backwards, anchored at the final out-edge.
  assert(_outp == (node->_out + node->_outcnt) - 1, "pointer must point to end of nodes");
}
259
// For a backward iterator the limit is the start of the out array.
void DUIterator_Last::verify_limit() {
  // Do not require the limit address to be resynched.
  //verify(node, true);
  assert(_outp == _node->_out, "limit still correct");
}
265
266 void DUIterator_Last::verify_step(uint num_edges) {
267 assert((int)num_edges > 0, "need non-zero edge count for loop progress");
268 _outcnt -= num_edges;
269 _del_tick += num_edges;
270 // Make sure we are still in sync, possibly with no more out-edges:
271 const Node* node = _node;
272 verify(node, true);
273 assert(node->_last_del == _last, "must have deleted the edge just produced");
274 }
275
276 #endif //OPTO_DU_ITERATOR_ASSERT
277
278
279 #endif //ASSERT
280
281
// This constant used to initialize _out may be any non-null value.
// The value null is reserved for the top node only.
#define NO_OUT_ARRAY ((Node**)-1)

// Out-of-line code from node constructors.
// Executed only when extra debug info. is being passed around.
// Records 'nn' as the Node_Notes for node index 'idx' in compile 'C'.
static void init_node_notes(Compile* C, int idx, Node_Notes* nn) {
  C->set_node_notes_at(idx, nn);
}
291
// Shared initialization code.
// Reserves a fresh node index, allocates the input-edge array (if any),
// captures any default Node_Notes, and initializes the edge bookkeeping
// fields. Returns the new index; callers store it into _idx via their
// constructor initializer lists.
inline int Node::Init(int req) {
  Compile* C = Compile::current();
  int idx = C->next_unique();
  NOT_PRODUCT(_igv_idx = C->next_igv_idx());

  // Allocate memory for the necessary number of edges.
  if (req > 0) {
    // Allocate space for _in array to have double alignment.
    _in = (Node **) ((char *) (C->node_arena()->AmallocWords(req * sizeof(void*))));
  }
  // If there are default notes floating around, capture them:
  Node_Notes* nn = C->default_node_notes();
  if (nn != nullptr) init_node_notes(C, idx, nn);

  // Note: At this point, C is dead,
  // and we begin to initialize the new Node.

  _cnt = _max = req;
  _outcnt = _outmax = 0;
  _class_id = Class_Node;
  _flags = 0;
  // Non-null marker: only the top node may have a null _out (see is_top).
  _out = NO_OUT_ARRAY;
  return idx;
}
317
318 //------------------------------Node-------------------------------------------
319 // Create a Node, with a given number of required edges.
320 Node::Node(uint req)
321 : _idx(Init(req))
322 #ifdef ASSERT
323 , _parse_idx(_idx)
324 #endif
325 {
326 assert( req < Compile::current()->max_node_limit() - NodeLimitFudgeFactor, "Input limit exceeded" );
327 DEBUG_ONLY( verify_construction() );
328 NOT_PRODUCT(nodes_created++);
329 if (req == 0) {
330 _in = nullptr;
331 } else {
332 Node** to = _in;
333 for(uint i = 0; i < req; i++) {
334 to[i] = nullptr;
335 }
336 }
337 }
338
339 //------------------------------Node-------------------------------------------
340 Node::Node(Node *n0)
341 : _idx(Init(1))
342 #ifdef ASSERT
343 , _parse_idx(_idx)
344 #endif
345 {
346 DEBUG_ONLY( verify_construction() );
347 NOT_PRODUCT(nodes_created++);
348 assert( is_not_dead(n0), "can not use dead node");
349 _in[0] = n0; if (n0 != nullptr) n0->add_out((Node *)this);
350 }
351
352 //------------------------------Node-------------------------------------------
353 Node::Node(Node *n0, Node *n1)
354 : _idx(Init(2))
355 #ifdef ASSERT
356 , _parse_idx(_idx)
357 #endif
358 {
359 DEBUG_ONLY( verify_construction() );
360 NOT_PRODUCT(nodes_created++);
361 assert( is_not_dead(n0), "can not use dead node");
362 assert( is_not_dead(n1), "can not use dead node");
363 _in[0] = n0; if (n0 != nullptr) n0->add_out((Node *)this);
364 _in[1] = n1; if (n1 != nullptr) n1->add_out((Node *)this);
365 }
366
367 //------------------------------Node-------------------------------------------
368 Node::Node(Node *n0, Node *n1, Node *n2)
369 : _idx(Init(3))
370 #ifdef ASSERT
371 , _parse_idx(_idx)
372 #endif
373 {
374 DEBUG_ONLY( verify_construction() );
375 NOT_PRODUCT(nodes_created++);
376 assert( is_not_dead(n0), "can not use dead node");
377 assert( is_not_dead(n1), "can not use dead node");
378 assert( is_not_dead(n2), "can not use dead node");
379 _in[0] = n0; if (n0 != nullptr) n0->add_out((Node *)this);
380 _in[1] = n1; if (n1 != nullptr) n1->add_out((Node *)this);
381 _in[2] = n2; if (n2 != nullptr) n2->add_out((Node *)this);
382 }
383
384 //------------------------------Node-------------------------------------------
385 Node::Node(Node *n0, Node *n1, Node *n2, Node *n3)
386 : _idx(Init(4))
387 #ifdef ASSERT
388 , _parse_idx(_idx)
389 #endif
390 {
391 DEBUG_ONLY( verify_construction() );
392 NOT_PRODUCT(nodes_created++);
393 assert( is_not_dead(n0), "can not use dead node");
394 assert( is_not_dead(n1), "can not use dead node");
395 assert( is_not_dead(n2), "can not use dead node");
396 assert( is_not_dead(n3), "can not use dead node");
397 _in[0] = n0; if (n0 != nullptr) n0->add_out((Node *)this);
398 _in[1] = n1; if (n1 != nullptr) n1->add_out((Node *)this);
399 _in[2] = n2; if (n2 != nullptr) n2->add_out((Node *)this);
400 _in[3] = n3; if (n3 != nullptr) n3->add_out((Node *)this);
401 }
402
403 //------------------------------Node-------------------------------------------
404 Node::Node(Node *n0, Node *n1, Node *n2, Node *n3, Node *n4)
405 : _idx(Init(5))
406 #ifdef ASSERT
407 , _parse_idx(_idx)
408 #endif
409 {
410 DEBUG_ONLY( verify_construction() );
411 NOT_PRODUCT(nodes_created++);
412 assert( is_not_dead(n0), "can not use dead node");
413 assert( is_not_dead(n1), "can not use dead node");
414 assert( is_not_dead(n2), "can not use dead node");
415 assert( is_not_dead(n3), "can not use dead node");
416 assert( is_not_dead(n4), "can not use dead node");
417 _in[0] = n0; if (n0 != nullptr) n0->add_out((Node *)this);
418 _in[1] = n1; if (n1 != nullptr) n1->add_out((Node *)this);
419 _in[2] = n2; if (n2 != nullptr) n2->add_out((Node *)this);
420 _in[3] = n3; if (n3 != nullptr) n3->add_out((Node *)this);
421 _in[4] = n4; if (n4 != nullptr) n4->add_out((Node *)this);
422 }
423
424 //------------------------------Node-------------------------------------------
425 Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
426 Node *n4, Node *n5)
427 : _idx(Init(6))
428 #ifdef ASSERT
429 , _parse_idx(_idx)
430 #endif
431 {
432 DEBUG_ONLY( verify_construction() );
433 NOT_PRODUCT(nodes_created++);
434 assert( is_not_dead(n0), "can not use dead node");
435 assert( is_not_dead(n1), "can not use dead node");
436 assert( is_not_dead(n2), "can not use dead node");
437 assert( is_not_dead(n3), "can not use dead node");
438 assert( is_not_dead(n4), "can not use dead node");
439 assert( is_not_dead(n5), "can not use dead node");
440 _in[0] = n0; if (n0 != nullptr) n0->add_out((Node *)this);
441 _in[1] = n1; if (n1 != nullptr) n1->add_out((Node *)this);
442 _in[2] = n2; if (n2 != nullptr) n2->add_out((Node *)this);
443 _in[3] = n3; if (n3 != nullptr) n3->add_out((Node *)this);
444 _in[4] = n4; if (n4 != nullptr) n4->add_out((Node *)this);
445 _in[5] = n5; if (n5 != nullptr) n5->add_out((Node *)this);
446 }
447
448 //------------------------------Node-------------------------------------------
449 Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
450 Node *n4, Node *n5, Node *n6)
451 : _idx(Init(7))
452 #ifdef ASSERT
453 , _parse_idx(_idx)
454 #endif
455 {
456 DEBUG_ONLY( verify_construction() );
457 NOT_PRODUCT(nodes_created++);
458 assert( is_not_dead(n0), "can not use dead node");
459 assert( is_not_dead(n1), "can not use dead node");
460 assert( is_not_dead(n2), "can not use dead node");
461 assert( is_not_dead(n3), "can not use dead node");
462 assert( is_not_dead(n4), "can not use dead node");
463 assert( is_not_dead(n5), "can not use dead node");
464 assert( is_not_dead(n6), "can not use dead node");
465 _in[0] = n0; if (n0 != nullptr) n0->add_out((Node *)this);
466 _in[1] = n1; if (n1 != nullptr) n1->add_out((Node *)this);
467 _in[2] = n2; if (n2 != nullptr) n2->add_out((Node *)this);
468 _in[3] = n3; if (n3 != nullptr) n3->add_out((Node *)this);
469 _in[4] = n4; if (n4 != nullptr) n4->add_out((Node *)this);
470 _in[5] = n5; if (n5 != nullptr) n5->add_out((Node *)this);
471 _in[6] = n6; if (n6 != nullptr) n6->add_out((Node *)this);
472 }
473
474 #ifdef __clang__
475 #pragma clang diagnostic pop
476 #endif
477
478
//------------------------------clone------------------------------------------
// Clone a Node. The clone gets a fresh index, copies of all input edges (with
// reciprocal def-use edges added), an empty out-array, and is re-registered on
// the compile-wide side lists / barrier machinery as appropriate.
Node *Node::clone() const {
  Compile* C = Compile::current();
  uint s = size_of();           // Size of inherited Node
  // Allocate node body plus room for the input array in one contiguous chunk;
  // the input array lives immediately after the node object.
  Node *n = (Node*)C->node_arena()->AmallocWords(size_of() + _max*sizeof(Node*));
  Copy::conjoint_words_to_lower((HeapWord*)this, (HeapWord*)n, s);
  // Set the new input pointer array
  n->_in = (Node**)(((char*)n)+s);
  // Cannot share the old output pointer array, so kill it
  n->_out = NO_OUT_ARRAY;
  // And reset the counters to 0
  n->_outcnt = 0;
  n->_outmax = 0;
  // Unlock this guy, since he is not in any hash table.
  DEBUG_ONLY(n->_hash_lock = 0);
  // Walk the old node's input list to duplicate its edges
  uint i;
  for( i = 0; i < len(); i++ ) {
    Node *x = in(i);
    n->_in[i] = x;
    if (x != nullptr) x->add_out(n);
  }
  // Keep the compile-wide bookkeeping lists consistent with the new copy.
  if (is_macro()) {
    C->add_macro_node(n);
  }
  if (is_expensive()) {
    C->add_expensive_node(n);
  }
  if (is_ReachabilityFence()) {
    C->add_reachability_fence(n->as_ReachabilityFence());
  }
  if (for_post_loop_opts_igvn()) {
    // Don't add cloned node to Compile::_for_post_loop_opts_igvn list automatically.
    // If it is applicable, it will happen anyway when the cloned node is registered with IGVN.
    n->remove_flag(Node::NodeFlags::Flag_for_post_loop_opts_igvn);
  }
  if (for_merge_stores_igvn()) {
    // Don't add cloned node to Compile::_for_merge_stores_igvn list automatically.
    // If it is applicable, it will happen anyway when the cloned node is registered with IGVN.
    n->remove_flag(Node::NodeFlags::Flag_for_merge_stores_igvn);
  }
  if (n->is_ParsePredicate()) {
    C->add_parse_predicate(n->as_ParsePredicate());
  }
  if (n->is_OpaqueTemplateAssertionPredicate()) {
    C->add_template_assertion_predicate_opaque(n->as_OpaqueTemplateAssertionPredicate());
  }

  // Let the GC's C2 barrier machinery track the clone as well.
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  bs->register_potential_barrier_node(n);

  n->set_idx(C->next_unique()); // Get new unique index as well
  NOT_PRODUCT(n->_igv_idx = C->next_igv_idx());
  DEBUG_ONLY( n->verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Do not patch over the debug_idx of a clone, because it makes it
  // impossible to break on the clone's moment of creation.
  //DEBUG_ONLY( n->set_debug_idx( debug_idx() ) );

  C->copy_node_notes_to(n, (Node*) this);

  // MachNode clone
  uint nopnds;
  if (this->is_Mach() && (nopnds = this->as_Mach()->num_opnds()) > 0) {
    MachNode *mach = n->as_Mach();
    MachNode *mthis = this->as_Mach();
    // Get address of _opnd_array.
    // It should be the same offset since it is the clone of this node.
    // (Recompute the clone's operand array pointer by applying the original's
    // offset-from-_opnds to the clone, then deep-clone each operand.)
    MachOper **from = mthis->_opnds;
    MachOper **to = (MachOper **)((size_t)(&mach->_opnds) +
                    pointer_delta((const void*)from,
                                  (const void*)(&mthis->_opnds), 1));
    mach->_opnds = to;
    for ( uint i = 0; i < nopnds; ++i ) {
      to[i] = from[i]->clone();
    }
  }
  if (this->is_MachProj()) {
    // MachProjNodes contain register masks that may contain pointers to
    // externally allocated memory. Make sure to use a proper constructor
    // instead of just shallowly copying.
    MachProjNode* mach = n->as_MachProj();
    MachProjNode* mthis = this->as_MachProj();
    new (&mach->_rout) RegMask(mthis->_rout);
  }
  if (n->is_Call()) {
    // CallGenerator is linked to the original node.
    CallGenerator* cg = n->as_Call()->generator();
    if (cg != nullptr) {
      CallGenerator* cloned_cg = cg->with_call_node(n->as_Call());
      n->as_Call()->set_generator(cloned_cg);
    }
  }
  if (n->is_SafePoint()) {
    // Scalar replacement and macro expansion might modify the JVMState.
    // Clone it to make sure it's not shared between SafePointNodes.
    n->as_SafePoint()->clone_jvms(C);
    n->as_SafePoint()->clone_replaced_nodes();
  }
  if (n->is_InlineType()) {
    C->add_inline_type(n);
  }
  if (n->is_LoadFlat() || n->is_StoreFlat()) {
    C->add_flat_access(n);
  }
  Compile::current()->record_modified_node(n);
  return n;  // Return the clone
}
588
589 //---------------------------setup_is_top--------------------------------------
590 // Call this when changing the top node, to reassert the invariants
591 // required by Node::is_top. See Compile::set_cached_top_node.
592 void Node::setup_is_top() {
593 if (this == (Node*)Compile::current()->top()) {
594 // This node has just become top. Kill its out array.
595 _outcnt = _outmax = 0;
596 _out = nullptr; // marker value for top
597 assert(is_top(), "must be top");
598 } else {
599 if (_out == nullptr) _out = NO_OUT_ARRAY;
600 assert(!is_top(), "must not be top");
601 }
602 }
603
//------------------------------~Node------------------------------------------
// Fancy destructor; eagerly attempt to reclaim Node numberings and storage.
// Unhooks all input edges, removes the node from compile-wide side lists, and
// frees the arena storage where possible (in product builds).
void Node::destruct(PhaseValues* phase) {
  Compile* compile = (phase != nullptr) ? phase->C : Compile::current();
  // Drop the node from the IGVN worklist so it is not revisited after death.
  if (phase != nullptr && phase->is_IterGVN()) {
    phase->is_IterGVN()->_worklist.remove(this);
  }
  // If this is the most recently created node, reclaim its index. Otherwise,
  // record the node as dead to keep liveness information accurate.
  if ((uint)_idx+1 == compile->unique()) {
    compile->set_unique(compile->unique()-1);
  } else {
    compile->record_dead_node(_idx);
  }
  // Clear debug info:
  Node_Notes* nn = compile->node_notes_at(_idx);
  if (nn != nullptr) nn->clear();
  // Walk the input array, freeing the corresponding output edges
  _cnt = _max;  // forget req/prec distinction
  uint i;
  for( i = 0; i < _max; i++ ) {
    set_req(i, nullptr);
    //assert(def->out(def->outcnt()-1) == (Node *)this,"bad def-use hacking in reclaim");
  }
  assert(outcnt() == 0, "deleting a node must not leave a dangling use");

  // Remove the node from every compile-wide side list it may be on.
  if (is_macro()) {
    compile->remove_macro_node(this);
  }
  if (is_expensive()) {
    compile->remove_expensive_node(this);
  }
  if (is_ReachabilityFence()) {
    compile->remove_reachability_fence(as_ReachabilityFence());
  }
  if (is_OpaqueTemplateAssertionPredicate()) {
    compile->remove_template_assertion_predicate_opaque(as_OpaqueTemplateAssertionPredicate());
  }
  if (is_ParsePredicate()) {
    compile->remove_parse_predicate(as_ParsePredicate());
  }
  if (for_post_loop_opts_igvn()) {
    compile->remove_from_post_loop_opts_igvn(this);
  }
  if (is_InlineType()) {
    compile->remove_inline_type(this);
  }
  if (for_merge_stores_igvn()) {
    compile->remove_from_merge_stores_igvn(this);
  }

  if (is_SafePoint()) {
    as_SafePoint()->delete_replaced_nodes();

    if (is_CallStaticJava()) {
      compile->remove_unstable_if_trap(as_CallStaticJava(), false);
    }
  }
  // Let the GC's C2 barrier machinery forget about this node.
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  bs->unregister_potential_barrier_node(this);

  // See if the input array was allocated just prior to the object
  int edge_size = _max*sizeof(void*);
  int out_edge_size = _outmax*sizeof(void*);
  char *in_array = ((char*)_in);
  char *edge_end = in_array + edge_size;
  char *out_array = (char*)(_out == NO_OUT_ARRAY? nullptr: _out);
  int node_size = size_of();

#ifdef ASSERT
  // We will not actually delete the storage, but we'll make the node unusable.
  compile->remove_modified_node(this);
  *(address*)this = badAddress;  // smash the C++ vtbl, probably
  _in = _out = (Node**) badAddress;
  _max = _cnt = _outmax = _outcnt = 0;
#endif

  // Free the output edge array
  if (out_edge_size > 0) {
    compile->node_arena()->Afree(out_array, out_edge_size);
  }

  // Free the input edge array and the node itself
  if( edge_end == (char*)this ) {
    // It was; free the input array and object all in one hit
#ifndef ASSERT
    compile->node_arena()->Afree(in_array, edge_size+node_size);
#endif
  } else {
    // Free just the input array
    compile->node_arena()->Afree(in_array, edge_size);

    // Free just the object
#ifndef ASSERT
    compile->node_arena()->Afree(this, node_size);
#endif
  }
}
702
703 // Resize input or output array to grow it to the next larger power-of-2 bigger
704 // than len.
705 void Node::resize_array(Node**& array, node_idx_t& max_size, uint len, bool needs_clearing) {
706 Arena* arena = Compile::current()->node_arena();
707 uint new_max = max_size;
708 if (new_max == 0) {
709 max_size = 4;
710 array = (Node**)arena->Amalloc(4 * sizeof(Node*));
711 if (needs_clearing) {
712 array[0] = nullptr;
713 array[1] = nullptr;
714 array[2] = nullptr;
715 array[3] = nullptr;
716 }
717 return;
718 }
719 new_max = next_power_of_2(len);
720 assert(needs_clearing || (array != nullptr && array != NO_OUT_ARRAY), "out must have sensible value");
721 array = (Node**)arena->Arealloc(array, max_size * sizeof(Node*), new_max * sizeof(Node*));
722 if (needs_clearing) {
723 Copy::zero_to_bytes(&array[max_size], (new_max - max_size) * sizeof(Node*)); // null all new space
724 }
725 max_size = new_max; // Record new max length
726 // This assertion makes sure that Node::_max is wide enough to
727 // represent the numerical value of new_max.
728 assert(max_size > len, "int width of _max or _outmax is too small");
729 }
730
//------------------------------grow-------------------------------------------
// Grow the input array, making space for more edges.
void Node::grow(uint len) {
  // Input slots must be nulled so new space reads as "no edge".
  resize_array(_in, _max, len, true);
}
736
//-----------------------------out_grow----------------------------------------
// Grow the output array, making space for more def-use edges.
void Node::out_grow(uint len) {
  assert(!is_top(), "cannot grow a top node's out array");
  // Out slots are bounded by _outcnt, so new space need not be cleared.
  resize_array(_out, _outmax, len, false);
}
743
744 #ifdef ASSERT
745 //------------------------------is_dead----------------------------------------
746 bool Node::is_dead() const {
747 // Mach and pinch point nodes may look like dead.
748 if( is_top() || is_Mach() || (Opcode() == Op_Node && _outcnt > 0) )
749 return false;
750 for( uint i = 0; i < _max; i++ )
751 if( _in[i] != nullptr )
752 return false;
753 return true;
754 }
755
756 bool Node::is_not_dead(const Node* n) {
757 return n == nullptr || !PhaseIterGVN::is_verify_def_use() || !(n->is_dead());
758 }
759
760 bool Node::is_reachable_from_root() const {
761 ResourceMark rm;
762 Unique_Node_List wq;
763 wq.push((Node*)this);
764 RootNode* root = Compile::current()->root();
765 for (uint i = 0; i < wq.size(); i++) {
766 Node* m = wq.at(i);
767 if (m == root) {
768 return true;
769 }
770 for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
771 Node* u = m->fast_out(j);
772 wq.push(u);
773 }
774 }
775 return false;
776 }
777 #endif
778
779 //------------------------------is_unreachable---------------------------------
780 bool Node::is_unreachable(PhaseIterGVN &igvn) const {
781 assert(!is_Mach(), "doesn't work with MachNodes");
782 return outcnt() == 0 || igvn.type(this) == Type::TOP || (in(0) != nullptr && in(0)->is_top());
783 }
784
//------------------------------add_req----------------------------------------
// Add a new required input at the end of the required inputs. If a precedence
// edge occupies that slot, it is relocated to the end of the precedence list.
void Node::add_req( Node *n ) {
  assert( is_not_dead(n), "can not use dead node");

  // Look to see if I can move precedence down one without reallocating
  if( (_cnt >= _max) || (in(_max-1) != nullptr) )
    grow( _max+1 );

  // Find a precedence edge to move
  if( in(_cnt) != nullptr ) {   // Next precedence edge is busy?
    uint i;
    for( i=_cnt; i<_max; i++ )
      if( in(i) == nullptr )    // Find the null at end of prec edge list
        break;                  // There must be one, since we grew the array
    _in[i] = in(_cnt);          // Move prec over, making space for req edge
  }
  _in[_cnt++] = n;              // Stuff over old prec edge
  if (n != nullptr) n->add_out((Node *)this);  // reciprocal def-use edge
  Compile::current()->record_modified_node(this);
}
806
//---------------------------add_req_batch-------------------------------------
// Add m copies of the same required input at the end of the required inputs,
// sliding any precedence edges up to make room.
void Node::add_req_batch( Node *n, uint m ) {
  assert( is_not_dead(n), "can not use dead node");
  // check various edge cases
  if ((int)m <= 1) {
    assert((int)m >= 0, "oob");
    if (m != 0) add_req(n);  // single copy degenerates to add_req
    return;
  }

  // Look to see if I can move precedence down one without reallocating
  if( (_cnt+m) > _max || _in[_max-m] )
    grow( _max+m );

  // Find a precedence edge to move
  if( _in[_cnt] != nullptr ) {  // Next precedence edge is busy?
    uint i;
    for( i=_cnt; i<_max; i++ )
      if( _in[i] == nullptr )   // Find the null at end of prec edge list
        break;                  // There must be one, since we grew the array
    // Slide all the precs over by m positions (assume #prec << m).
    Copy::conjoint_words_to_higher((HeapWord*)&_in[_cnt], (HeapWord*)&_in[_cnt+m], ((i-_cnt)*sizeof(Node*)));
  }

  // Stuff over the old prec edges
  for(uint i=0; i<m; i++ ) {
    _in[_cnt++] = n;
  }

  // Insert multiple out edges on the node.
  // (The top node keeps no out array, so it is skipped here.)
  if (n != nullptr && !n->is_top()) {
    for(uint i=0; i<m; i++ ) {
      n->add_out((Node *)this);
    }
  }
  Compile::current()->record_modified_node(this);
}
845
//------------------------------del_req----------------------------------------
// Delete the required edge at 'idx' and compact the edge array by moving the
// last required edge into the hole (input order is NOT preserved).
void Node::del_req( uint idx ) {
  assert( idx < _cnt, "oob");
  assert( !VerifyHashTableKeys || _hash_lock == 0,
          "remove node from hash table before modifying it");
  // First remove corresponding def-use edge
  Node *n = in(idx);
  if (n != nullptr) n->del_out((Node *)this);
  _in[idx] = in(--_cnt);  // Compact the array
  // Avoid spec violation: Gap in prec edges.
  close_prec_gap_at(_cnt);
  Compile::current()->record_modified_node(this);
}
860
//------------------------------del_req_ordered--------------------------------
// Delete the required edge at 'idx' and compact the edge array while
// preserving the relative order of the remaining required inputs
// (shifts all later inputs down by one).
void Node::del_req_ordered( uint idx ) {
  assert( idx < _cnt, "oob");
  assert( !VerifyHashTableKeys || _hash_lock == 0,
          "remove node from hash table before modifying it");
  // First remove corresponding def-use edge
  Node *n = in(idx);
  if (n != nullptr) n->del_out((Node *)this);
  if (idx < --_cnt) {    // Not last edge ?
    Copy::conjoint_words_to_lower((HeapWord*)&_in[idx+1], (HeapWord*)&_in[idx], ((_cnt-idx)*sizeof(Node*)));
  }
  // Avoid spec violation: Gap in prec edges.
  close_prec_gap_at(_cnt);
  Compile::current()->record_modified_node(this);
}
877
//------------------------------ins_req----------------------------------------
// Insert a new required input at position 'idx', shifting all later required
// inputs up by one. add_req(nullptr) first makes room (growing the array and
// handling precedence edges as needed).
void Node::ins_req( uint idx, Node *n ) {
  assert( is_not_dead(n), "can not use dead node");
  add_req(nullptr);                // Make space
  assert( idx < _max, "Must have allocated enough space");
  // Slide over
  if(_cnt-idx-1 > 0) {
    Copy::conjoint_words_to_higher((HeapWord*)&_in[idx], (HeapWord*)&_in[idx+1], ((_cnt-idx-1)*sizeof(Node*)));
  }
  _in[idx] = n;                               // Stuff over old required edge
  if (n != nullptr) n->add_out((Node *)this); // Add reciprocal def-use edge
  Compile::current()->record_modified_node(this);
}
892
893 //-----------------------------find_edge---------------------------------------
894 int Node::find_edge(Node* n) {
895 for (uint i = 0; i < len(); i++) {
896 if (_in[i] == n) return i;
897 }
898 return -1;
899 }
900
//----------------------------replace_edge-------------------------------------
// Replace every occurrence of 'old' among this node's inputs (required and
// precedence edges) with 'neww'. Returns the number of edges replaced.
// When 'gvn' is supplied, required edges go through set_req_X() so the
// affected nodes are re-enqueued for further optimization.
int Node::replace_edge(Node* old, Node* neww, PhaseGVN* gvn) {
  if (old == neww)  return 0;  // nothing to do
  uint nrep = 0;
  for (uint i = 0; i < len(); i++) {
    if (in(i) == old) {
      if (i < req()) {
        if (gvn != nullptr) {
          set_req_X(i, neww, gvn);
        } else {
          set_req(i, neww);
        }
      } else {
        // Precedence edge: IGVN bookkeeping is not supported here, and a
        // duplicated prec edge would violate the edge-array invariants.
        assert(gvn == nullptr || gvn->is_IterGVN() == nullptr, "no support for igvn here");
        assert(find_prec_edge(neww) == -1, "spec violation: duplicated prec edge (node %d -> %d)", _idx, neww->_idx);
        set_prec(i, neww);
      }
      nrep++;
    }
  }
  return nrep;
}
923
924 /**
925 * Replace input edges in the range pointing to 'old' node.
926 */
927 int Node::replace_edges_in_range(Node* old, Node* neww, int start, int end, PhaseGVN* gvn) {
928 if (old == neww) return 0; // nothing to do
929 uint nrep = 0;
930 for (int i = start; i < end; i++) {
931 if (in(i) == old) {
932 set_req_X(i, neww, gvn);
933 nrep++;
934 }
935 }
936 return nrep;
937 }
938
//-------------------------disconnect_inputs-----------------------------------
// null out all inputs to eliminate incoming Def-Use edges.
// After this call the node has no inputs at all (verified in debug builds)
// and is recorded as a dead node on the compile object.
void Node::disconnect_inputs(Compile* C) {
  // the layout of Node::_in
  // r: a required input, null is allowed
  // p: a precedence, null values are all at the end
  // -----------------------------------
  // |r|...|r|p|...|p|null|...|null|
  //         |                     |
  //         req()                 len()
  // -----------------------------------
  for (uint i = 0; i < req(); ++i) {
    if (in(i) != nullptr) {
      set_req(i, nullptr);
    }
  }

  // Remove precedence edges if any exist
  // Note: Safepoints may have precedence edges, even during parsing
  for (uint i = len(); i > req(); ) {
    rm_prec(--i);  // no-op if _in[i] is null
  }

#ifdef ASSERT
  // sanity check
  for (uint i = 0; i < len(); ++i) {
    assert(_in[i] == nullptr, "disconnect_inputs() failed!");
  }
#endif

  // Node::destruct requires all out edges be deleted first
  // DEBUG_ONLY(destruct();)   // no reuse benefit expected
  C->record_dead_node(_idx);
}
973
974 //-----------------------------uncast---------------------------------------
975 // %%% Temporary, until we sort out CheckCastPP vs. CastPP.
976 // Strip away casting. (It is depth-limited.)
977 // Optionally, keep casts with dependencies.
978 Node* Node::uncast(bool keep_deps) const {
979 // Should be inline:
980 //return is_ConstraintCast() ? uncast_helper(this) : (Node*) this;
981 if (is_ConstraintCast()) {
982 return uncast_helper(this, keep_deps);
983 } else {
984 return (Node*) this;
985 }
986 }
987
988 // Find out of current node that matches opcode.
989 Node* Node::find_out_with(int opcode) {
990 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
991 Node* use = fast_out(i);
992 if (use->Opcode() == opcode) {
993 return use;
994 }
995 }
996 return nullptr;
997 }
998
999 // Return true if the current node has an out that matches opcode.
1000 bool Node::has_out_with(int opcode) {
1001 return (find_out_with(opcode) != nullptr);
1002 }
1003
1004 // Return true if the current node has an out that matches any of the opcodes.
1005 bool Node::has_out_with(int opcode1, int opcode2, int opcode3, int opcode4) {
1006 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
1007 int opcode = fast_out(i)->Opcode();
1008 if (opcode == opcode1 || opcode == opcode2 || opcode == opcode3 || opcode == opcode4) {
1009 return true;
1010 }
1011 }
1012 return false;
1013 }
1014
1015
//---------------------------uncast_helper-------------------------------------
// Walk down a chain of ConstraintCast nodes (each with exactly one data
// input, i.e. req() == 2) and return the first non-cast node. If 'keep_deps'
// is set, the walk stops at casts that carry a dependency. In debug builds
// the walk is bounded by the total node count to catch graph cycles.
Node* Node::uncast_helper(const Node* p, bool keep_deps) {
#ifdef ASSERT
  // If we end up traversing more nodes than we actually have,
  // it is definitely an infinite loop.
  uint max_depth = Compile::current()->unique();
  uint depth_count = 0;
  const Node* orig_p = p;
#endif

  while (true) {
#ifdef ASSERT
    if (depth_count++ >= max_depth) {
      orig_p->dump(4);
      if (p != orig_p) {
        p->dump(1);
      }
      fatal("infinite loop in Node::uncast_helper");
    }
#endif
    if (p == nullptr || p->req() != 2) {
      break;
    } else if (p->is_ConstraintCast()) {
      if (keep_deps && p->as_ConstraintCast()->carry_dependency()) {
        break; // stop at casts with dependencies
      }
      p = p->in(1);
    } else {
      break;
    }
  }
  return (Node*) p;
}
1049
//------------------------------add_prec---------------------------------------
// Add a new precedence input. Precedence inputs are unordered, with
// duplicates removed and nulls packed down at the end.
void Node::add_prec( Node *n ) {
  assert( is_not_dead(n), "can not use dead node");

  // Check for null at end
  if( _cnt >= _max || in(_max-1) )
    grow( _max+1 );

  // Scan past the existing prec edges to the first null slot; bail out early
  // if the edge is already present (duplicates would be a spec violation).
  uint i = _cnt;
  while( in(i) != nullptr ) {
    if (in(i) == n) return; // Avoid spec violation: duplicated prec edge.
    i++;
  }
  _in[i] = n;                                  // Stuff prec edge over null
  if ( n != nullptr) n->add_out((Node *)this); // Add mirror edge

#ifdef ASSERT
  // Everything past the newly filled slot must still be null (packed invariant).
  while ((++i)<_max) { assert(_in[i] == nullptr, "spec violation: Gap in prec edges (node %d)", _idx); }
#endif
  Compile::current()->record_modified_node(this);
}
1074
//------------------------------rm_prec----------------------------------------
// Remove a precedence input. Precedence inputs are unordered, with
// duplicates removed and nulls packed down at the end.
void Node::rm_prec( uint j ) {
  assert(j < _max, "oob: i=%d, _max=%d", j, _max);
  assert(j >= _cnt, "not a precedence edge");
  if (_in[j] == nullptr) return;   // Avoid spec violation: Gap in prec edges.
  _in[j]->del_out((Node *)this);
  // Keep the prec section packed: pull the last prec edge into slot j.
  close_prec_gap_at(j);
  Compile::current()->record_modified_node(this);
}
1086
//------------------------------size_of----------------------------------------
// Size in bytes of this node object (subclasses with extra fields override).
uint Node::size_of() const { return sizeof(*this); }
1089
//------------------------------ideal_reg--------------------------------------
// Ideal register class for this node's result; the base class default is 0
// (no result register), subclasses override as appropriate.
uint Node::ideal_reg() const { return 0; }
1092
//------------------------------jvms-------------------------------------------
// JVM state attached to this node; nullptr by default (SafePoint-like
// subclasses override).
JVMState* Node::jvms() const { return nullptr; }
1095
1096 #ifdef ASSERT
1097 //------------------------------jvms-------------------------------------------
1098 bool Node::verify_jvms(const JVMState* using_jvms) const {
1099 for (JVMState* jvms = this->jvms(); jvms != nullptr; jvms = jvms->caller()) {
1100 if (jvms == using_jvms) return true;
1101 }
1102 return false;
1103 }
1104
//------------------------------init_NodeProperty------------------------------
// Debug-only sanity check that the class-id and flag allocations still fit
// in a juint.
void Node::init_NodeProperty() {
  assert(_max_classes <= max_juint, "too many NodeProperty classes");
  assert(max_flags() <= max_juint, "too many NodeProperty flags");
}
1110
//-----------------------------max_flags---------------------------------------
// Mask covering every flag bit up to and including PD::_last_flag, so that
// arbitrary combinations of flags remain representable.
juint Node::max_flags() {
  return (PD::_last_flag << 1) - 1; // allow flags combination
}
1115 #endif
1116
//------------------------------format-----------------------------------------
// Print as assembly; no-op in the base class (Mach nodes override).
void Node::format( PhaseRegAlloc *, outputStream *st ) const {}
//------------------------------emit-------------------------------------------
// Emit bytes using C2_MacroAssembler; no-op in the base class.
void Node::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {}
//------------------------------size-------------------------------------------
// Size of instruction in bytes; 0 in the base class (no machine encoding).
uint Node::size(PhaseRegAlloc *ra_) const { return 0; }
1126
//------------------------------CFG Construction-------------------------------
// Nodes that end basic blocks, e.g. IfTrue/IfFalse, JumpProjNode, Root,
// Goto and Return. Base class default: not a block projection.
const Node *Node::is_block_proj() const { return nullptr; }
1131
// Minimum guaranteed type: the base class knows nothing, so the worst-case
// answer is Type::BOTTOM.
const Type *Node::bottom_type() const { return Type::BOTTOM; }
1134
1135
//------------------------------raise_bottom_type------------------------------
// Get the worst-case Type output for this Node.
// Only Type and Load nodes carry a mutable cached type; for all other nodes
// this is a no-op. With -XX:+VerifyAliases the new type must refine the old.
void Node::raise_bottom_type(const Type* new_type) {
  if (is_Type()) {
    TypeNode *n = this->as_Type();
    if (VerifyAliases) {
      assert(new_type->higher_equal_speculative(n->type()), "new type must refine old type");
    }
    n->set_type(new_type);
  } else if (is_Load()) {
    LoadNode *n = this->as_Load();
    if (VerifyAliases) {
      assert(new_type->higher_equal_speculative(n->type()), "new type must refine old type");
    }
    n->set_type(new_type);
  }
}
1153
//------------------------------Identity---------------------------------------
// Return a node that the given node is equivalent to; the base class has no
// identities, so it returns itself.
Node* Node::Identity(PhaseGVN* phase) {
  return this;                  // Default to no identities
}
1159
//------------------------------Value------------------------------------------
// Compute a new Type for a node using the Type of the inputs; the base class
// cannot do better than the node's own bottom type.
const Type* Node::Value(PhaseGVN* phase) const {
  return bottom_type();         // Default to worst-case Type
}
1165
1166 //------------------------------Ideal------------------------------------------
1167 //
1168 // 'Idealize' the graph rooted at this Node.
1169 //
1170 // In order to be efficient and flexible there are some subtle invariants
1171 // these Ideal calls need to hold. Some of the flag bits for '-XX:VerifyIterativeGVN'
1172 // can help with validating these invariants, although they are too slow to have on by default:
1173 // - '-XX:VerifyIterativeGVN=1' checks the def-use info
1174 // - '-XX:VerifyIterativeGVN=100000' checks the return value
1175 // If you are hacking an Ideal call, be sure to use these.
1176 //
// The Ideal call can almost arbitrarily reshape the graph rooted at the 'this'
1178 // pointer. If ANY change is made, it must return the root of the reshaped
1179 // graph - even if the root is the same Node. Example: swapping the inputs
1180 // to an AddINode gives the same answer and same root, but you still have to
1181 // return the 'this' pointer instead of null. If the node was already dead
1182 // before the Ideal call, this rule does not apply, and it is fine to return
1183 // nullptr even if modifications were made.
1184 //
1185 // You cannot return an OLD Node, except for the 'this' pointer. Use the
1186 // Identity call to return an old Node; basically if Identity can find
1187 // another Node have the Ideal call make no change and return null.
1188 // Example: AddINode::Ideal must check for add of zero; in this case it
1189 // returns null instead of doing any graph reshaping.
1190 //
1191 // You cannot modify any old Nodes except for the 'this' pointer. Due to
1192 // sharing there may be other users of the old Nodes relying on their current
1193 // semantics. Modifying them will break the other users.
// Example: when reshaping "(X+3)+4" into "X+7" you must leave the Node for
1195 // "X+3" unchanged in case it is shared.
1196 //
1197 // If you modify the 'this' pointer's inputs, you should use
1198 // 'set_req'. If you are making a new Node (either as the new root or
1199 // some new internal piece) you may use 'init_req' to set the initial
1200 // value. You can make a new Node with either 'new' or 'clone'. In
1201 // either case, def-use info is correctly maintained.
1202 //
1203 // Example: reshape "(X+3)+4" into "X+7":
1204 // set_req(1, in(1)->in(1));
1205 // set_req(2, phase->intcon(7));
1206 // return this;
1207 // Example: reshape "X*4" into "X<<2"
1208 // return new LShiftINode(in(1), phase->intcon(2));
1209 //
1210 // You must call 'phase->transform(X)' on any new Nodes X you make, except
1211 // for the returned root node. Example: reshape "X*31" with "(X<<5)-X".
1212 // Node *shift=phase->transform(new LShiftINode(in(1),phase->intcon(5)));
1213 // return new AddINode(shift, in(1));
1214 //
1215 // When making a Node for a constant use 'phase->makecon' or 'phase->intcon'.
1216 // These forms are faster than 'phase->transform(new ConNode())' and Do
1217 // The Right Thing with def-use info.
1218 //
1219 // You cannot bury the 'this' Node inside of a graph reshape. If the reshaped
1220 // graph uses the 'this' Node it must be the root. If you want a Node with
1221 // the same Opcode as the 'this' pointer use 'clone'.
1222 //
// Base-class Ideal performs no transformation; subclasses override it,
// following the contract documented in the comment block above.
Node *Node::Ideal(PhaseGVN *phase, bool can_reshape) {
  return nullptr;               // Default to being Ideal already
}
1226
1227 // Some nodes have specific Ideal subgraph transformations only if they are
1228 // unique users of specific nodes. Such nodes should be put on IGVN worklist
1229 // for the transformations to happen.
1230 bool Node::has_special_unique_user() const {
1231 assert(outcnt() == 1, "match only for unique out");
1232 Node* n = unique_out();
1233 int op = Opcode();
1234 if (this->is_Store()) {
1235 // Condition for back-to-back stores folding.
1236 return n->Opcode() == op && n->in(MemNode::Memory) == this;
1237 } else if ((this->is_Load() || this->is_DecodeN() || this->is_Phi() || this->is_Con()) && n->Opcode() == Op_MemBarAcquire) {
1238 // Condition for removing an unused LoadNode or DecodeNNode from the MemBarAcquire precedence input
1239 return true;
1240 } else if (this->is_Load() && n->is_Move()) {
1241 // Condition for MoveX2Y (LoadX mem) => LoadY mem
1242 return true;
1243 } else if (op == Op_AddL) {
1244 // Condition for convL2I(addL(x,y)) ==> addI(convL2I(x),convL2I(y))
1245 return n->Opcode() == Op_ConvL2I && n->in(1) == this;
1246 } else if (op == Op_SubI || op == Op_SubL) {
1247 // Condition for subI(x,subI(y,z)) ==> subI(addI(x,z),y)
1248 return n->Opcode() == op && n->in(2) == this;
1249 } else if (is_If() && (n->is_IfFalse() || n->is_IfTrue())) {
1250 // See IfProjNode::Identity()
1251 return true;
1252 } else if ((is_IfFalse() || is_IfTrue()) && n->is_If()) {
1253 // See IfNode::fold_compares
1254 return true;
1255 } else if (n->Opcode() == Op_XorV || n->Opcode() == Op_XorVMask) {
1256 // Condition for XorVMask(VectorMaskCmp(x,y,cond), MaskAll(true)) ==> VectorMaskCmp(x,y,ncond)
1257 return true;
1258 } else {
1259 return false;
1260 }
1261 };
1262
//--------------------------find_exact_control---------------------------------
// Skip Proj and CatchProj nodes chains. Check for Null and Top.
// Normalizes 'ctrl' to the node that actually produces control: copy Regions
// are replaced by their single input, and fall-through CatchProj chains and
// plain projections are walked up via in(0).
Node* Node::find_exact_control(Node* ctrl) {
  if (ctrl == nullptr && this->is_Region())
    ctrl = this->as_Region()->is_copy();  // copy Region stands for its input

  if (ctrl != nullptr && ctrl->is_CatchProj()) {
    if (ctrl->as_CatchProj()->_con == CatchProjNode::fall_through_index)
      ctrl = ctrl->in(0);                 // step past the CatchProj
    if (ctrl != nullptr && !ctrl->is_top())
      ctrl = ctrl->in(0);                 // and past its parent as well
  }

  if (ctrl != nullptr && ctrl->is_Proj())
    ctrl = ctrl->in(0);

  return ctrl;
}
1281
//--------------------------dominates------------------------------------------
// Helper function for MemNode::all_controls_dominate().
// Check if 'this' control node dominates or is equal to 'sub' control node.
// We already know that if any path back to Root or Start reaches 'this',
// then all paths do so, so this is a simple search for one example,
// not an exhaustive search for a counterexample.
// 'nlist' records visited Region nodes; the low bit of each stored pointer
// tags a Region whose alternative input path has already been tried.
Node::DomResult Node::dominates(Node* sub, Node_List &nlist) {
  assert(this->is_CFG(), "expecting control");
  assert(sub != nullptr && sub->is_CFG(), "expecting control");

  // detect dead cycle without regions
  int iterations_without_region_limit = DominatorSearchLimit;

  Node* orig_sub = sub;
  Node* dom      = this;
  bool  met_dom  = false;
  nlist.clear();

  // Walk 'sub' backward up the chain to 'dom', watching for regions.
  // After seeing 'dom', continue up to Root or Start.
  // If we hit a region (backward split point), it may be a loop head.
  // Keep going through one of the region's inputs.  If we reach the
  // same region again, go through a different input.  Eventually we
  // will either exit through the loop head, or give up.
  // (If we get confused, break out and return a conservative 'false'.)
  while (sub != nullptr) {
    if (sub->is_top()) {
      // Conservative answer for dead code.
      return DomResult::EncounteredDeadCode;
    }
    if (sub == dom) {
      if (nlist.size() == 0) {
        // No Region nodes except loops were visited before and the EntryControl
        // path was taken for loops: it did not walk in a cycle.
        return DomResult::Dominate;
      } else if (met_dom) {
        break;          // already met before: walk in a cycle
      } else {
        // Region nodes were visited. Continue walk up to Start or Root
        // to make sure that it did not walk in a cycle.
        met_dom = true; // first time meet
        iterations_without_region_limit = DominatorSearchLimit; // Reset
      }
    }
    if (sub->is_Start() || sub->is_Root()) {
      // Success if we met 'dom' along a path to Start or Root.
      // We assume there are no alternative paths that avoid 'dom'.
      // (This assumption is up to the caller to ensure!)
      return met_dom ? DomResult::Dominate : DomResult::NotDominate;
    }
    Node* up = sub->in(0);
    // Normalize simple pass-through regions and projections:
    up = sub->find_exact_control(up);
    // If sub == up, we found a self-loop.  Try to push past it.
    if (sub == up && sub->is_Loop()) {
      // Take loop entry path on the way up to 'dom'.
      up = sub->in(1); // in(LoopNode::EntryControl);
    } else if (sub == up && sub->is_Region() && sub->req() == 2) {
      // Take in(1) path on the way up to 'dom' for regions with only one input
      up = sub->in(1);
    } else if (sub == up && sub->is_Region()) {
      // Try both paths for Regions with 2 input paths (it may be a loop head).
      // It could give conservative 'false' answer without information
      // which region's input is the entry path.
      iterations_without_region_limit = DominatorSearchLimit; // Reset

      bool region_was_visited_before = false;
      // Was this Region node visited before?
      // If so, we have reached it because we accidentally took a
      // loop-back edge from 'sub' back into the body of the loop,
      // and worked our way up again to the loop header 'sub'.
      // So, take the first unexplored path on the way up to 'dom'.
      for (int j = nlist.size() - 1; j >= 0; j--) {
        intptr_t ni = (intptr_t)nlist.at(j);
        Node* visited = (Node*)(ni & ~1);
        bool visited_twice_already = ((ni & 1) != 0);
        if (visited == sub) {
          if (visited_twice_already) {
            // Visited 2 paths, but still stuck in loop body.  Give up.
            return DomResult::NotDominate;
          }
          // The Region node was visited before only once.
          // (We will repush with the low bit set, below.)
          nlist.remove(j);
          // We will find a new edge and re-insert.
          region_was_visited_before = true;
          break;
        }
      }

      // Find an incoming edge which has not been seen yet; walk through it.
      assert(up == sub, "");
      uint skip = region_was_visited_before ? 1 : 0;
      for (uint i = 1; i < sub->req(); i++) {
        Node* in = sub->in(i);
        if (in != nullptr && !in->is_top() && in != sub) {
          if (skip == 0) {
            up = in;
            break;
          }
          --skip; // skip this nontrivial input
        }
      }

      // Set 0 bit to indicate that both paths were taken.
      nlist.push((Node*)((intptr_t)sub + (region_was_visited_before ? 1 : 0)));
    }

    if (up == sub) {
      break; // some kind of tight cycle
    }
    if (up == orig_sub && met_dom) {
      // returned back after visiting 'dom'
      break; // some kind of cycle
    }
    if (--iterations_without_region_limit < 0) {
      break; // dead cycle
    }
    sub = up;
  }

  // Did not meet Root or Start node in pred. chain.
  return DomResult::NotDominate;
}
1406
//------------------------------kill_dead_code---------------------------------
// This control node is dead. Follow the subgraph below it making everything
// using it dead as well. This will happen normally via the usual IterGVN
// worklist but this call is more efficient. Do not update use-def info
// inside the dead region, just at the borders.
static void kill_dead_code( Node *dead, PhaseIterGVN *igvn ) {
  // Con's are a popular node to re-hit in the hash table again.
  if( dead->is_Con() ) return;

  ResourceMark rm;
  Node_List nstack;
  VectorSet dead_set; // notify uses only once

  Node *top = igvn->C->top();
  nstack.push(dead);
  bool has_irreducible_loop = igvn->C->has_irreducible_loop();

  while (nstack.size() > 0) {
    dead = nstack.pop();
    if (!dead_set.test_set(dead->_idx)) {
      // If dead has any live uses, those are now still attached. Notify them before we lose them.
      igvn->add_users_to_worklist(dead);
    }
    if (dead->Opcode() == Op_SafePoint) {
      dead->as_SafePoint()->disconnect_from_root(igvn);
    }
    if (dead->outcnt() > 0) {
      // Keep dead node on stack until all uses are processed.
      nstack.push(dead);
      // For all Users of the Dead...    ;-)
      for (DUIterator_Last kmin, k = dead->last_outs(kmin); k >= kmin; ) {
        Node* use = dead->last_out(k);
        igvn->hash_delete(use);       // Yank from hash table prior to mod
        if (use->in(0) == dead) {     // Found another dead node
          assert (!use->is_Con(), "Control for Con node should be Root node.");
          use->set_req(0, top);       // Cut dead edge to prevent processing
          nstack.push(use);           // the dead node again.
        } else if (!has_irreducible_loop && // Backedge could be alive in irreducible loop
                   use->is_Loop() && !use->is_Root() &&       // Don't kill Root (RootNode extends LoopNode)
                   use->in(LoopNode::EntryControl) == dead) { // Dead loop if its entry is dead
          use->set_req(LoopNode::EntryControl, top); // Cut dead edge to prevent processing
          use->set_req(0, top);       // Cut self edge
          nstack.push(use);
        } else {                      // Else found a not-dead user
          // Dead if all inputs are top or null
          bool dead_use = !use->is_Root(); // Keep empty graph alive
          for (uint j = 1; j < use->req(); j++) {
            Node* in = use->in(j);
            if (in == dead) {         // Turn all dead inputs into TOP
              use->set_req(j, top);
            } else if (in != nullptr && !in->is_top()) {
              dead_use = false;
            }
          }
          if (dead_use) {
            if (use->is_Region()) {
              use->set_req(0, top);   // Cut self edge
            }
            nstack.push(use);
          } else {
            igvn->_worklist.push(use);
          }
        }
        // Refresh the iterator, since any number of kills might have happened.
        k = dead->last_outs(kmin);
      }
    } else { // (dead->outcnt() == 0)
      // Done with outputs.
      igvn->hash_delete(dead);
      igvn->_worklist.remove(dead);
      igvn->set_type(dead, Type::TOP);
      // Kill all inputs to the dead guy
      for (uint i=0; i < dead->req(); i++) {
        Node *n = dead->in(i);        // Get input to dead guy
        if (n != nullptr && !n->is_top()) { // Input is valid?
          dead->set_req(i, top);      // Smash input away
          if (n->outcnt() == 0) {     // Input also goes dead?
            if (!n->is_Con())
              nstack.push(n);         // Clear it out as well
          } else if (n->outcnt() == 1 &&
                     n->has_special_unique_user()) {
            igvn->add_users_to_worklist( n );
          } else if (n->outcnt() <= 2 && n->is_Store()) {
            // Push store's uses on worklist to enable folding optimization for
            // store/store and store/load to the same address.
            // The restriction (outcnt() <= 2) is the same as in set_req_X()
            // and remove_globally_dead_node().
            igvn->add_users_to_worklist( n );
          } else if (dead->is_data_proj_of_pure_function(n)) {
            igvn->_worklist.push(n);
          } else {
            BarrierSet::barrier_set()->barrier_set_c2()->enqueue_useful_gc_barrier(igvn, n);
          }
        }
      }
      igvn->C->remove_useless_node(dead);
    } // (dead->outcnt() == 0)
  }   // while (nstack.size() > 0) for outputs
  return;
}
1507
//------------------------------remove_dead_region-----------------------------
// If the control input is top, this node is unreachable: kill it (and the
// code hanging off it) via kill_dead_code(). If the control input is a copy
// Region, bypass the Region. Returns true iff the graph was changed and the
// node is still alive.
bool Node::remove_dead_region(PhaseGVN *phase, bool can_reshape) {
  Node *n = in(0);
  if( !n ) return false;
  // Lost control into this guy? I.e., it became unreachable?
  // Aggressively kill all unreachable code.
  if (can_reshape && n->is_top()) {
    kill_dead_code(this, phase->is_IterGVN());
    return false; // Node is dead.
  }

  // A copy Region forwards a single live input; hook directly to it.
  if( n->is_Region() && n->as_Region()->is_copy() ) {
    Node *m = n->nonnull_req();
    set_req(0, m);
    return true;
  }
  return false;
}
1526
//------------------------------hash-------------------------------------------
// Hash function over Nodes: mixes the opcode, the required-input count, and
// the identities of all required inputs. Precedence edges do not participate.
uint Node::hash() const {
  uint sum = 0;
  for( uint i=0; i<_cnt; i++ )        // Add in all inputs
    sum = (sum<<1)-(uintptr_t)in(i);  // Ignore embedded nulls
  return (sum>>2) + _cnt + Opcode();
}
1535
//------------------------------cmp--------------------------------------------
// Compare special parts of simple Nodes. The base class has no extra state
// beyond opcode and inputs (already compared by the caller), so it always
// answers "equal"; subclasses with embedded data override this.
bool Node::cmp( const Node &n ) const {
  return true;                  // Must be same
}
1541
1542 //------------------------------rematerialize-----------------------------------
1543 // Should we clone rather than spill this instruction?
1544 bool Node::rematerialize() const {
1545 if ( is_Mach() )
1546 return this->as_Mach()->rematerialize();
1547 else
1548 return (_flags & Flag_rematerialize) != 0;
1549 }
1550
//------------------------------needs_anti_dependence_check---------------------
// Nodes which use memory without consuming it, hence need antidependences.
// Requires the flag to be set and in(1) — presumably the memory input, as in
// the LoadNode layout — to have a memory component in its type.
bool Node::needs_anti_dependence_check() const {
  if (req() < 2 || (_flags & Flag_needs_anti_dependence_check) == 0) {
    return false;
  }
  return in(1)->bottom_type()->has_memory();
}
1559
1560 // Get an integer constant from a ConNode (or CastIINode).
1561 // Return a default value if there is no apparent constant here.
1562 const TypeInt* Node::find_int_type() const {
1563 if (this->is_Type()) {
1564 return this->as_Type()->type()->isa_int();
1565 } else if (this->is_Con()) {
1566 assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode");
1567 return this->bottom_type()->isa_int();
1568 }
1569 return nullptr;
1570 }
1571
1572 const TypeInteger* Node::find_integer_type(BasicType bt) const {
1573 if (this->is_Type()) {
1574 return this->as_Type()->type()->isa_integer(bt);
1575 } else if (this->is_Con()) {
1576 assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode");
1577 return this->bottom_type()->isa_integer(bt);
1578 }
1579 return nullptr;
1580 }
1581
// Get a pointer constant from a ConstNode.
// Returns the constant if it is a pointer ConstNode; asserts on any other
// opcode.
intptr_t Node::get_ptr() const {
  assert( Opcode() == Op_ConP, "" );
  return ((ConPNode*)this)->type()->is_ptr()->get_con();
}
1588
// Get a narrow oop constant from a ConNNode; asserts on any other opcode.
intptr_t Node::get_narrowcon() const {
  assert( Opcode() == Op_ConN, "" );
  return ((ConNNode*)this)->type()->is_narrowoop()->get_con();
}
1594
1595 // Get a long constant from a ConNode.
1596 // Return a default value if there is no apparent constant here.
1597 const TypeLong* Node::find_long_type() const {
1598 if (this->is_Type()) {
1599 return this->as_Type()->type()->isa_long();
1600 } else if (this->is_Con()) {
1601 assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode");
1602 return this->bottom_type()->isa_long();
1603 }
1604 return nullptr;
1605 }
1606
1607
1608 /**
1609 * Return a ptr type for nodes which should have it.
1610 */
1611 const TypePtr* Node::get_ptr_type() const {
1612 const TypePtr* tp = this->bottom_type()->make_ptr();
1613 #ifdef ASSERT
1614 if (tp == nullptr) {
1615 this->dump(1);
1616 assert((tp != nullptr), "unexpected node type");
1617 }
1618 #endif
1619 return tp;
1620 }
1621
// Get a double constant from a ConstNode.
// Returns the constant if it is a double ConstNode; asserts otherwise.
jdouble Node::getd() const {
  assert( Opcode() == Op_ConD, "" );
  return ((ConDNode*)this)->type()->is_double_constant()->getd();
}
1628
// Get a float constant from a ConstNode.
// Returns the constant if it is a float ConstNode; asserts otherwise.
jfloat Node::getf() const {
  assert( Opcode() == Op_ConF, "" );
  return ((ConFNode*)this)->type()->is_float_constant()->getf();
}
1635
// Get a half float constant from a ConstNode.
// Returns the raw bits if it is a half float ConstNode; asserts otherwise.
jshort Node::geth() const {
  assert( Opcode() == Op_ConH, "" );
  return ((ConHNode*)this)->type()->is_half_float_constant()->geth();
}
1642
1643 #ifndef PRODUCT
1644
// Call this from debugger:
// Returns the pre-matching (old) node corresponding to the current root, if
// the matcher has run and recorded it; otherwise prints a diagnostic and
// returns nullptr.
Node* old_root() {
  Matcher* matcher = Compile::current()->matcher();
  if (matcher != nullptr) {
    Node* new_root = Compile::current()->root();
    Node* old_root = matcher->find_old_node(new_root);
    if (old_root != nullptr) {
      return old_root;
    }
  }
  tty->print("old_root: not found.\n");
  return nullptr;
}
1658
1659 // BFS traverse all reachable nodes from start, call callback on them
1660 template <typename Callback>
1661 void visit_nodes(Node* start, Callback callback, bool traverse_output, bool only_ctrl) {
1662 Unique_Mixed_Node_List worklist;
1663 worklist.add(start);
1664 for (uint i = 0; i < worklist.size(); i++) {
1665 Node* n = worklist[i];
1666 callback(n);
1667 for (uint i = 0; i < n->len(); i++) {
1668 if (!only_ctrl || n->is_Region() || (n->Opcode() == Op_Root) || (i == TypeFunc::Control)) {
1669 // If only_ctrl is set: Add regions, the root node, or control inputs only
1670 worklist.add(n->in(i));
1671 }
1672 }
1673 if (traverse_output && !only_ctrl) {
1674 for (uint i = 0; i < n->outcnt(); i++) {
1675 worklist.add(n->raw_out(i));
1676 }
1677 }
1678 }
1679 }
1680
1681 // BFS traverse from start, return node with idx
1682 static Node* find_node_by_idx(Node* start, uint idx, bool traverse_output, bool only_ctrl) {
1683 ResourceMark rm;
1684 Node* result = nullptr;
1685 auto callback = [&] (Node* n) {
1686 if (n->_idx == idx) {
1687 if (result != nullptr) {
1688 tty->print("find_node_by_idx: " INTPTR_FORMAT " and " INTPTR_FORMAT " both have idx==%d\n",
1689 (uintptr_t)result, (uintptr_t)n, idx);
1690 }
1691 result = n;
1692 }
1693 };
1694 visit_nodes(start, callback, traverse_output, only_ctrl);
1695 return result;
1696 }
1697
1698 static int node_idx_cmp(const Node** n1, const Node** n2) {
1699 return (*n1)->_idx - (*n2)->_idx;
1700 }
1701
1702 static void find_nodes_by_name(Node* start, const char* name) {
1703 ResourceMark rm;
1704 GrowableArray<const Node*> ns;
1705 auto callback = [&] (const Node* n) {
1706 if (StringUtils::is_star_match(name, n->Name())) {
1707 ns.push(n);
1708 }
1709 };
1710 visit_nodes(start, callback, true, false);
1711 ns.sort(node_idx_cmp);
1712 for (int i = 0; i < ns.length(); i++) {
1713 ns.at(i)->dump();
1714 }
1715 }
1716
1717 static void find_nodes_by_dump(Node* start, const char* pattern) {
1718 ResourceMark rm;
1719 GrowableArray<const Node*> ns;
1720 auto callback = [&] (const Node* n) {
1721 stringStream stream;
1722 n->dump("", false, &stream);
1723 if (StringUtils::is_star_match(pattern, stream.base())) {
1724 ns.push(n);
1725 }
1726 };
1727 visit_nodes(start, callback, true, false);
1728 ns.sort(node_idx_cmp);
1729 for (int i = 0; i < ns.length(); i++) {
1730 ns.at(i)->dump();
1731 }
1732 }
1733
1734 // call from debugger: find node with name pattern in new/current graph
1735 // name can contain "*" in match pattern to match any characters
1736 // the matching is case insensitive
1737 void find_nodes_by_name(const char* name) {
1738 Node* root = Compile::current()->root();
1739 find_nodes_by_name(root, name);
1740 }
1741
1742 // call from debugger: find node with name pattern in old graph
1743 // name can contain "*" in match pattern to match any characters
1744 // the matching is case insensitive
1745 void find_old_nodes_by_name(const char* name) {
1746 Node* root = old_root();
1747 find_nodes_by_name(root, name);
1748 }
1749
1750 // call from debugger: find node with dump pattern in new/current graph
1751 // can contain "*" in match pattern to match any characters
1752 // the matching is case insensitive
1753 void find_nodes_by_dump(const char* pattern) {
1754 Node* root = Compile::current()->root();
1755 find_nodes_by_dump(root, pattern);
1756 }
1757
1758 // call from debugger: find node with name pattern in old graph
1759 // can contain "*" in match pattern to match any characters
1760 // the matching is case insensitive
1761 void find_old_nodes_by_dump(const char* pattern) {
1762 Node* root = old_root();
1763 find_nodes_by_dump(root, pattern);
1764 }
1765
// Call this from debugger, search in same graph as n:
// Thin wrapper over Node::find (searches both directions from n).
Node* find_node(Node* n, const int idx) {
  return n->find(idx);
}
1770
// Call this from debugger, search in new nodes:
// Starts the search from the current compilation's root node.
Node* find_node(const int idx) {
  return Compile::current()->root()->find(idx);
}
1775
1776 // Call this from debugger, search in old nodes:
1777 Node* find_old_node(const int idx) {
1778 Node* root = old_root();
1779 return (root == nullptr) ? nullptr : root->find(idx);
1780 }
1781
// Call this from debugger, search in same graph as n:
// Like find_node, but restricted to the control subgraph.
Node* find_ctrl(Node* n, const int idx) {
  return n->find_ctrl(idx);
}
1786
// Call this from debugger, search in new nodes:
// Control-only search starting from the current compilation's root.
Node* find_ctrl(const int idx) {
  return Compile::current()->root()->find_ctrl(idx);
}
1791
1792 // Call this from debugger, search in old nodes:
1793 Node* find_old_ctrl(const int idx) {
1794 Node* root = old_root();
1795 return (root == nullptr) ? nullptr : root->find_ctrl(idx);
1796 }
1797
//------------------------------find_ctrl--------------------------------------
// Find an ancestor to this node in the control history with given _idx
// (delegates to find() with only_ctrl = true).
Node* Node::find_ctrl(int idx) {
  return find(idx, true);
}
1803
1804 //------------------------------find-------------------------------------------
1805 // Tries to find the node with the index |idx| starting from this node. If idx is negative,
1806 // the search also includes forward (out) edges. Returns null if not found.
1807 // If only_ctrl is set, the search will only be done on control nodes. Returns null if
1808 // not found or if the node to be found is not a control node (search will not find it).
1809 Node* Node::find(const int idx, bool only_ctrl) {
1810 ResourceMark rm;
1811 return find_node_by_idx(this, abs(idx), (idx < 0), only_ctrl);
1812 }
1813
// Debugger helper behind Node::dump_bfs(): performs a filtered BFS from
// _start (optionally toward _target), then selects, sorts and prints the
// resulting nodes. See print_options_help() for the option characters.
class PrintBFS {
public:
  PrintBFS(const Node* start, const int max_distance, const Node* target, const char* options, outputStream* st, const frame* fr)
  : _start(start), _max_distance(max_distance), _target(target), _options(options), _output(st), _frame(fr),
    _dcc(this), _info_uid(cmpkey, hashkey) {}

  void run();
private:
  // pipeline steps
  bool configure();
  void collect();
  void select();
  void select_all();
  void select_all_paths();
  void select_shortest_path();
  void sort();
  void print();

  // inputs
  const Node* _start;
  const int _max_distance;
  const Node* _target;
  const char* _options;
  outputStream* _output;
  const frame* _frame;

  // options
  bool _traverse_inputs = false;
  bool _traverse_outputs = false;
  // Node-category filter; one instance decides what to traverse through
  // (_filter_visit) and one what to show at the boundary (_filter_boundary).
  struct Filter {
    bool _control = false;
    bool _memory = false;
    bool _data = false;
    bool _mixed = false;
    bool _other = false;
    bool is_empty() const {
      return !(_control || _memory || _data || _mixed || _other);
    }
    void set_all() {
      _control = true;
      _memory = true;
      _data = true;
      _mixed = true;
      _other = true;
    }
    // Check if the filter accepts the node. Go by the type categories, but also all CFG nodes
    // are considered to have control.
    bool accepts(const Node* n) {
      const Type* t = n->bottom_type();
      return ( _data && t->has_category(Type::Category::Data) ) ||
             ( _memory && t->has_category(Type::Category::Memory) ) ||
             ( _mixed && t->has_category(Type::Category::Mixed) ) ||
             ( _control && (t->has_category(Type::Category::Control) || n->is_CFG()) ) ||
             ( _other && t->has_category(Type::Category::Other) );
    }
  };
  Filter _filter_visit;
  Filter _filter_boundary;
  bool _sort_idx = false;
  bool _all_paths = false;
  bool _use_color = false;
  bool _print_blocks = false;
  bool _print_old = false;
  bool _dump_only = false;
  bool _print_igv = false;

  void print_options_help(bool print_examples);
  bool parse_options();

public:
  // Per-node ANSI coloring hook used by Node::dump when '#' is in options.
  class DumpConfigColored : public Node::DumpConfig {
  public:
    DumpConfigColored(PrintBFS* bfs) : _bfs(bfs) {};
    virtual void pre_dump(outputStream* st, const Node* n);
    virtual void post_dump(outputStream* st);
  private:
    PrintBFS* _bfs;
  };
private:
  DumpConfigColored _dcc;

  // node info
  static Node* old_node(const Node* n); // mach node -> prior IR node
  void print_node_idx(const Node* n);
  void print_block_id(const Block* b);
  void print_node_block(const Node* n); // _pre_order, head idx, _idom, _dom_depth

  // traversal data structures
  GrowableArray<const Node*> _worklist; // BFS queue
  void maybe_traverse(const Node* src, const Node* dst);

  // node info annotation
  class Info {
  public:
    Info() : Info(nullptr, 0) {};
    Info(const Node* node, int distance)
      : _node(node), _distance_from_start(distance) {};
    const Node* node() const { return _node; };
    int distance() const { return _distance_from_start; };
    int distance_from_target() const { return _distance_from_target; }
    void set_distance_from_target(int d) { _distance_from_target = d; }
    GrowableArray<const Node*> edge_bwd; // pointing toward _start
    bool is_marked() const { return _mark; } // marked to keep during select
    void set_mark() { _mark = true; }
  private:
    const Node* _node;
    int _distance_from_start; // distance from _start
    int _distance_from_target = 0; // distance from _target if _all_paths
    bool _mark = false;
  };
  // Two-level lookup: node pointer -> uid (Dict), uid -> Info (array).
  // uid 0 is reserved as "not present", hence the +1 in make_info.
  Dict _info_uid; // Node -> uid
  GrowableArray<Info> _info; // uid -> info

  Info* find_info(const Node* n) {
    size_t uid = (size_t)_info_uid[n];
    if (uid == 0) {
      return nullptr; // Dict returns 0 for missing keys
    }
    return &_info.at((int)uid);
  }

  void make_info(const Node* node, const int distance) {
    assert(find_info(node) == nullptr, "node does not yet have info");
    size_t uid = _info.length() + 1; // skip uid 0 (the "missing" sentinel)
    _info_uid.Insert((void*)node, (void*)uid);
    _info.at_put_grow((int)uid, Info(node, distance));
    assert(find_info(node)->node() == node, "stored correct node");
  };

  // filled by sort, printed by print
  GrowableArray<const Node*> _print_list;

  // print header + node table
  void print_header() const;
  void print_node(const Node* n);
};
1950
1951 void PrintBFS::run() {
1952 if (!configure()) {
1953 return;
1954 }
1955 collect();
1956 select();
1957 sort();
1958 print();
1959 }
1960
1961 // set up configuration for BFS and print
1962 bool PrintBFS::configure() {
1963 if (_max_distance < 0) {
1964 _output->print_cr("dump_bfs: max_distance must be non-negative!");
1965 return false;
1966 }
1967 return parse_options();
1968 }
1969
1970 // BFS traverse according to configuration, fill worklist and info
1971 void PrintBFS::collect() {
1972 maybe_traverse(_start, _start);
1973 int pos = 0;
1974 while (pos < _worklist.length()) {
1975 const Node* n = _worklist.at(pos++); // next node to traverse
1976 Info* info = find_info(n);
1977 if (!_filter_visit.accepts(n) && n != _start) {
1978 continue; // we hit boundary, do not traverse further
1979 }
1980 if (n != _start && n->is_Root()) {
1981 continue; // traversing through root node would lead to unrelated nodes
1982 }
1983 if (_traverse_inputs && _max_distance > info->distance()) {
1984 for (uint i = 0; i < n->req(); i++) {
1985 maybe_traverse(n, n->in(i));
1986 }
1987 }
1988 if (_traverse_outputs && _max_distance > info->distance()) {
1989 for (uint i = 0; i < n->outcnt(); i++) {
1990 maybe_traverse(n, n->raw_out(i));
1991 }
1992 }
1993 }
1994 }
1995
1996 // go through work list, mark those that we want to print
1997 void PrintBFS::select() {
1998 if (_target == nullptr ) {
1999 select_all();
2000 } else {
2001 if (find_info(_target) == nullptr) {
2002 _output->print_cr("Could not find target in BFS.");
2003 return;
2004 }
2005 if (_all_paths) {
2006 select_all_paths();
2007 } else {
2008 select_shortest_path();
2009 }
2010 }
2011 }
2012
2013 // take all nodes from BFS
2014 void PrintBFS::select_all() {
2015 for (int i = 0; i < _worklist.length(); i++) {
2016 const Node* n = _worklist.at(i);
2017 Info* info = find_info(n);
2018 info->set_mark();
2019 }
2020 }
2021
2022 // traverse backward from target, along edges found in BFS
2023 void PrintBFS::select_all_paths() {
2024 int pos = 0;
2025 GrowableArray<const Node*> backtrace;
2026 // start from target
2027 backtrace.push(_target);
2028 find_info(_target)->set_mark();
2029 // traverse backward
2030 while (pos < backtrace.length()) {
2031 const Node* n = backtrace.at(pos++);
2032 Info* info = find_info(n);
2033 for (int i = 0; i < info->edge_bwd.length(); i++) {
2034 // all backward edges
2035 const Node* back = info->edge_bwd.at(i);
2036 Info* back_info = find_info(back);
2037 if (!back_info->is_marked()) {
2038 // not yet found this on way back.
2039 back_info->set_distance_from_target(info->distance_from_target() + 1);
2040 if (back_info->distance_from_target() + back_info->distance() <= _max_distance) {
2041 // total distance is small enough
2042 back_info->set_mark();
2043 backtrace.push(back);
2044 }
2045 }
2046 }
2047 }
2048 }
2049
2050 void PrintBFS::select_shortest_path() {
2051 const Node* current = _target;
2052 while (true) {
2053 Info* info = find_info(current);
2054 info->set_mark();
2055 if (current == _start) {
2056 break;
2057 }
2058 // first edge -> leads us one step closer to _start
2059 current = info->edge_bwd.at(0);
2060 }
2061 }
2062
2063 // go through worklist in desired order, put the marked ones in print list
2064 void PrintBFS::sort() {
2065 if (_traverse_inputs && !_traverse_outputs) {
2066 // reverse order
2067 for (int i = _worklist.length() - 1; i >= 0; i--) {
2068 const Node* n = _worklist.at(i);
2069 Info* info = find_info(n);
2070 if (info->is_marked()) {
2071 _print_list.push(n);
2072 }
2073 }
2074 } else {
2075 // same order as worklist
2076 for (int i = 0; i < _worklist.length(); i++) {
2077 const Node* n = _worklist.at(i);
2078 Info* info = find_info(n);
2079 if (info->is_marked()) {
2080 _print_list.push(n);
2081 }
2082 }
2083 }
2084 if (_sort_idx) {
2085 _print_list.sort(node_idx_cmp);
2086 }
2087 }
2088
2089 // go through printlist and print
2090 void PrintBFS::print() {
2091 if (_print_list.length() > 0 ) {
2092 print_header();
2093 for (int i = 0; i < _print_list.length(); i++) {
2094 const Node* n = _print_list.at(i);
2095 print_node(n);
2096 }
2097 if (_print_igv) {
2098 Compile* C = Compile::current();
2099 C->init_igv();
2100 C->igv_print_graph_to_network(nullptr, _print_list, _frame);
2101 }
2102 } else {
2103 _output->print_cr("No nodes to print.");
2104 }
2105 }
2106
2107 void PrintBFS::print_options_help(bool print_examples) {
2108 _output->print_cr("Usage: node->dump_bfs(int max_distance, Node* target, char* options)");
2109 _output->print_cr("");
2110 _output->print_cr("Use cases:");
2111 _output->print_cr(" BFS traversal: no target required");
2112 _output->print_cr(" shortest path: set target");
2113 _output->print_cr(" all paths: set target and put 'A' in options");
2114 _output->print_cr(" detect loop: subcase of all paths, have start==target");
2115 _output->print_cr("");
2116 _output->print_cr("Arguments:");
2117 _output->print_cr(" this/start: staring point of BFS");
2118 _output->print_cr(" target:");
2119 _output->print_cr(" if null: simple BFS");
2120 _output->print_cr(" else: shortest path or all paths between this/start and target");
2121 _output->print_cr(" options:");
2122 _output->print_cr(" if null: same as \"cdmox@B\"");
2123 _output->print_cr(" else: use combination of following characters");
2124 _output->print_cr(" h: display this help info");
2125 _output->print_cr(" H: display this help info, with examples");
2126 _output->print_cr(" +: traverse in-edges (on if neither + nor -)");
2127 _output->print_cr(" -: traverse out-edges");
2128 _output->print_cr(" c: visit control nodes");
2129 _output->print_cr(" d: visit data nodes");
2130 _output->print_cr(" m: visit memory nodes");
2131 _output->print_cr(" o: visit other nodes");
2132 _output->print_cr(" x: visit mixed nodes");
2133 _output->print_cr(" C: boundary control nodes");
2134 _output->print_cr(" D: boundary data nodes");
2135 _output->print_cr(" M: boundary memory nodes");
2136 _output->print_cr(" O: boundary other nodes");
2137 _output->print_cr(" X: boundary mixed nodes");
2138 _output->print_cr(" #: display node category in color (not supported in all terminals)");
2139 _output->print_cr(" S: sort displayed nodes by node idx");
2140 _output->print_cr(" A: all paths (not just shortest path to target)");
2141 _output->print_cr(" @: print old nodes - before matching (if available)");
2142 _output->print_cr(" B: print scheduling blocks (if available)");
2143 _output->print_cr(" $: dump only, no header, no other columns");
2144 _output->print_cr(" !: show nodes on IGV (sent over network stream)");
2145 _output->print_cr(" (use preferably with dump_bfs(int, Node*, char*, void*, void*, void*)");
2146 _output->print_cr(" to produce a C2 stack trace along with the graph dump, see examples below)");
2147 _output->print_cr("");
2148 _output->print_cr("recursively follow edges to nodes with permitted visit types,");
2149 _output->print_cr("on the boundary additionally display nodes allowed in boundary types");
2150 _output->print_cr("Note: the categories can be overlapping. For example a mixed node");
2151 _output->print_cr(" can contain control and memory output. Some from the other");
2152 _output->print_cr(" category are also control (Halt, Return, etc).");
2153 _output->print_cr("");
2154 _output->print_cr("output columns:");
2155 _output->print_cr(" dist: BFS distance to this/start");
2156 _output->print_cr(" apd: all paths distance (d_outputart + d_target)");
2157 _output->print_cr(" block: block identifier, based on _pre_order");
2158 _output->print_cr(" head: first node in block");
2159 _output->print_cr(" idom: head node of idom block");
2160 _output->print_cr(" depth: depth of block (_dom_depth)");
2161 _output->print_cr(" old: old IR node - before matching");
2162 _output->print_cr(" dump: node->dump()");
2163 _output->print_cr("");
2164 _output->print_cr("Note: if none of the \"cmdxo\" characters are in the options string");
2165 _output->print_cr(" then we set all of them.");
2166 _output->print_cr(" This allows for short strings like \"#\" for colored input traversal");
2167 _output->print_cr(" or \"-#\" for colored output traversal.");
2168 if (print_examples) {
2169 _output->print_cr("");
2170 _output->print_cr("Examples:");
2171 _output->print_cr(" if->dump_bfs(10, 0, \"+cxo\")");
2172 _output->print_cr(" starting at some if node, traverse inputs recursively");
2173 _output->print_cr(" only along control (mixed and other can also be control)");
2174 _output->print_cr(" phi->dump_bfs(5, 0, \"-dxo\")");
2175 _output->print_cr(" starting at phi node, traverse outputs recursively");
2176 _output->print_cr(" only along data (mixed and other can also have data flow)");
2177 _output->print_cr(" find_node(385)->dump_bfs(3, 0, \"cdmox+#@B\")");
2178 _output->print_cr(" find inputs of node 385, up to 3 nodes up (+)");
2179 _output->print_cr(" traverse all nodes (cdmox), use colors (#)");
2180 _output->print_cr(" display old nodes and blocks, if they exist");
2181 _output->print_cr(" useful call to start with");
2182 _output->print_cr(" find_node(102)->dump_bfs(10, 0, \"dCDMOX-\")");
2183 _output->print_cr(" find non-data dependencies of a data node");
2184 _output->print_cr(" follow data node outputs until we find another category");
2185 _output->print_cr(" node as the boundary");
2186 _output->print_cr(" x->dump_bfs(10, y, 0)");
2187 _output->print_cr(" find shortest path from x to y, along any edge or node");
2188 _output->print_cr(" will not find a path if it is longer than 10");
2189 _output->print_cr(" useful to find how x and y are related");
2190 _output->print_cr(" find_node(741)->dump_bfs(20, find_node(746), \"c+\")");
2191 _output->print_cr(" find shortest control path between two nodes");
2192 _output->print_cr(" find_node(741)->dump_bfs(8, find_node(746), \"cdmox+A\")");
2193 _output->print_cr(" find all paths (A) between two nodes of length at most 8");
2194 _output->print_cr(" find_node(741)->dump_bfs(7, find_node(741), \"c+A\")");
2195 _output->print_cr(" find all control loops for this node");
2196 _output->print_cr(" find_node(741)->dump_bfs(7, find_node(741), \"c+A!\", $sp, $fp, $pc)");
2197 _output->print_cr(" same as above, but printing the resulting subgraph");
2198 _output->print_cr(" along with a C2 stack trace on IGV");
2199 }
2200 }
2201
// Parse the option string into the boolean fields of this PrintBFS.
// Returns false (abort the dump) on help requests or unknown characters.
// See print_options_help() for the meaning of each character.
bool PrintBFS::parse_options() {
  if (_options == nullptr) {
    _options = "cdmox@B"; // default options
  }
  size_t len = strlen(_options);
  for (size_t i = 0; i < len; i++) {
    switch (_options[i]) {
      case '+':
        _traverse_inputs = true;
        break;
      case '-':
        _traverse_outputs = true;
        break;
      // lowercase letters: categories to traverse through
      case 'c':
        _filter_visit._control = true;
        break;
      case 'm':
        _filter_visit._memory = true;
        break;
      case 'd':
        _filter_visit._data = true;
        break;
      case 'x':
        _filter_visit._mixed = true;
        break;
      case 'o':
        _filter_visit._other = true;
        break;
      // uppercase letters: categories to show at the boundary only
      case 'C':
        _filter_boundary._control = true;
        break;
      case 'M':
        _filter_boundary._memory = true;
        break;
      case 'D':
        _filter_boundary._data = true;
        break;
      case 'X':
        _filter_boundary._mixed = true;
        break;
      case 'O':
        _filter_boundary._other = true;
        break;
      case 'S':
        _sort_idx = true;
        break;
      case 'A':
        _all_paths = true;
        break;
      case '#':
        _use_color = true;
        break;
      case 'B':
        _print_blocks = true;
        break;
      case '@':
        _print_old = true;
        break;
      case '$':
        _dump_only = true;
        break;
      case '!':
        _print_igv = true;
        break;
      case 'h':
        print_options_help(false);
        return false;
      case 'H':
        print_options_help(true);
        return false;
      default:
        _output->print_cr("dump_bfs: Unrecognized option \'%c\'", _options[i]);
        _output->print_cr("for help, run: find_node(0)->dump_bfs(0,0,\"H\")");
        return false;
    }
  }
  // defaults: traverse inputs when no direction given, visit everything
  // when no category given
  if (!_traverse_inputs && !_traverse_outputs) {
    _traverse_inputs = true;
  }
  if (_filter_visit.is_empty()) {
    _filter_visit.set_all();
  }
  Compile* C = Compile::current();
  _print_old &= (C->matcher() != nullptr); // only show old if there are new
  _print_blocks &= (C->cfg() != nullptr); // only show blocks if available
  return true;
}
2289
// Emit an ANSI color escape before dumping node n, chosen by the node's
// type category. Only colors nodes that were marked for printing; the
// matching reset escape is emitted by post_dump.
void PrintBFS::DumpConfigColored::pre_dump(outputStream* st, const Node* n) {
  if (!_bfs->_use_color) {
    return;
  }
  Info* info = _bfs->find_info(n);
  if (info == nullptr || !info->is_marked()) {
    return; // node not part of the selected output; leave uncolored
  }

  const Type* t = n->bottom_type();
  switch (t->category()) {
    case Type::Category::Data:
      st->print("\u001b[34m"); // blue
      break;
    case Type::Category::Memory:
      st->print("\u001b[32m"); // green
      break;
    case Type::Category::Mixed:
      st->print("\u001b[35m"); // magenta
      break;
    case Type::Category::Control:
      st->print("\u001b[31m"); // red
      break;
    case Type::Category::Other:
      st->print("\u001b[33m"); // yellow
      break;
    case Type::Category::Undef:
      n->dump();
      assert(false, "category undef ??");
      break;
    default:
      n->dump();
      assert(false, "not covered");
      break;
  }
}
2326
// Emit the ANSI reset escape after a node dump, undoing any color set
// by pre_dump.
void PrintBFS::DumpConfigColored::post_dump(outputStream* st) {
  if (!_bfs->_use_color) {
    return;
  }
  st->print("\u001b[0m"); // reset all attributes (back to default color)
}
2333
2334 Node* PrintBFS::old_node(const Node* n) {
2335 Compile* C = Compile::current();
2336 if (C->matcher() == nullptr || !C->node_arena()->contains(n)) {
2337 return (Node*)nullptr;
2338 } else {
2339 return C->matcher()->find_old_node(n);
2340 }
2341 }
2342
2343 void PrintBFS::print_node_idx(const Node* n) {
2344 Compile* C = Compile::current();
2345 char buf[30];
2346 if (n == nullptr) {
2347 os::snprintf_checked(buf, sizeof(buf), "_"); // null
2348 } else if (C->node_arena()->contains(n)) {
2349 os::snprintf_checked(buf, sizeof(buf), "%d", n->_idx); // new node
2350 } else {
2351 os::snprintf_checked(buf, sizeof(buf), "o%d", n->_idx); // old node
2352 }
2353 _output->print("%6s", buf);
2354 }
2355
2356 void PrintBFS::print_block_id(const Block* b) {
2357 Compile* C = Compile::current();
2358 char buf[30];
2359 os::snprintf_checked(buf, sizeof(buf), "B%d", b->_pre_order);
2360 _output->print("%7s", buf);
2361 }
2362
2363 void PrintBFS::print_node_block(const Node* n) {
2364 Compile* C = Compile::current();
2365 Block* b = C->node_arena()->contains(n)
2366 ? C->cfg()->get_block_for_node(n)
2367 : nullptr; // guard against old nodes
2368 if (b == nullptr) {
2369 _output->print(" _"); // Block
2370 _output->print(" _"); // head
2371 _output->print(" _"); // idom
2372 _output->print(" _"); // depth
2373 } else {
2374 print_block_id(b);
2375 print_node_idx(b->head());
2376 if (b->_idom) {
2377 print_node_idx(b->_idom->head());
2378 } else {
2379 _output->print(" _"); // idom
2380 }
2381 _output->print("%6d ", b->_dom_depth);
2382 }
2383 }
2384
2385 // filter, and add to worklist, add info, note traversal edges
2386 void PrintBFS::maybe_traverse(const Node* src, const Node* dst) {
2387 if (dst != nullptr &&
2388 (_filter_visit.accepts(dst) ||
2389 _filter_boundary.accepts(dst) ||
2390 dst == _start)) { // correct category or start?
2391 if (find_info(dst) == nullptr) {
2392 // never visited - set up info
2393 _worklist.push(dst);
2394 int d = 0;
2395 if (dst != _start) {
2396 d = find_info(src)->distance() + 1;
2397 }
2398 make_info(dst, d);
2399 }
2400 if (src != dst) {
2401 // traversal edges useful during select
2402 find_info(dst)->edge_bwd.push(src);
2403 }
2404 }
2405 }
2406
2407 void PrintBFS::print_header() const {
2408 if (_dump_only) {
2409 return; // no header in dump only mode
2410 }
2411 _output->print("dist"); // distance
2412 if (_all_paths) {
2413 _output->print(" apd"); // all paths distance
2414 }
2415 if (_print_blocks) {
2416 _output->print(" [block head idom depth]"); // block
2417 }
2418 if (_print_old) {
2419 _output->print(" old"); // old node
2420 }
2421 _output->print(" dump\n"); // node dump
2422 _output->print_cr("---------------------------------------------");
2423 }
2424
2425 void PrintBFS::print_node(const Node* n) {
2426 if (_dump_only) {
2427 n->dump("\n", false, _output, &_dcc);
2428 return;
2429 }
2430 _output->print("%4d", find_info(n)->distance());// distance
2431 if (_all_paths) {
2432 Info* info = find_info(n);
2433 int apd = info->distance() + info->distance_from_target();
2434 _output->print("%4d", apd); // all paths distance
2435 }
2436 if (_print_blocks) {
2437 print_node_block(n); // block
2438 }
2439 if (_print_old) {
2440 print_node_idx(old_node(n)); // old node
2441 }
2442 _output->print(" ");
2443 n->dump("\n", false, _output, &_dcc); // node dump
2444 }
2445
//------------------------------dump_bfs--------------------------------------
// Call this from debugger
// Useful for BFS traversal, shortest path, all path, loop detection, etc
// Designed to be more readable, and provide additional info
// To find all options, run:
//   find_node(0)->dump_bfs(0,0,"H")
// This overload prints to tty; see the outputStream overload for details.
void Node::dump_bfs(const int max_distance, Node* target, const char* options) const {
  dump_bfs(max_distance, target, options, tty);
}
2455
2456 // Used to dump to stream.
2457 void Node::dump_bfs(const int max_distance, Node* target, const char* options, outputStream* st, const frame* fr) const {
2458 PrintBFS bfs(this, max_distance, target, options, st, fr);
2459 bfs.run();
2460 }
2461
// Call this from debugger, with default arguments
// (no target, default option string "cdmox@B").
void Node::dump_bfs(const int max_distance) const {
  dump_bfs(max_distance, nullptr, nullptr);
}
2466
2467 // Call this from debugger, with stack handling register arguments for IGV dumps.
2468 // Example: p find_node(741)->dump_bfs(7, find_node(741), "c+A!", $sp, $fp, $pc).
2469 void Node::dump_bfs(const int max_distance, Node* target, const char* options, void* sp, void* fp, void* pc) const {
2470 frame fr(sp, fp, pc);
2471 dump_bfs(max_distance, target, options, tty, &fr);
2472 }
2473
// -----------------------------dump_idx---------------------------------------
// Print this node's idx, prefixed with "o" for old (pre-matching) nodes.
// With align, pad on the left so all idx columns line up; the width is
// derived from the largest idx in the compilation (C->unique()).
void Node::dump_idx(bool align, outputStream* st, DumpConfig* dc) const {
  if (dc != nullptr) {
    dc->pre_dump(st, this);
  }
  Compile* C = Compile::current();
  bool is_new = C->node_arena()->contains(this);
  if (align) { // print prefix empty spaces
    // +1 for leading digit, +1 for "o"
    uint max_width = (C->unique() == 0 ? 0 : static_cast<uint>(log10(static_cast<double>(C->unique())))) + 2;
    // +1 for leading digit, maybe +1 for "o"
    uint width = (_idx == 0 ? 0 : static_cast<uint>(log10(static_cast<double>(_idx)))) + 1 + (is_new ? 0 : 1);
    while (max_width > width) {
      st->print(" ");
      width++;
    }
  }
  if (!is_new) {
    st->print("o");
  }
  st->print("%d", _idx);
  if (dc != nullptr) {
    dc->post_dump(st);
  }
}
2499
2500 // -----------------------------dump_name--------------------------------------
2501 void Node::dump_name(outputStream* st, DumpConfig* dc) const {
2502 if (dc != nullptr) {
2503 dc->pre_dump(st, this);
2504 }
2505 st->print("%s", Name());
2506 if (dc != nullptr) {
2507 dc->post_dump(st);
2508 }
2509 }
2510
// -----------------------------Name-------------------------------------------
// NodeClassNames is generated by adlc; it maps opcode -> class-name string.
extern const char *NodeClassNames[];
const char *Node::Name() const { return NodeClassNames[Opcode()]; }
2514
2515 static bool is_disconnected(const Node* n) {
2516 for (uint i = 0; i < n->req(); i++) {
2517 if (n->in(i) != nullptr) return false;
2518 }
2519 return true;
2520 }
2521
2522 #ifdef ASSERT
// Print the chain of _debug_orig nodes (the IR nodes this node was cloned
// from), e.g. "!orig=123,[45],o67". Disconnected originals are bracketed,
// old-graph originals prefixed with "o". Uses Floyd's tortoise-and-hare
// scheme to cut off a cyclic orig chain with "...".
void Node::dump_orig(outputStream *st, bool print_key) const {
  Compile* C = Compile::current();
  Node* orig = _debug_orig;
  // treat garbage/sentinel pointers and foreign-arena nodes as "no orig"
  if (not_a_node(orig)) orig = nullptr;
  if (orig != nullptr && !C->node_arena()->contains(orig)) orig = nullptr;
  if (orig == nullptr) return;
  if (print_key) {
    st->print(" !orig=");
  }
  Node* fast = orig->debug_orig(); // tortoise & hare algorithm to detect loops
  if (not_a_node(fast)) fast = nullptr;
  while (orig != nullptr) {
    bool discon = is_disconnected(orig); // if discon, print [123] else 123
    if (discon) st->print("[");
    if (!Compile::current()->node_arena()->contains(orig))
      st->print("o");
    st->print("%d", orig->_idx);
    if (discon) st->print("]");
    orig = orig->debug_orig();
    if (not_a_node(orig)) orig = nullptr;
    if (orig != nullptr && !C->node_arena()->contains(orig)) orig = nullptr;
    if (orig != nullptr) st->print(",");
    if (fast != nullptr) {
      // Step fast twice for each single step of orig:
      fast = fast->debug_orig();
      if (not_a_node(fast)) fast = nullptr;
      if (fast != nullptr && fast != orig) {
        fast = fast->debug_orig();
        if (not_a_node(fast)) fast = nullptr;
      }
      if (fast == orig) {
        // hare caught the tortoise: the orig chain is cyclic
        st->print("...");
        break;
      }
    }
  }
}
2560
// Record the IR node this node was cloned from. When the BreakAtNode
// flag is set, walk a bounded prefix of the orig chain and hit a
// breakpoint if any node's idx or debug_idx matches the flag value.
void Node::set_debug_orig(Node* orig) {
  _debug_orig = orig;
  if (BreakAtNode == 0) return; // breakpoint support disabled
  if (not_a_node(orig)) orig = nullptr;
  int trip = 10; // bound the walk in case the orig chain is cyclic
  while (orig != nullptr) {
    if (orig->debug_idx() == BreakAtNode || (uintx)orig->_idx == BreakAtNode) {
      tty->print_cr("BreakAtNode: _idx=%d _debug_idx=" UINT64_FORMAT " orig._idx=%d orig._debug_idx=" UINT64_FORMAT,
                    this->_idx, this->debug_idx(), orig->_idx, orig->debug_idx());
      BREAKPOINT;
    }
    orig = orig->debug_orig();
    if (not_a_node(orig)) orig = nullptr;
    if (trip-- <= 0) break;
  }
}
2577 #endif //ASSERT
2578
2579 //------------------------------dump------------------------------------------
2580 // Dump a Node
2581 void Node::dump(const char* suffix, bool mark, outputStream* st, DumpConfig* dc) const {
2582 Compile* C = Compile::current();
2583 bool is_new = C->node_arena()->contains(this);
2584 C->_in_dump_cnt++;
2585
2586 // idx mark name ===
2587 dump_idx(true, st, dc);
2588 st->print(mark ? " >" : " ");
2589 dump_name(st, dc);
2590 st->print(" === ");
2591
2592 // Dump the required and precedence inputs
2593 dump_req(st, dc);
2594 dump_prec(st, dc);
2595 // Dump the outputs
2596 dump_out(st, dc);
2597
2598 if (is_disconnected(this)) {
2599 #ifdef ASSERT
2600 st->print(" [" UINT64_FORMAT "]", debug_idx());
2601 dump_orig(st);
2602 #endif
2603 st->cr();
2604 C->_in_dump_cnt--;
2605 return; // don't process dead nodes
2606 }
2607
2608 if (C->clone_map().value(_idx) != 0) {
2609 C->clone_map().dump(_idx, st);
2610 }
2611 // Dump node-specific info
2612 dump_spec(st);
2613 #ifdef ASSERT
2614 // Dump the non-reset _debug_idx
2615 if (Verbose && WizardMode) {
2616 st->print(" [" UINT64_FORMAT "]", debug_idx());
2617 }
2618 #endif
2619
2620 const Type *t = bottom_type();
2621
2622 if (t != nullptr && (t->isa_instptr() || t->isa_instklassptr())) {
2623 const TypeInstPtr *toop = t->isa_instptr();
2624 const TypeInstKlassPtr *tkls = t->isa_instklassptr();
2625 if (toop) {
2626 st->print(" Oop:");
2627 } else if (tkls) {
2628 st->print(" Klass:");
2629 }
2630 t->dump_on(st);
2631 } else if (t == Type::MEMORY) {
2632 st->print(" Memory:");
2633 MemNode::dump_adr_type(adr_type(), st);
2634 } else if (Verbose || WizardMode) {
2635 st->print(" Type:");
2636 if (t) {
2637 t->dump_on(st);
2638 } else {
2639 st->print("no type");
2640 }
2641 } else if (t->isa_vect() && this->is_MachSpillCopy()) {
2642 // Dump MachSpillcopy vector type.
2643 t->dump_on(st);
2644 }
2645 if (is_new) {
2646 DEBUG_ONLY(dump_orig(st));
2647 Node_Notes* nn = C->node_notes_at(_idx);
2648 if (nn != nullptr && !nn->is_clear()) {
2649 if (nn->jvms() != nullptr) {
2650 st->print(" !jvms:");
2651 nn->jvms()->dump_spec(st);
2652 }
2653 }
2654 }
2655 if (suffix) st->print("%s", suffix);
2656 C->_in_dump_cnt--;
2657 }
2658
// call from debugger: dump node to tty with newline
// Convenience wrapper over the full dump(suffix, ...) overload.
void Node::dump() const {
  dump("\n");
}
2663
2664 //------------------------------dump_req--------------------------------------
2665 void Node::dump_req(outputStream* st, DumpConfig* dc) const {
2666 // Dump the required input edges
2667 for (uint i = 0; i < req(); i++) { // For all required inputs
2668 Node* d = in(i);
2669 if (d == nullptr) {
2670 st->print("_ ");
2671 } else if (not_a_node(d)) {
2672 st->print("not_a_node "); // uninitialized, sentinel, garbage, etc.
2673 } else {
2674 d->dump_idx(false, st, dc);
2675 st->print(" ");
2676 }
2677 }
2678 }
2679
2680
2681 //------------------------------dump_prec-------------------------------------
2682 void Node::dump_prec(outputStream* st, DumpConfig* dc) const {
2683 // Dump the precedence edges
2684 int any_prec = 0;
2685 for (uint i = req(); i < len(); i++) { // For all precedence inputs
2686 Node* p = in(i);
2687 if (p != nullptr) {
2688 if (!any_prec++) st->print(" |");
2689 if (not_a_node(p)) { st->print("not_a_node "); continue; }
2690 p->dump_idx(false, st, dc);
2691 st->print(" ");
2692 }
2693 }
2694 }
2695
2696 //------------------------------dump_out--------------------------------------
2697 void Node::dump_out(outputStream* st, DumpConfig* dc) const {
2698 // Delimit the output edges
2699 st->print(" [[ ");
2700 // Dump the output edges
2701 for (uint i = 0; i < _outcnt; i++) { // For all outputs
2702 Node* u = _out[i];
2703 if (u == nullptr) {
2704 st->print("_ ");
2705 } else if (not_a_node(u)) {
2706 st->print("not_a_node ");
2707 } else {
2708 u->dump_idx(false, st, dc);
2709 st->print(" ");
2710 }
2711 }
2712 st->print("]] ");
2713 }
2714
//------------------------------dump-------------------------------------------
// call from debugger: dump Node's inputs (or outputs if d negative)
// Delegates to dump_bfs: "+$" walks inputs, "-$" walks outputs, depth |d|.
void Node::dump(int d) const {
  dump_bfs(abs(d), nullptr, (d > 0) ? "+$" : "-$");
}

//------------------------------dump_ctrl--------------------------------------
// call from debugger: dump Node's control inputs (or outputs if d negative)
// Same as dump(int) but restricted to control ('c' option in dump_bfs).
void Node::dump_ctrl(int d) const {
  dump_bfs(abs(d), nullptr, (d > 0) ? "+$c" : "-$c");
}
2726
//-----------------------------dump_compact------------------------------------
// Debugger convenience: compact dump with a trailing newline.
void Node::dump_comp() const {
  this->dump_comp("\n");
}
2731
2732 //-----------------------------dump_compact------------------------------------
2733 // Dump a Node in compact representation, i.e., just print its name and index.
2734 // Nodes can specify additional specifics to print in compact representation by
2735 // implementing dump_compact_spec.
2736 void Node::dump_comp(const char* suffix, outputStream *st) const {
2737 Compile* C = Compile::current();
2738 C->_in_dump_cnt++;
2739 st->print("%s(%d)", Name(), _idx);
2740 this->dump_compact_spec(st);
2741 if (suffix) {
2742 st->print("%s", suffix);
2743 }
2744 C->_in_dump_cnt--;
2745 }
2746
// VERIFICATION CODE
// Verify all nodes if verify_depth is negative
// Breadth-first walk over the inputs of the nodes initially on 'worklist':
// for every (def, use) pair, check that the number of use-def edges equals
// the number of def-use edges, and that the TOP constant is unique.
// 'verify_depth' bounds how many levels of inputs are visited; a negative
// value visits the whole reachable graph.
void Node::verify(int verify_depth, VectorSet& visited, Node_List& worklist) {
  assert(verify_depth != 0, "depth should not be 0");
  Compile* C = Compile::current();
  uint last_index_on_current_depth = worklist.size() - 1;
  verify_depth--; // Visiting the first node on depth 1
  // Only add nodes to worklist if verify_depth is negative (visit all nodes) or greater than 0
  bool add_to_worklist = verify_depth != 0;

  for (uint list_index = 0; list_index < worklist.size(); list_index++) {
    Node* n = worklist[list_index];

    // TOP uniqueness: cache the first TOP constant seen; every later one
    // must be the very same node.
    if (n->is_Con() && n->bottom_type() == Type::TOP) {
      if (C->cached_top_node() == nullptr) {
        C->set_cached_top_node((Node*)n);
      }
      assert(C->cached_top_node() == n, "TOP node must be unique");
    }

    uint in_len = n->len();
    for (uint i = 0; i < in_len; i++) {
      Node* x = n->_in[i];
      if (!x || x->is_top()) {
        // null and TOP inputs carry no def-use edges to check
        continue;
      }

      // Verify my input has a def-use edge to me
      // Count use-def edges from n to x
      int cnt = 1;
      for (uint j = 0; j < i; j++) {
        if (n->_in[j] == x) {
          cnt++;
          break;
        }
      }
      if (cnt == 2) {
        // x is already checked as n's previous input, skip its duplicated def-use count checking
        continue;
      }
      // Count any remaining duplicate edges n->x after position i.
      for (uint j = i + 1; j < in_len; j++) {
        if (n->_in[j] == x) {
          cnt++;
        }
      }

      // Count def-use edges from x to n
      uint max = x->_outcnt;
      for (uint k = 0; k < max; k++) {
        if (x->_out[k] == n) {
          cnt--;
        }
      }
      // Edge counts must balance exactly: each use-def edge has one def-use twin.
      assert(cnt == 0, "mismatched def-use edge counts");

      if (add_to_worklist && !visited.test_set(x->_idx)) {
        worklist.push(x);
      }
    }

    if (verify_depth > 0 && list_index == last_index_on_current_depth) {
      // All nodes on this depth were processed and its inputs are on the worklist. Decrement verify_depth and
      // store the current last list index which is the last node in the list with the new depth. All nodes
      // added afterwards will have a new depth again. Stop adding new nodes if depth limit is reached (=0).
      verify_depth--;
      if (verify_depth == 0) {
        add_to_worklist = false;
      }
      last_index_on_current_depth = worklist.size() - 1;
    }
  }
}
2819 #endif // not PRODUCT
2820
//------------------------------Registers--------------------------------------
// Do we Match on this edge index or not? Generally false for Control
// and true for everything else. Weird for calls & returns.
uint Node::match_edge(uint idx) const {
  return idx; // True for other than index 0 (control)
}

// Register classes are defined for specific machines
// The base Node class must never be asked for register masks; machine
// (Mach) nodes override these.
const RegMask &Node::out_RegMask() const {
  ShouldNotCallThis();
  return RegMask::EMPTY;
}

const RegMask &Node::in_RegMask(uint) const {
  ShouldNotCallThis();
  return RegMask::EMPTY;
}
2838
// Grow the backing array so index 'i' becomes valid; the new capacity is the
// next power of two above 'i'. Newly exposed slots are zero-filled.
void Node_Array::grow(uint i) {
  assert(i >= _max, "Should have been checked before, use maybe_grow?");
  assert(_max > 0, "invariant");
  uint old = _max;
  _max = next_power_of_2(i);
  _nodes = (Node**)_a->Arealloc( _nodes, old*sizeof(Node*),_max*sizeof(Node*));
  Copy::zero_to_bytes( &_nodes[old], (_max-old)*sizeof(Node*) );
}
2847
// Insert 'n' at slot 'i', shifting later elements up by one slot. Grows first
// if the last slot is occupied, since the shift would otherwise drop it.
void Node_Array::insert(uint i, Node* n) {
  if (_nodes[_max - 1]) {
    grow(_max);
  }
  Copy::conjoint_words_to_higher((HeapWord*)&_nodes[i], (HeapWord*)&_nodes[i + 1], ((_max - i - 1) * sizeof(Node*)));
  _nodes[i] = n;
}
2855
// Remove slot 'i', shifting later elements down by one and clearing the
// now-vacated last slot.
void Node_Array::remove(uint i) {
  Copy::conjoint_words_to_lower((HeapWord*)&_nodes[i + 1], (HeapWord*)&_nodes[i], ((_max - i - 1) * sizeof(Node*)));
  _nodes[_max - 1] = nullptr;
}
2860
2861 void Node_Array::dump() const {
2862 #ifndef PRODUCT
2863 for (uint i = 0; i < _max; i++) {
2864 Node* nn = _nodes[i];
2865 if (nn != nullptr) {
2866 tty->print("%5d--> ",i); nn->dump();
2867 }
2868 }
2869 #endif
2870 }
2871
2872 //--------------------------is_iteratively_computed------------------------------
2873 // Operation appears to be iteratively computed (such as an induction variable)
2874 // It is possible for this operation to return false for a loop-varying
2875 // value, if it appears (by local graph inspection) to be computed by a simple conditional.
2876 bool Node::is_iteratively_computed() {
2877 if (ideal_reg()) { // does operation have a result register?
2878 for (uint i = 1; i < req(); i++) {
2879 Node* n = in(i);
2880 if (n != nullptr && n->is_Phi()) {
2881 for (uint j = 1; j < n->req(); j++) {
2882 if (n->in(j) == this) {
2883 return true;
2884 }
2885 }
2886 }
2887 }
2888 }
2889 return false;
2890 }
2891
2892 //--------------------------find_similar------------------------------
2893 // Return a node with opcode "opc" and same inputs as "this" if one can
2894 // be found; Otherwise return null;
2895 Node* Node::find_similar(int opc) {
2896 if (req() >= 2) {
2897 Node* def = in(1);
2898 if (def && def->outcnt() >= 2) {
2899 for (DUIterator_Fast dmax, i = def->fast_outs(dmax); i < dmax; i++) {
2900 Node* use = def->fast_out(i);
2901 if (use != this &&
2902 use->Opcode() == opc &&
2903 use->req() == req() &&
2904 has_same_inputs_as(use)) {
2905 return use;
2906 }
2907 }
2908 }
2909 }
2910 return nullptr;
2911 }
2912
2913 bool Node::has_same_inputs_as(const Node* other) const {
2914 assert(req() == other->req(), "should have same number of inputs");
2915 for (uint j = 0; j < other->req(); j++) {
2916 if (in(j) != other->in(j)) {
2917 return false;
2918 }
2919 }
2920 return true;
2921 }
2922
2923 Node* Node::unique_multiple_edges_out_or_null() const {
2924 Node* use = nullptr;
2925 for (DUIterator_Fast kmax, k = fast_outs(kmax); k < kmax; k++) {
2926 Node* u = fast_out(k);
2927 if (use == nullptr) {
2928 use = u; // first use
2929 } else if (u != use) {
2930 return nullptr; // not unique
2931 } else {
2932 // secondary use
2933 }
2934 }
2935 return use;
2936 }
2937
2938 //--------------------------unique_ctrl_out_or_null-------------------------
2939 // Return the unique control out if only one. Null if none or more than one.
2940 Node* Node::unique_ctrl_out_or_null() const {
2941 Node* found = nullptr;
2942 for (uint i = 0; i < outcnt(); i++) {
2943 Node* use = raw_out(i);
2944 if (use->is_CFG() && use != this) {
2945 if (found != nullptr) {
2946 return nullptr;
2947 }
2948 found = use;
2949 }
2950 }
2951 return found;
2952 }
2953
//--------------------------unique_ctrl_out------------------------------
// Return the unique control out. Asserts if none or more than one control out.
Node* Node::unique_ctrl_out() const {
  Node* ctrl = unique_ctrl_out_or_null();
  assert(ctrl != nullptr, "control out is assumed to be unique");
  return ctrl;
}
2961
2962 void Node::ensure_control_or_add_prec(Node* c) {
2963 if (in(0) == nullptr) {
2964 set_req(0, c);
2965 } else if (in(0) != c) {
2966 add_prec(c);
2967 }
2968 }
2969
2970 void Node::add_prec_from(Node* n) {
2971 for (uint i = n->req(); i < n->len(); i++) {
2972 Node* prec = n->in(i);
2973 if (prec != nullptr) {
2974 add_prec(prec);
2975 }
2976 }
2977 }
2978
2979 bool Node::is_dead_loop_safe() const {
2980 if (is_Phi()) {
2981 return true;
2982 }
2983 if (is_Proj() && in(0) == nullptr) {
2984 return true;
2985 }
2986 if ((_flags & (Flag_is_dead_loop_safe | Flag_is_Con)) != 0) {
2987 if (!is_Proj()) {
2988 return true;
2989 }
2990 if (in(0)->is_Allocate()) {
2991 return false;
2992 }
2993 // MemNode::can_see_stored_value() peeks through the boxing call
2994 if (in(0)->is_CallStaticJava() && in(0)->as_CallStaticJava()->is_boxing_method()) {
2995 return false;
2996 }
2997 return true;
2998 }
2999 return false;
3000 }
3001
// True iff this node is a signed or unsigned division or modulo of basic type 'bt'.
bool Node::is_div_or_mod(BasicType bt) const { return Opcode() == Op_Div(bt) || Opcode() == Op_Mod(bt) ||
                                                      Opcode() == Op_UDiv(bt) || Opcode() == Op_UMod(bt); }
3004
// `maybe_pure_function` is assumed to be the input of `this`. This is a bit redundant,
// but we already have and need maybe_pure_function in all the call sites, so
// it makes it obvious that the `maybe_pure_function` is the same node as in the caller,
// while it takes more thinking to realize that a locally computed in(0) must be equal to
// the local in the caller.
// Returns true iff this node is the result projection (TypeFunc::Parms) of a
// pure leaf call.
bool Node::is_data_proj_of_pure_function(const Node* maybe_pure_function) const {
  return Opcode() == Op_Proj && as_Proj()->_con == TypeFunc::Parms && maybe_pure_function->is_CallLeafPure();
}
3013
3014 //--------------------------has_non_debug_uses------------------------------
3015 // Checks whether the node has any non-debug uses or not.
3016 bool Node::has_non_debug_uses() const {
3017 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
3018 Node* u = fast_out(i);
3019 if (u->is_SafePoint()) {
3020 if (u->is_Call() && u->as_Call()->has_non_debug_use(this)) {
3021 return true;
3022 }
3023 // Non-call safepoints have only debug uses.
3024 } else if (u->is_ReachabilityFence()) {
3025 // Reachability fence is treated as debug use.
3026 } else {
3027 return true; // everything else is conservatively treated as non-debug use
3028 }
3029 }
3030 return false; // no non-debug uses found
3031 }
3032
3033 //=============================================================================
3034 //------------------------------yank-------------------------------------------
3035 // Find and remove
3036 void Node_List::yank( Node *n ) {
3037 uint i;
3038 for (i = 0; i < _cnt; i++) {
3039 if (_nodes[i] == n) {
3040 break;
3041 }
3042 }
3043
3044 if (i < _cnt) {
3045 _nodes[i] = _nodes[--_cnt];
3046 }
3047 }
3048
3049 //------------------------------dump-------------------------------------------
3050 void Node_List::dump() const {
3051 #ifndef PRODUCT
3052 for (uint i = 0; i < _cnt; i++) {
3053 if (_nodes[i]) {
3054 tty->print("%5d--> ", i);
3055 _nodes[i]->dump();
3056 }
3057 }
3058 #endif
3059 }
3060
3061 void Node_List::dump_simple() const {
3062 #ifndef PRODUCT
3063 for (uint i = 0; i < _cnt; i++) {
3064 if( _nodes[i] ) {
3065 tty->print(" %d", _nodes[i]->_idx);
3066 } else {
3067 tty->print(" null");
3068 }
3069 }
3070 #endif
3071 }
3072
//=============================================================================
//------------------------------remove-----------------------------------------
// Remove 'n' from the list, if present. Order is not preserved: the last
// element is popped and dropped into n's slot. Membership is tracked by the
// _in_worklist bitset, so a set bit with no matching list entry is corruption.
void Unique_Node_List::remove(Node* n) {
  if (_in_worklist.test(n->_idx)) {
    for (uint i = 0; i < size(); i++) {
      if (_nodes[i] == n) {
        map(i, Node_List::pop()); // overwrite slot i with the popped last element
        _in_worklist.remove(n->_idx);
        return;
      }
    }
    ShouldNotReachHere(); // bit was set but node not found in the list
  }
}
3087
//-----------------------remove_useless_nodes----------------------------------
// Remove useless nodes from worklist
// Keep only nodes whose index is set in 'useful'. Removal swaps in the popped
// last element, so the same slot must be revisited (hence the --i).
void Unique_Node_List::remove_useless_nodes(VectorSet &useful) {
  for (uint i = 0; i < size(); ++i) {
    Node *n = at(i);
    assert( n != nullptr, "Did not expect null entries in worklist");
    if (!useful.test(n->_idx)) {
      _in_worklist.remove(n->_idx);
      map(i, Node_List::pop());
      --i; // Visit popped node
      // If it was last entry, loop terminates since size() was also reduced
    }
  }
}
3102
//=============================================================================
// Double the stack's capacity, preserving the current contents and the
// position of the top-of-stack pointer.
void Node_Stack::grow() {
  size_t old_top = pointer_delta(_inode_top,_inodes,sizeof(INode)); // save _top
  size_t old_max = pointer_delta(_inode_max,_inodes,sizeof(INode));
  size_t max = old_max << 1;             // max * 2
  _inodes = REALLOC_ARENA_ARRAY(_a, _inodes, old_max, max);
  _inode_max = _inodes + max;
  _inode_top = _inodes + old_top;        // restore _top
}
3112
3113 // Node_Stack is used to map nodes.
3114 Node* Node_Stack::find(uint idx) const {
3115 uint sz = size();
3116 for (uint i = 0; i < sz; i++) {
3117 if (idx == index_at(i)) {
3118 return node_at(i);
3119 }
3120 }
3121 return nullptr;
3122 }
3123
//=============================================================================
// Report the concrete byte size of this node.
uint TypeNode::size_of() const { return sizeof(*this); }
#ifndef PRODUCT
// Print the captured type, unless the standard dump will already print the
// bottom type (which it does in Verbose and WizardMode).
void TypeNode::dump_spec(outputStream *st) const {
  if (!Verbose && !WizardMode) {
    // standard dump does this in Verbose and WizardMode
    st->print(" #"); _type->dump_on(st);
  }
}

// Compact representation: just "#<type>".
void TypeNode::dump_compact_spec(outputStream *st) const {
  st->print("#");
  _type->dump_on(st);
}
#endif
// Hash mixes the captured type into the base node hash, so nodes that differ
// only in _type do not collide during value numbering.
uint TypeNode::hash() const {
  return Node::hash() + _type->hash();
}
// Two TypeNodes compare equal here iff their captured types are equal.
bool TypeNode::cmp(const Node& n) const {
  return Type::equals(_type, n.as_Type()->_type);
}
// The captured type is both the bottom type and the computed Value.
const Type* TypeNode::bottom_type() const { return _type; }
const Type* TypeNode::Value(PhaseGVN* phase) const { return _type; }

//------------------------------ideal_reg--------------------------------------
// Delegate the ideal register kind to the captured type.
uint TypeNode::ideal_reg() const {
  return _type->ideal_reg();
}
3152
3153 void TypeNode::make_path_dead(PhaseIterGVN* igvn, PhaseIdealLoop* loop, Node* ctrl_use, uint j, const char* phase_str) {
3154 Node* c = ctrl_use->in(j);
3155 if (igvn->type(c) != Type::TOP) {
3156 igvn->replace_input_of(ctrl_use, j, igvn->C->top());
3157 create_halt_path(igvn, c, loop, phase_str);
3158 }
3159 }
3160
// This Type node is dead. It could be because the type that it captures and the type of the node computed from its
// inputs do not intersect anymore. That node has some uses along some control flow paths. Those control flow paths must
// be unreachable as using a dead value makes no sense. For the Type node to capture a narrowed down type, some control
// flow construct must guard the Type node (an If node usually). When the Type node becomes dead, the guard usually
// constant folds and the control flow that leads to the Type node becomes unreachable. There are cases where that
// doesn't happen, however. They are handled here by following uses of the Type node until a CFG or a Phi to find dead
// paths. The dead paths are then replaced by a Halt node.
void TypeNode::make_paths_from_here_dead(PhaseIterGVN* igvn, PhaseIdealLoop* loop, const char* phase_str) {
  // Transitive closure over data uses, stopping at CFG nodes and Phis.
  Unique_Node_List wq;
  wq.push(this);
  for (uint i = 0; i < wq.size(); ++i) {
    Node* n = wq.at(i);
    for (DUIterator_Fast kmax, k = n->fast_outs(kmax); k < kmax; k++) {
      Node* u = n->fast_out(k);
      if (u->is_CFG()) {
        // A CFG user consumes the dead value directly: kill its control input.
        assert(!u->is_Region(), "Can't reach a Region without going through a Phi");
        make_path_dead(igvn, loop, u, 0, phase_str);
      } else if (u->is_Phi()) {
        // Only the Region paths that actually merge the dead value are killed.
        Node* r = u->in(0);
        assert(r->is_Region() || r->is_top(), "unexpected Phi's control");
        if (r->is_Region()) {
          for (uint j = 1; j < u->req(); ++j) {
            if (u->in(j) == n && r->in(j) != nullptr) {
              make_path_dead(igvn, loop, r, j, phase_str);
            }
          }
        }
      } else {
        // Plain data use: keep following downstream uses.
        wq.push(u);
      }
    }
  }
}
3194
3195 void TypeNode::create_halt_path(PhaseIterGVN* igvn, Node* c, PhaseIdealLoop* loop, const char* phase_str) const {
3196 Node* frame = new ParmNode(igvn->C->start(), TypeFunc::FramePtr);
3197 if (loop == nullptr) {
3198 igvn->register_new_node_with_optimizer(frame);
3199 } else {
3200 loop->register_new_node(frame, igvn->C->start());
3201 }
3202
3203 stringStream ss;
3204 ss.print("dead path discovered by TypeNode during %s", phase_str);
3205
3206 Node* halt = new HaltNode(c, frame, ss.as_string(igvn->C->comp_arena()));
3207 if (loop == nullptr) {
3208 igvn->register_new_node_with_optimizer(halt);
3209 } else {
3210 loop->register_control(halt, loop->ltree_root(), c);
3211 }
3212 igvn->add_input_to(igvn->C->root(), halt);
3213 }
3214
// If this Type node's computed Value has become TOP (its captured type no
// longer intersects its input's type), replace it with top and convert any
// control paths still using it into Halt paths (see make_paths_from_here_dead).
Node* TypeNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  if (KillPathsReachableByDeadTypeNode && can_reshape && Value(phase) == Type::TOP) {
    PhaseIterGVN* igvn = phase->is_IterGVN();
    Node* top = igvn->C->top();
    ResourceMark rm;  // make_paths_from_here_dead allocates a temporary worklist
    make_paths_from_here_dead(igvn, nullptr, "igvn");
    return top;
  }

  return Node::Ideal(phase, can_reshape);
}