1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "gc/shared/barrierSet.hpp"
26 #include "gc/shared/c2/barrierSetC2.hpp"
27 #include "memory/allocation.inline.hpp"
28 #include "memory/resourceArea.hpp"
29 #include "oops/objArrayKlass.hpp"
30 #include "opto/addnode.hpp"
31 #include "opto/castnode.hpp"
32 #include "opto/cfgnode.hpp"
33 #include "opto/connode.hpp"
34 #include "opto/convertnode.hpp"
35 #include "opto/loopnode.hpp"
36 #include "opto/machnode.hpp"
37 #include "opto/movenode.hpp"
38 #include "opto/mulnode.hpp"
39 #include "opto/narrowptrnode.hpp"
40 #include "opto/phaseX.hpp"
41 #include "opto/regalloc.hpp"
42 #include "opto/regmask.hpp"
43 #include "opto/runtime.hpp"
44 #include "opto/subnode.hpp"
45 #include "opto/vectornode.hpp"
46 #include "utilities/vmError.hpp"
47
48 // Portions of code courtesy of Clifford Click
49
50 // Optimization - Graph Style
51
52 //=============================================================================
53 //------------------------------Value------------------------------------------
54 // Compute the type of the RegionNode.
55 const Type* RegionNode::Value(PhaseGVN* phase) const {
56 for( uint i=1; i<req(); ++i ) { // For all paths in
57 Node *n = in(i); // Get Control source
58 if( !n ) continue; // Missing inputs are TOP
59 if( phase->type(n) == Type::CONTROL )
60 return Type::CONTROL;
61 }
62 return Type::TOP; // All paths dead? Then so are we
63 }
64
65 //------------------------------Identity---------------------------------------
66 // Check for Region being Identity.
67 Node* RegionNode::Identity(PhaseGVN* phase) {
68 // Cannot have Region be an identity, even if it has only 1 input.
69 // Phi users cannot have their Region input folded away for them,
70 // since they need to select the proper data input
71 return this;
72 }
73
74 //------------------------------merge_region-----------------------------------
75 // If a Region flows into a Region, merge into one big happy merge. This is
76 // only done when the Regions have no Phi users that would need updating.
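//
// An illustrative sketch of the transform (hypothetical inputs A..D):
//
//   A  B   C  D           A  B  C  D
//    \ /    \ /    ===>    \ \ | /
//     r      |              region
//      \     |
//       region
//
// The inputs of the inner Region 'r' are appended onto 'region' and 'r'
// goes away; this is only attempted when neither Region has Phi users.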
77 static Node *merge_region(RegionNode *region, PhaseGVN *phase) {
78 if( region->Opcode() != Op_Region ) // Do not do to LoopNodes
79 return nullptr;
80 Node *progress = nullptr; // Progress flag
81 PhaseIterGVN *igvn = phase->is_IterGVN();
82
83 uint rreq = region->req();
84 for( uint i = 1; i < rreq; i++ ) {
85 Node *r = region->in(i);
86 if( r && r->Opcode() == Op_Region && // Found a region?
87 r->in(0) == r && // Not already collapsed?
88 r != region && // Avoid stupid situations
89 r->outcnt() == 2 ) { // Self user and 'region' user only?
90 assert(!r->as_Region()->has_phi(), "no phi users");
91 if( !progress ) { // No progress
92 if (region->has_phi()) {
93 return nullptr; // Only flatten if no Phi users
94 // igvn->hash_delete( phi );
95 }
96 igvn->hash_delete( region );
97 progress = region; // Making progress
98 }
99 igvn->hash_delete( r );
100
101 // Append inputs to 'r' onto 'region'
102 for( uint j = 1; j < r->req(); j++ ) {
103 // Move an input from 'r' to 'region'
104 region->add_req(r->in(j));
105 r->set_req(j, phase->C->top());
106 // Update phis of 'region'
107 //for( uint k = 0; k < max; k++ ) {
108 // Node *phi = region->out(k);
109 // if( phi->is_Phi() ) {
110 // phi->add_req(phi->in(i));
111 // }
112 //}
113
114 rreq++; // One more input to Region
115 } // Found a region to merge into Region
116 igvn->_worklist.push(r);
117 // Clobber pointer to the now dead 'r'
118 region->set_req(i, phase->C->top());
119 }
120 }
121
122 return progress;
123 }
124
125
126
127 //--------------------------------has_phi--------------------------------------
128 // Helper function: Return any PhiNode that uses this region or null
129 PhiNode* RegionNode::has_phi() const {
130 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
131 Node* phi = fast_out(i);
132 if (phi->is_Phi()) { // Check for Phi users
133 assert(phi->in(0) == (Node*)this, "phi uses region only via in(0)");
134 return phi->as_Phi(); // this one is good enough
135 }
136 }
137
138 return nullptr;
139 }
140
141
142 //-----------------------------has_unique_phi----------------------------------
143 // Helper function: Return the only PhiNode that uses this region or null
144 PhiNode* RegionNode::has_unique_phi() const {
145 // Check that only one use is a Phi
146 PhiNode* only_phi = nullptr;
147 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
148 Node* phi = fast_out(i);
149 if (phi->is_Phi()) { // Check for Phi users
150 assert(phi->in(0) == (Node*)this, "phi uses region only via in(0)");
151 if (only_phi == nullptr) {
152 only_phi = phi->as_Phi();
153 } else {
154 return nullptr; // multiple phis
155 }
156 }
157 }
158
159 return only_phi;
160 }
161
162
163 //------------------------------check_phi_clipping-----------------------------
164 // Helper function for RegionNode's identification of FP clipping
165 // Check inputs to the Phi
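//
// A rough Java-level sketch of the idiom being recognized (illustrative
// only; MIN and MAX stand for the two ConI inputs, with MIN <= 0 <= MAX):
//
//   int i = (int)f;
//   if      (f <= MIN) i = MIN;
//   else if (f >  MAX) i = MAX;
//
// so the Phi merges two constants and the converted value.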
166 static bool check_phi_clipping( PhiNode *phi, ConNode * &min, uint &min_idx, ConNode * &max, uint &max_idx, Node * &val, uint &val_idx ) {
167 min = nullptr;
168 max = nullptr;
169 val = nullptr;
170 min_idx = 0;
171 max_idx = 0;
172 val_idx = 0;
173 uint phi_max = phi->req();
174 if( phi_max == 4 ) {
175 for( uint j = 1; j < phi_max; ++j ) {
176 Node *n = phi->in(j);
177 int opcode = n->Opcode();
178 switch( opcode ) {
179 case Op_ConI:
180 {
181 if( min == nullptr ) {
182 min = n->Opcode() == Op_ConI ? (ConNode*)n : nullptr;
183 min_idx = j;
184 } else {
185 max = n->Opcode() == Op_ConI ? (ConNode*)n : nullptr;
186 max_idx = j;
187 if( min->get_int() > max->get_int() ) {
188 // Swap min and max
189 ConNode *temp;
190 uint temp_idx;
191 temp = min; min = max; max = temp;
192 temp_idx = min_idx; min_idx = max_idx; max_idx = temp_idx;
193 }
194 }
195 }
196 break;
197 default:
198 {
199 val = n;
200 val_idx = j;
201 }
202 break;
203 }
204 }
205 }
206 return ( min && max && val && (min->get_int() <= 0) && (max->get_int() >=0) );
207 }
208
209
210 //------------------------------check_if_clipping------------------------------
211 // Helper function for RegionNode's identification of FP clipping
212 // Check that inputs to Region come from two IfNodes,
213 //
214 // If
215 // False True
216 // If |
217 // False True |
218 // | | |
219 // RegionNode_inputs
220 //
221 static bool check_if_clipping( const RegionNode *region, IfNode * &bot_if, IfNode * &top_if ) {
222 top_if = nullptr;
223 bot_if = nullptr;
224
225 // Check control structure above RegionNode for (if ( if ) )
226 Node *in1 = region->in(1);
227 Node *in2 = region->in(2);
228 Node *in3 = region->in(3);
229 // Check that all inputs are projections
230 if( in1->is_Proj() && in2->is_Proj() && in3->is_Proj() ) {
231 Node *in10 = in1->in(0);
232 Node *in20 = in2->in(0);
233 Node *in30 = in3->in(0);
234 // Check that #1 and #2 are ifTrue and ifFalse from same If
235 if( in10 != nullptr && in10->is_If() &&
236 in20 != nullptr && in20->is_If() &&
237 in30 != nullptr && in30->is_If() && in10 == in20 &&
238 (in1->Opcode() != in2->Opcode()) ) {
239 Node *in100 = in10->in(0);
240 Node *in1000 = (in100 != nullptr && in100->is_Proj()) ? in100->in(0) : nullptr;
241 // Check that control for in10 comes from other branch of IF from in3
242 if( in1000 != nullptr && in1000->is_If() &&
243 in30 == in1000 && (in3->Opcode() != in100->Opcode()) ) {
244 // Control pattern checks
245 top_if = (IfNode*)in1000;
246 bot_if = (IfNode*)in10;
247 }
248 }
249 }
250
251 return (top_if != nullptr);
252 }
253
254
255 //------------------------------check_convf2i_clipping-------------------------
256 // Helper function for RegionNode's identification of FP clipping
257 // Verify that the value input to the phi comes from "ConvF2I; LShift; RShift"
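//
// A typical instance (illustrative): clipping to a 16-bit range appears as
//
//   RShiftI(LShiftI(ConvF2I(f), 16), 16)
//
// i.e. a sign-extending truncation of the converted value. With
// left_shift == right_shift == 16, max_post_shift is 1 << (32 - 16 - 1) == 32768,
// so the shifts may only be dropped when the clip constants satisfy
// max <= 32768 and -min <= 32768.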
258 static bool check_convf2i_clipping( PhiNode *phi, uint idx, ConvF2INode * &convf2i, Node *min, Node *max) {
259 convf2i = nullptr;
260
261 // Check for the RShiftNode
262 Node *rshift = phi->in(idx);
263 assert( rshift, "Previous checks ensure phi input is present");
264 if( rshift->Opcode() != Op_RShiftI ) { return false; }
265
266 // Check for the LShiftNode
267 Node *lshift = rshift->in(1);
268 assert( lshift, "Previous checks ensure phi input is present");
269 if( lshift->Opcode() != Op_LShiftI ) { return false; }
270
271 // Check for the ConvF2INode
272 Node *conv = lshift->in(1);
273 if( conv->Opcode() != Op_ConvF2I ) { return false; }
274
275 // Check that shift amounts are only to get sign bits set after F2I
276 jint max_cutoff = max->get_int();
277 jint min_cutoff = min->get_int();
278 jint left_shift = lshift->in(2)->get_int();
279 jint right_shift = rshift->in(2)->get_int();
280 jint max_post_shift = nth_bit(BitsPerJavaInteger - left_shift - 1);
281 if( left_shift != right_shift ||
282 0 > left_shift || left_shift >= BitsPerJavaInteger ||
283 max_post_shift < max_cutoff ||
284 max_post_shift < -min_cutoff ) {
285 // Shifts are necessary but current transformation eliminates them
286 return false;
287 }
288
289 // OK to return the result of ConvF2I without shifting
290 convf2i = (ConvF2INode*)conv;
291 return true;
292 }
293
294
295 //------------------------------check_compare_clipping-------------------------
296 // Helper function for RegionNode's identification of FP clipping
297 static bool check_compare_clipping( bool less_than, IfNode *iff, ConNode *limit, Node * & input ) {
298 Node *i1 = iff->in(1);
299 if ( !i1->is_Bool() ) { return false; }
300 BoolNode *bool1 = i1->as_Bool();
301 if( less_than && bool1->_test._test != BoolTest::le ) { return false; }
302 else if( !less_than && bool1->_test._test != BoolTest::lt ) { return false; }
303 const Node *cmpF = bool1->in(1);
304 if( cmpF->Opcode() != Op_CmpF ) { return false; }
305 // Test that the float value being compared against
306 // is equivalent to the int value used as a limit
307 Node *nodef = cmpF->in(2);
308 if( nodef->Opcode() != Op_ConF ) { return false; }
309 jfloat conf = nodef->getf();
310 jint coni = limit->get_int();
311 if( ((int)conf) != coni ) { return false; }
312 input = cmpF->in(1);
313 return true;
314 }
315
316 //------------------------------is_unreachable_region--------------------------
317 // Check if the RegionNode is part of an unsafe loop and unreachable from root.
318 bool RegionNode::is_unreachable_region(const PhaseGVN* phase) {
319 Node* top = phase->C->top();
320 assert(req() == 2 || (req() == 3 && in(1) != nullptr && in(2) == top), "sanity check arguments");
321 if (_is_unreachable_region) {
322 // Return cached result from previous evaluation which should still be valid
323 assert(is_unreachable_from_root(phase), "walk the graph again and check if it's indeed unreachable");
324 return true;
325 }
326
327 // First, cut the simple case of fallthrough region when NONE of
328 // region's phis references itself directly or through a data node.
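// An example of the kind of self-reference being screened for (illustrative):
//
//   Region
//     |
//    Phi <----+
//     |       |
//    AddI ----+    (the Phi's value feeds back into itself through AddI)
//
// Such a data cycle keeps itself alive even when the Region is no longer
// reachable from Root, which is why a full graph walk may be required.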
329 if (is_possible_unsafe_loop(phase)) {
330 // If we have a possible unsafe loop, check if the region node is actually unreachable from root.
331 if (is_unreachable_from_root(phase)) {
332 _is_unreachable_region = true;
333 return true;
334 }
335 }
336 return false;
337 }
338
339 bool RegionNode::is_possible_unsafe_loop(const PhaseGVN* phase) const {
340 uint max = outcnt();
341 uint i;
342 for (i = 0; i < max; i++) {
343 Node* n = raw_out(i);
344 if (n != nullptr && n->is_Phi()) {
345 PhiNode* phi = n->as_Phi();
346 assert(phi->in(0) == this, "sanity check phi");
347 if (phi->outcnt() == 0) {
348 continue; // Safe case - no loops
349 }
350 if (phi->outcnt() == 1) {
351 Node* u = phi->raw_out(0);
352 // Skip if the only use is another Phi, a Call, or an Uncommon trap.
353 // It is safe to consider this case as fallthrough.
354 if (u != nullptr && (u->is_Phi() || u->is_CFG())) {
355 continue;
356 }
357 }
358 // Check whether the phi references itself directly or through another node.
359 if (phi->as_Phi()->simple_data_loop_check(phi->in(1)) >= PhiNode::Unsafe) {
360 break; // Found possible unsafe data loop.
361 }
362 }
363 }
364 if (i >= max) {
365 return false; // An unsafe case was NOT found - don't need graph walk.
366 }
367 return true;
368 }
369
370 bool RegionNode::is_unreachable_from_root(const PhaseGVN* phase) const {
371 ResourceMark rm;
372 Node_List nstack;
373 VectorSet visited;
374
375 // Mark all control nodes reachable from root outputs
376 Node* n = (Node*)phase->C->root();
377 nstack.push(n);
378 visited.set(n->_idx);
379 while (nstack.size() != 0) {
380 n = nstack.pop();
381 uint max = n->outcnt();
382 for (uint i = 0; i < max; i++) {
383 Node* m = n->raw_out(i);
384 if (m != nullptr && m->is_CFG()) {
385 if (m == this) {
386 return false; // We reached the Region node - it is not dead.
387 }
388 if (!visited.test_set(m->_idx))
389 nstack.push(m);
390 }
391 }
392 }
393 return true; // The Region node is unreachable - it is dead.
394 }
395
396 #ifdef ASSERT
397 // Is this region in an infinite subgraph?
398 // (no path to root except through false NeverBranch exit)
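// For example (illustrative), Java code like "while (true) { }" produces a
// loop whose only connection back to Root is through a NeverBranch exit;
// every other CFG path from such a Region stays inside the loop.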
399 bool RegionNode::is_in_infinite_subgraph() {
400 ResourceMark rm;
401 Unique_Node_List worklist;
402 worklist.push(this);
403 return RegionNode::are_all_nodes_in_infinite_subgraph(worklist);
404 }
405
406 // Are all nodes in worklist in infinite subgraph?
407 // (no path to root except through false NeverBranch exit)
408 // worklist is directly used for the traversal
409 bool RegionNode::are_all_nodes_in_infinite_subgraph(Unique_Node_List& worklist) {
410 // BFS traversal down the CFG, except through NeverBranch exits
411 for (uint i = 0; i < worklist.size(); ++i) {
412 Node* n = worklist.at(i);
413 assert(n->is_CFG(), "only traverse CFG");
414 if (n->is_Root()) {
415 // Found root -> there was an exit!
416 return false;
417 } else if (n->is_NeverBranch()) {
418 // Only follow the loop-internal projection, not the NeverBranch exit
419 ProjNode* proj = n->as_NeverBranch()->proj_out_or_null(0);
420 assert(proj != nullptr, "must find loop-internal projection of NeverBranch");
421 worklist.push(proj);
422 } else {
423 // Traverse all CFG outputs
424 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
425 Node* use = n->fast_out(i);
426 if (use->is_CFG()) {
427 worklist.push(use);
428 }
429 }
430 }
431 }
432 // No exit found for any loop -> all are infinite
433 return true;
434 }
435 #endif //ASSERT
436
437 void RegionNode::set_loop_status(RegionNode::LoopStatus status) {
438 assert(loop_status() == RegionNode::LoopStatus::NeverIrreducibleEntry, "why set our status again?");
439 assert(status != RegionNode::LoopStatus::MaybeIrreducibleEntry || !is_Loop(), "LoopNode is never irreducible entry.");
440 _loop_status = status;
441 }
442
443 // A Region can only be an irreducible entry if:
444 // - It is marked as "maybe irreducible entry". Any other loop status would guarantee
445 // that it is never an irreducible loop entry.
446 // - And it is not a LoopNode, those are guaranteed to be reducible loop entries.
447 bool RegionNode::can_be_irreducible_entry() const {
448 return loop_status() == RegionNode::LoopStatus::MaybeIrreducibleEntry &&
449 !is_Loop();
450 }
451
452 void RegionNode::try_clean_mem_phis(PhaseIterGVN* igvn) {
453 // Incremental inlining + PhaseStringOpts sometimes produce:
454 //
455 // cmpP with 1 top input
456 // |
457 // If
458 // / \
459 // IfFalse IfTrue /- Some Node
460 // \ / / /
461 // Region / /-MergeMem
462 // \---Phi
463 //
464 //
465 // It's expected by PhaseStringOpts that the Region goes away and is
466 // replaced by If's control input but because there's still a Phi,
467 // the Region stays in the graph. The top input from the cmpP is
468 // propagated forward and a subgraph that is useful goes away. The
469 // code in PhiNode::try_clean_memory_phi() replaces the Phi with the
470 // MergeMem in order to remove the Region if its last phi dies.
471
472 if (!is_diamond()) {
473 return;
474 }
475
476 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
477 Node* phi = fast_out(i);
478 if (phi->is_Phi() && phi->as_Phi()->try_clean_memory_phi(igvn)) {
479 --i;
480 --imax;
481 }
482 }
483 }
484
485 // Does this region merge a simple diamond formed by a proper IfNode?
486 //
487 // Cmp
488 // /
489 // ctrl Bool
490 // \ /
491 // IfNode
492 // / \
493 // IfFalse IfTrue
494 // \ /
495 // Region
496 bool RegionNode::is_diamond() const {
497 if (req() != 3) {
498 return false;
499 }
500
501 Node* left_path = in(1);
502 Node* right_path = in(2);
503 if (left_path == nullptr || right_path == nullptr) {
504 return false;
505 }
506 Node* diamond_if = left_path->in(0);
507 if (diamond_if == nullptr || !diamond_if->is_If() || diamond_if != right_path->in(0)) {
508 // Not an IfNode merging a diamond or TOP.
509 return false;
510 }
511
512 // Check for a proper bool/cmp
513 const Node* bol = diamond_if->in(1);
514 if (!bol->is_Bool()) {
515 return false;
516 }
517 const Node* cmp = bol->in(1);
518 if (!cmp->is_Cmp()) {
519 return false;
520 }
521 return true;
522 }
523 //------------------------------Ideal------------------------------------------
524 // Return a node which is more "ideal" than the current node. Must preserve
525 // the CFG, but we can still strip out dead paths.
526 Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) {
527 if( !can_reshape && !in(0) ) return nullptr; // Already degraded to a Copy
528 assert(!in(0) || !in(0)->is_Root(), "not a specially hidden merge");
529
530 // Check for a RegionNode with no Phi users whose inputs come from both
531 // arms of the same If. If found, the control-flow split is useless.
532 bool has_phis = false;
533 if (can_reshape) { // Need DU info to check for Phi users
534 try_clean_mem_phis(phase->is_IterGVN());
535 has_phis = (has_phi() != nullptr); // Cache result
536
537 if (!has_phis) { // No Phi users? Nothing merging?
538 for (uint i = 1; i < req()-1; i++) {
539 Node *if1 = in(i);
540 if( !if1 ) continue;
541 Node *iff = if1->in(0);
542 if( !iff || !iff->is_If() ) continue;
543 for( uint j=i+1; j<req(); j++ ) {
544 if( in(j) && in(j)->in(0) == iff &&
545 if1->Opcode() != in(j)->Opcode() ) {
546 // Add the IF Projections to the worklist. They (and the IF itself)
547 // will be eliminated if dead.
548 phase->is_IterGVN()->add_users_to_worklist(iff);
549 set_req(i, iff->in(0));// Skip around the useless IF diamond
550 set_req(j, nullptr);
551 return this; // Record progress
552 }
553 }
554 }
555 }
556 }
557
558 // Remove TOP or null input paths. If only 1 input path remains, this Region
559 // degrades to a copy.
560 bool add_to_worklist = true;
561 bool modified = false;
562 int cnt = 0; // Count of values merging
563 DEBUG_ONLY( int cnt_orig = req(); ) // Save original inputs count
564 DEBUG_ONLY( uint outcnt_orig = outcnt(); )
565 int del_it = 0; // The last input path we delete
566 bool found_top = false; // irreducible loops need to check reachability if we find TOP
567 // For all inputs...
568 for( uint i=1; i<req(); ++i ){// For all paths in
569 Node *n = in(i); // Get the input
570 if( n != nullptr ) {
571 // Remove useless control copy inputs
572 if( n->is_Region() && n->as_Region()->is_copy() ) {
573 set_req(i, n->nonnull_req());
574 modified = true;
575 i--;
576 continue;
577 }
578 if( n->is_Proj() ) { // Remove useless rethrows
579 Node *call = n->in(0);
580 if (call->is_Call() && call->as_Call()->entry_point() == OptoRuntime::rethrow_stub()) {
581 set_req(i, call->in(0));
582 modified = true;
583 i--;
584 continue;
585 }
586 }
587 if( phase->type(n) == Type::TOP ) {
588 set_req_X(i, nullptr, phase); // Ignore TOP inputs
589 modified = true;
590 found_top = true;
591 i--;
592 continue;
593 }
594 cnt++; // One more value merging
595 } else if (can_reshape) { // Else found dead path with DU info
596 PhaseIterGVN *igvn = phase->is_IterGVN();
597 del_req(i); // Yank path from self
598 del_it = i;
599
600 for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) {
601 Node* use = fast_out(j);
602
603 if (use->req() != req() && use->is_Phi()) {
604 assert(use->in(0) == this, "unexpected control input");
605 igvn->hash_delete(use); // Yank from hash before hacking edges
606 use->set_req_X(i, nullptr, igvn);// Correct DU info
607 use->del_req(i); // Yank path from Phis
608 }
609 }
610
611 if (add_to_worklist) {
612 igvn->add_users_to_worklist(this);
613 add_to_worklist = false;
614 }
615
616 i--;
617 }
618 }
619
620 assert(outcnt() == outcnt_orig, "do not expect to remove any use");
621
622 if (can_reshape && found_top && loop_status() == RegionNode::LoopStatus::MaybeIrreducibleEntry) {
623 // Is it a dead irreducible loop?
624 // If an irreducible loop loses one of the multiple entries
625 // that went into the loop head, or any secondary entries,
626 // we need to verify if the irreducible loop is still reachable,
627 // as the special logic in is_unreachable_region only works
628 // for reducible loops.
629 if (is_unreachable_from_root(phase)) {
630 // The irreducible loop is dead - must remove it
631 PhaseIterGVN* igvn = phase->is_IterGVN();
632 remove_unreachable_subgraph(igvn);
633 return nullptr;
634 }
635 } else if (can_reshape && cnt == 1) {
636 // Is it a dead loop?
637 // If it is a LoopNode, it had 2 inputs (+1 for itself) and
638 // one of them was cut. The loop is dead if the cut input was EntryControl.
639 // A Loop node may have only one input because the entry path
640 // is removed in PhaseIdealLoop::Dominators().
641 assert(!this->is_Loop() || cnt_orig <= 3, "Loop node should have 3 or less inputs");
642 if ((this->is_Loop() && (del_it == LoopNode::EntryControl ||
643 (del_it == 0 && is_unreachable_region(phase)))) ||
644 (!this->is_Loop() && has_phis && is_unreachable_region(phase))) {
645 PhaseIterGVN* igvn = phase->is_IterGVN();
646 remove_unreachable_subgraph(igvn);
647 return nullptr;
648 }
649 }
650
651 if( cnt <= 1 ) { // Only 1 path in?
652 set_req(0, nullptr); // Null control input for region copy
653 if( cnt == 0 && !can_reshape) { // Parse phase - leave the node as it is.
654 // No inputs or all inputs are null.
655 return nullptr;
656 } else if (can_reshape) { // Optimization phase - remove the node
657 PhaseIterGVN *igvn = phase->is_IterGVN();
658 // Strip mined (inner) loop is going away, remove outer loop.
659 if (is_CountedLoop() &&
660 as_Loop()->is_strip_mined()) {
661 Node* outer_sfpt = as_CountedLoop()->outer_safepoint();
662 Node* outer_out = as_CountedLoop()->outer_loop_exit();
663 if (outer_sfpt != nullptr && outer_out != nullptr) {
664 Node* in = outer_sfpt->in(0);
665 igvn->replace_node(outer_out, in);
666 LoopNode* outer = as_CountedLoop()->outer_loop();
667 igvn->replace_input_of(outer, LoopNode::LoopBackControl, igvn->C->top());
668 }
669 }
670 if (is_CountedLoop()) {
671 Node* opaq = as_CountedLoop()->is_canonical_loop_entry();
672 if (opaq != nullptr) {
673 // This is not a loop anymore. No need to keep the OpaqueZeroTripGuard node on the test that guards the loop as it
674 // won't be subject to further loop opts.
675 assert(opaq->Opcode() == Op_OpaqueZeroTripGuard, "");
676 igvn->replace_node(opaq, opaq->in(1));
677 }
678 }
679 Node *parent_ctrl;
680 if( cnt == 0 ) {
681 assert( req() == 1, "no inputs expected" );
682 // During IGVN phase such region will be subsumed by TOP node
683 // so region's phis will have TOP as control node.
684 // Kill phis here to avoid it.
685 // Also set other user's input to top.
686 parent_ctrl = phase->C->top();
687 } else {
688 // The fallthrough case since we already checked dead loops above.
689 parent_ctrl = in(1);
690 assert(parent_ctrl != nullptr, "Region is a copy of some non-null control");
691 assert(parent_ctrl != this, "Close dead loop");
692 }
693 if (add_to_worklist) {
694 igvn->add_users_to_worklist(this); // Check for further allowed opts
695 }
696 for (DUIterator_Last imin, i = last_outs(imin); i >= imin; --i) {
697 Node* n = last_out(i);
698 igvn->hash_delete(n); // Remove from worklist before modifying edges
699 if (n->outcnt() == 0) {
700 int uses_found = n->replace_edge(this, phase->C->top(), igvn);
701 if (uses_found > 1) { // (--i) done at the end of the loop.
702 i -= (uses_found - 1);
703 }
704 continue;
705 }
706 if( n->is_Phi() ) { // Collapse all Phis
707 // Eagerly replace phis to avoid regionless phis.
708 Node* in;
709 if( cnt == 0 ) {
710 assert( n->req() == 1, "No data inputs expected" );
711 in = parent_ctrl; // replaced by top
712 } else {
713 assert( n->req() == 2 && n->in(1) != nullptr, "Only one data input expected" );
714 in = n->in(1); // replaced by unique input
715 if( n->as_Phi()->is_unsafe_data_reference(in) )
716 in = phase->C->top(); // replaced by top
717 }
718 igvn->replace_node(n, in);
719 }
720 else if( n->is_Region() ) { // Update all incoming edges
721 assert(n != this, "Must be removed from DefUse edges");
722 int uses_found = n->replace_edge(this, parent_ctrl, igvn);
723 if (uses_found > 1) { // (--i) done at the end of the loop.
724 i -= (uses_found - 1);
725 }
726 }
727 else {
728 assert(n->in(0) == this, "Expect RegionNode to be control parent");
729 n->set_req(0, parent_ctrl);
730 }
731 #ifdef ASSERT
732 for( uint k=0; k < n->req(); k++ ) {
733 assert(n->in(k) != this, "All uses of RegionNode should be gone");
734 }
735 #endif
736 }
737 // Remove the RegionNode itself from DefUse info
738 igvn->remove_dead_node(this);
739 return nullptr;
740 }
741 return this; // Record progress
742 }
743
744
745 // If a Region flows into a Region, merge into one big happy merge.
746 if (can_reshape) {
747 Node *m = merge_region(this, phase);
748 if (m != nullptr) return m;
749 }
750
751 // Check if this region is the root of a clipping idiom on floats
752 if( ConvertFloat2IntClipping && can_reshape && req() == 4 ) {
753 // Check that only one use is a Phi and that its inputs are two constants plus the value being clipped
754 PhiNode* phi = has_unique_phi();
755 if (phi != nullptr) { // One Phi user
756 // Check inputs to the Phi
757 ConNode *min;
758 ConNode *max;
759 Node *val;
760 uint min_idx;
761 uint max_idx;
762 uint val_idx;
763 if( check_phi_clipping( phi, min, min_idx, max, max_idx, val, val_idx ) ) {
764 IfNode *top_if;
765 IfNode *bot_if;
766 if( check_if_clipping( this, bot_if, top_if ) ) {
767 // Control pattern checks, now verify compares
768 Node *top_in = nullptr; // value being compared against
769 Node *bot_in = nullptr;
770 if( check_compare_clipping( true, bot_if, min, bot_in ) &&
771 check_compare_clipping( false, top_if, max, top_in ) ) {
772 if( bot_in == top_in ) {
773 PhaseIterGVN *gvn = phase->is_IterGVN();
774 assert( gvn != nullptr, "Only have DefUse info in IterGVN");
775 // Only remaining check is that bot_in == top_in == (Phi's val + mods)
776
777 // Check for the ConvF2INode
778 ConvF2INode *convf2i;
779 if( check_convf2i_clipping( phi, val_idx, convf2i, min, max ) &&
780 convf2i->in(1) == bot_in ) {
781 // Matched pattern, including LShiftI; RShiftI, replace with integer compares
782 // max test
783 Node *cmp = gvn->register_new_node_with_optimizer(new CmpINode( convf2i, min ));
784 Node *boo = gvn->register_new_node_with_optimizer(new BoolNode( cmp, BoolTest::lt ));
785 IfNode *iff = (IfNode*)gvn->register_new_node_with_optimizer(new IfNode( top_if->in(0), boo, PROB_UNLIKELY_MAG(5), top_if->_fcnt ));
786 Node *if_min= gvn->register_new_node_with_optimizer(new IfTrueNode (iff));
787 Node *ifF = gvn->register_new_node_with_optimizer(new IfFalseNode(iff));
788 // min test
789 cmp = gvn->register_new_node_with_optimizer(new CmpINode( convf2i, max ));
790 boo = gvn->register_new_node_with_optimizer(new BoolNode( cmp, BoolTest::gt ));
791 iff = (IfNode*)gvn->register_new_node_with_optimizer(new IfNode( ifF, boo, PROB_UNLIKELY_MAG(5), bot_if->_fcnt ));
792 Node *if_max= gvn->register_new_node_with_optimizer(new IfTrueNode (iff));
793 ifF = gvn->register_new_node_with_optimizer(new IfFalseNode(iff));
794 // update input edges to region node
795 set_req_X( min_idx, if_min, gvn );
796 set_req_X( max_idx, if_max, gvn );
797 set_req_X( val_idx, ifF, gvn );
798 // remove unnecessary 'LShiftI; RShiftI' idiom
799 gvn->hash_delete(phi);
800 phi->set_req_X( val_idx, convf2i, gvn );
801 gvn->hash_find_insert(phi);
802 // Return transformed region node
803 return this;
804 }
805 }
806 }
807 }
808 }
809 }
810 }
811
812 if (can_reshape) {
813 modified |= optimize_trichotomy(phase->is_IterGVN());
814 }
815
816 return modified ? this : nullptr;
817 }
818
819 //--------------------------remove_unreachable_subgraph----------------------
820 // This region and therefore all nodes on the input control path(s) are unreachable
821 // from root. To avoid incomplete removal of unreachable subgraphs, walk up the CFG
822 // and aggressively replace all nodes by top.
823 // If a control node "def" with a single control output "use" has that single output
824 // "use" replaced with top, then "use" removes itself. The consequence is that, by the
825 // time we visit "use", all of its inputs have already been removed. They are lost and
826 // we cannot traverse them. This is why we first find all unreachable nodes, and then
827 // remove them in a second step.
828 void RegionNode::remove_unreachable_subgraph(PhaseIterGVN* igvn) {
829 Node* top = igvn->C->top();
830 ResourceMark rm;
831 Unique_Node_List unreachable; // visit each only once
832 unreachable.push(this);
833 // Recursively find all control inputs.
834 for (uint i = 0; i < unreachable.size(); i++) {
835 Node* n = unreachable.at(i);
836 for (uint i = 0; i < n->req(); ++i) {
837 Node* m = n->in(i);
838 assert(m == nullptr || !m->is_Root(), "Should be unreachable from root");
839 if (m != nullptr && m->is_CFG()) {
840 unreachable.push(m);
841 }
842 }
843 }
844 // Remove all unreachable nodes.
845 for (uint i = 0; i < unreachable.size(); i++) {
846 Node* n = unreachable.at(i);
847 if (n->is_Region()) {
848 // Eagerly replace phis with top to avoid regionless phis.
849 n->set_req(0, nullptr);
850 bool progress = true;
851 uint max = n->outcnt();
852 DUIterator j;
853 while (progress) {
854 progress = false;
855 for (j = n->outs(); n->has_out(j); j++) {
856 Node* u = n->out(j);
857 if (u->is_Phi()) {
858 igvn->replace_node(u, top);
859 if (max != n->outcnt()) {
860 progress = true;
861 j = n->refresh_out_pos(j);
862 max = n->outcnt();
863 }
864 }
865 }
866 }
867 }
868 igvn->replace_node(n, top);
869 }
870 }
871
872 //------------------------------optimize_trichotomy--------------------------
873 // Optimize nested comparisons of the following kind:
874 //
875 // int compare(int a, int b) {
876 // return (a < b) ? -1 : (a == b) ? 0 : 1;
877 // }
878 //
879 // Shape 1:
880 // if (compare(a, b) == 1) { ... } -> if (a > b) { ... }
881 //
882 // Shape 2:
883 // if (compare(a, b) == 0) { ... } -> if (a == b) { ... }
884 //
885 // The above code leads to the following IR shapes where both Ifs compare the
886 // same value and two out of three region inputs idx1 and idx2 map to
887 // the same value and control flow.
888 //
889 // (1) If (2) If
890 // / \ / \
891 // Proj Proj Proj Proj
892 // | \ | \
893 // | If | If If
894 // | / \ | / \ / \
895 // | Proj Proj | Proj Proj ==> Proj Proj
896 // | / / \ | / | /
897 // Region / \ | / | /
898 // \ / \ | / | /
899 // Region Region Region
900 //
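// For instance, in shape (1) the path kept through both Ifs carries the two
// conditions !(a < b) and !(a == b) on the same compared values; merging the
// masks ge and ne yields gt, which is how the nested tests collapse into the
// single check "a > b" of the first example (illustrative walk-through).
//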
901 // The method returns true if 'this' is modified and false otherwise.
902 bool RegionNode::optimize_trichotomy(PhaseIterGVN* igvn) {
903 int idx1 = 1, idx2 = 2;
904 Node* region = nullptr;
905 if (req() == 3 && in(1) != nullptr && in(2) != nullptr) {
906 // Shape 1: Check if one of the inputs is a region that merges two control
907 // inputs and has no other users (especially no Phi users).
908 region = in(1)->isa_Region() ? in(1) : in(2)->isa_Region();
909 if (region == nullptr || region->outcnt() != 2 || region->req() != 3) {
910 return false; // No suitable region input found
911 }
912 } else if (req() == 4) {
913 // Shape 2: Check if two control inputs map to the same value of the unique phi
914 // user and treat these as if they would come from another region (shape (1)).
915 PhiNode* phi = has_unique_phi();
916 if (phi == nullptr) {
917 return false; // No unique phi user
918 }
919 if (phi->in(idx1) != phi->in(idx2)) {
920 idx2 = 3;
921 if (phi->in(idx1) != phi->in(idx2)) {
922 idx1 = 2;
923 if (phi->in(idx1) != phi->in(idx2)) {
924 return false; // No equal phi inputs found
925 }
926 }
927 }
928 assert(phi->in(idx1) == phi->in(idx2), "must be"); // Region is merging same value
929 region = this;
930 }
931 if (region == nullptr || region->in(idx1) == nullptr || region->in(idx2) == nullptr) {
932 return false; // Region does not merge two control inputs
933 }
934 // At this point we know that region->in(idx1) and region->in(idx2) map to the same
935 // value and control flow. Now search for ifs that feed into these region inputs.
936 ProjNode* proj1 = region->in(idx1)->isa_Proj();
937 ProjNode* proj2 = region->in(idx2)->isa_Proj();
938 if (proj1 == nullptr || proj1->outcnt() != 1 ||
939 proj2 == nullptr || proj2->outcnt() != 1) {
940 return false; // No projection inputs with region as unique user found
941 }
942 assert(proj1 != proj2, "should be different projections");
943 IfNode* iff1 = proj1->in(0)->isa_If();
944 IfNode* iff2 = proj2->in(0)->isa_If();
945 if (iff1 == nullptr || iff1->outcnt() != 2 ||
946 iff2 == nullptr || iff2->outcnt() != 2) {
947 return false; // No ifs found
948 }
949 if (iff1 == iff2) {
950 igvn->add_users_to_worklist(iff1); // Make sure dead if is eliminated
951 igvn->replace_input_of(region, idx1, iff1->in(0));
952 igvn->replace_input_of(region, idx2, igvn->C->top());
953 return (region == this); // Remove useless if (both projections map to the same control/value)
954 }
955 BoolNode* bol1 = iff1->in(1)->isa_Bool();
956 BoolNode* bol2 = iff2->in(1)->isa_Bool();
957 if (bol1 == nullptr || bol2 == nullptr) {
958 return false; // No bool inputs found
959 }
960 Node* cmp1 = bol1->in(1);
961 Node* cmp2 = bol2->in(1);
962 bool commute = false;
963 if (!cmp1->is_Cmp() || !cmp2->is_Cmp()) {
964 return false; // No comparison
965 } else if (cmp1->Opcode() == Op_CmpF || cmp1->Opcode() == Op_CmpD ||
966 cmp2->Opcode() == Op_CmpF || cmp2->Opcode() == Op_CmpD ||
967 cmp1->Opcode() == Op_CmpP || cmp1->Opcode() == Op_CmpN ||
968 cmp2->Opcode() == Op_CmpP || cmp2->Opcode() == Op_CmpN ||
969 cmp1->is_SubTypeCheck() || cmp2->is_SubTypeCheck()) {
970 // Floats and pointers don't exactly obey trichotomy. To be on the safe side, don't transform their tests.
971 // SubTypeCheck is not commutative
972 return false;
973 } else if (cmp1 != cmp2) {
974 if (cmp1->in(1) == cmp2->in(2) &&
975 cmp1->in(2) == cmp2->in(1)) {
976 commute = true; // Same but swapped inputs, commute the test
977 } else {
978 return false; // Ifs are not comparing the same values
979 }
980 }
981 proj1 = proj1->other_if_proj();
982 proj2 = proj2->other_if_proj();
983 if (!((proj1->unique_ctrl_out_or_null() == iff2 &&
984 proj2->unique_ctrl_out_or_null() == this) ||
985 (proj2->unique_ctrl_out_or_null() == iff1 &&
986 proj1->unique_ctrl_out_or_null() == this))) {
987 return false; // Ifs are not connected through other projs
988 }
989 // Found 'iff -> proj -> iff -> proj -> this' shape where all other projs are merged
990 // through 'region' and map to the same value. Merge the boolean tests and replace
991 // the ifs by a single comparison.
992 BoolTest test1 = (proj1->_con == 1) ? bol1->_test : bol1->_test.negate();
993 BoolTest test2 = (proj2->_con == 1) ? bol2->_test : bol2->_test.negate();
994 test1 = commute ? test1.commute() : test1;
995 // After possibly commuting test1, if we can merge test1 & test2, then proj2/iff2/bol2 are the nodes to refine.
996 BoolTest::mask res = test1.merge(test2);
997 if (res == BoolTest::illegal) {
998 return false; // Unable to merge tests
999 }
1000 // Adjust iff1 to always pass (only iff2 will remain)
1001 igvn->replace_input_of(iff1, 1, igvn->intcon(proj1->_con));
1002 if (res == BoolTest::never) {
1003 // Merged test is always false, adjust iff2 to always fail
1004 igvn->replace_input_of(iff2, 1, igvn->intcon(1 - proj2->_con));
1005 } else {
1006 // Replace bool input of iff2 with merged test
1007 BoolNode* new_bol = new BoolNode(bol2->in(1), res);
1008 igvn->replace_input_of(iff2, 1, igvn->transform((proj2->_con == 1) ? new_bol : new_bol->negate(igvn)));
1009 if (new_bol->outcnt() == 0) {
1010 igvn->remove_dead_node(new_bol);
1011 }
1012 }
1013 return false;
1014 }
1015
1016 const RegMask &RegionNode::out_RegMask() const {
1017 return RegMask::Empty;
1018 }
1019
1020 #ifndef PRODUCT
1021 void RegionNode::dump_spec(outputStream* st) const {
1022 Node::dump_spec(st);
1023 switch (loop_status()) {
1024 case RegionNode::LoopStatus::MaybeIrreducibleEntry:
1025 st->print("#irreducible ");
1026 break;
1027 case RegionNode::LoopStatus::Reducible:
1028 st->print("#reducible ");
1029 break;
1030 case RegionNode::LoopStatus::NeverIrreducibleEntry:
1031 break; // nothing
1032 }
1033 }
1034 #endif
1035
1036 // Find the one non-null required input. RegionNode only
1037 Node *Node::nonnull_req() const {
1038 assert( is_Region(), "" );
1039 for( uint i = 1; i < _cnt; i++ )
1040 if( in(i) )
1041 return in(i);
1042 ShouldNotReachHere();
1043 return nullptr;
1044 }
1045
1046
1047 //=============================================================================
1048 // note that these functions assume that the _adr_type field is flattened
1049 uint PhiNode::hash() const {
1050 const Type* at = _adr_type;
1051 return TypeNode::hash() + (at ? at->hash() : 0);
1052 }
1053 bool PhiNode::cmp( const Node &n ) const {
1054 return TypeNode::cmp(n) && _adr_type == ((PhiNode&)n)._adr_type;
1055 }
1056 static inline
1057 const TypePtr* flatten_phi_adr_type(const TypePtr* at) {
1058 if (at == nullptr || at == TypePtr::BOTTOM) return at;
1059 return Compile::current()->alias_type(at)->adr_type();
1060 }
1061
1062 //----------------------------make---------------------------------------------
1063 // create a new phi with edges matching r and set (initially) to x
1064 PhiNode* PhiNode::make(Node* r, Node* x, const Type *t, const TypePtr* at) {
1065 uint preds = r->req(); // Number of predecessor paths
1066 assert(t != Type::MEMORY || at == flatten_phi_adr_type(at), "flatten at");
1067 PhiNode* p = new PhiNode(r, t, at);
1068 for (uint j = 1; j < preds; j++) {
1069 // Fill in all inputs, except those which the region does not yet have
1070 if (r->in(j) != nullptr)
1071 p->init_req(j, x);
1072 }
1073 return p;
1074 }
1075 PhiNode* PhiNode::make(Node* r, Node* x) {
1076 const Type* t = x->bottom_type();
1077 const TypePtr* at = nullptr;
1078 if (t == Type::MEMORY) at = flatten_phi_adr_type(x->adr_type());
1079 return make(r, x, t, at);
1080 }
1081 PhiNode* PhiNode::make_blank(Node* r, Node* x) {
1082 const Type* t = x->bottom_type();
1083 const TypePtr* at = nullptr;
1084 if (t == Type::MEMORY) at = flatten_phi_adr_type(x->adr_type());
1085 return new PhiNode(r, t, at);
1086 }
1087
1088
1089 //------------------------slice_memory-----------------------------------------
1090 // create a new phi with narrowed memory type
1091 PhiNode* PhiNode::slice_memory(const TypePtr* adr_type) const {
1092 PhiNode* mem = (PhiNode*) clone();
1093 *(const TypePtr**)&mem->_adr_type = adr_type;
1094 // convert self-loops, or else we get a bad graph
1095 for (uint i = 1; i < req(); i++) {
1096 if ((const Node*)in(i) == this) mem->set_req(i, mem);
1097 }
1098 mem->verify_adr_type();
1099 return mem;
1100 }
1101
1102 //------------------------split_out_instance-----------------------------------
1103 // Split out an instance type from a bottom phi.
1104 PhiNode* PhiNode::split_out_instance(const TypePtr* at, PhaseIterGVN *igvn) const {
1105 const TypeOopPtr *t_oop = at->isa_oopptr();
1106 assert(t_oop != nullptr && t_oop->is_known_instance(), "expecting instance oopptr");
1107
1108 // Check if an appropriate node already exists.
1109 Node *region = in(0);
1110 for (DUIterator_Fast kmax, k = region->fast_outs(kmax); k < kmax; k++) {
1111 Node* use = region->fast_out(k);
1112 if( use->is_Phi()) {
1113 PhiNode *phi2 = use->as_Phi();
1114 if (phi2->type() == Type::MEMORY && phi2->adr_type() == at) {
1115 return phi2;
1116 }
1117 }
1118 }
1119 Compile *C = igvn->C;
1120 ResourceMark rm;
1121 Node_Array node_map;
1122 Node_Stack stack(C->live_nodes() >> 4);
1123 PhiNode *nphi = slice_memory(at);
1124 igvn->register_new_node_with_optimizer( nphi );
1125 node_map.map(_idx, nphi);
1126 stack.push((Node *)this, 1);
1127 while(!stack.is_empty()) {
1128 PhiNode *ophi = stack.node()->as_Phi();
1129 uint i = stack.index();
1130 assert(i >= 1, "not control edge");
1131 stack.pop();
1132 nphi = node_map[ophi->_idx]->as_Phi();
1133 for (; i < ophi->req(); i++) {
1134 Node *in = ophi->in(i);
1135 if (in == nullptr || igvn->type(in) == Type::TOP)
1136 continue;
1137 Node *opt = MemNode::optimize_simple_memory_chain(in, t_oop, nullptr, igvn);
1138 PhiNode *optphi = opt->is_Phi() ? opt->as_Phi() : nullptr;
1139 if (optphi != nullptr && optphi->adr_type() == TypePtr::BOTTOM) {
1140 opt = node_map[optphi->_idx];
1141 if (opt == nullptr) {
1142 stack.push(ophi, i);
1143 nphi = optphi->slice_memory(at);
1144 igvn->register_new_node_with_optimizer( nphi );
1145 node_map.map(optphi->_idx, nphi);
1146 ophi = optphi;
1147 i = 0; // will get incremented at top of loop
1148 continue;
1149 }
1150 }
1151 nphi->set_req(i, opt);
1152 }
1153 }
1154 return nphi;
1155 }
1156
1157 //------------------------verify_adr_type--------------------------------------
1158 #ifdef ASSERT
1159 void PhiNode::verify_adr_type(VectorSet& visited, const TypePtr* at) const {
1160 if (visited.test_set(_idx)) return; //already visited
1161
1162 // recheck constructor invariants:
1163 verify_adr_type(false);
1164
1165 // recheck local phi/phi consistency:
1166 assert(_adr_type == at || _adr_type == TypePtr::BOTTOM,
1167 "adr_type must be consistent across phi nest");
1168
1169 // walk around
1170 for (uint i = 1; i < req(); i++) {
1171 Node* n = in(i);
1172 if (n == nullptr) continue;
1173 const Node* np = in(i);
1174 if (np->is_Phi()) {
1175 np->as_Phi()->verify_adr_type(visited, at);
1176 } else if (n->bottom_type() == Type::TOP
1177 || (n->is_Mem() && n->in(MemNode::Address)->bottom_type() == Type::TOP)) {
1178 // ignore top inputs
1179 } else {
1180 const TypePtr* nat = flatten_phi_adr_type(n->adr_type());
1181 // recheck phi/non-phi consistency at leaves:
1182 assert((nat != nullptr) == (at != nullptr), "");
1183 assert(nat == at || nat == TypePtr::BOTTOM,
1184 "adr_type must be consistent at leaves of phi nest");
1185 }
1186 }
1187 }
1188
1189 // Verify a whole nest of phis rooted at this one.
1190 void PhiNode::verify_adr_type(bool recursive) const {
1191 if (VMError::is_error_reported()) return; // muzzle asserts when debugging an error
1192 if (Node::in_dump()) return; // muzzle asserts when printing
1193
1194 assert((_type == Type::MEMORY) == (_adr_type != nullptr), "adr_type for memory phis only");
1195
1196 if (!VerifyAliases) return; // verify thoroughly only if requested
1197
1198 assert(_adr_type == flatten_phi_adr_type(_adr_type),
1199 "Phi::adr_type must be pre-normalized");
1200
1201 if (recursive) {
1202 VectorSet visited;
1203 verify_adr_type(visited, _adr_type);
1204 }
1205 }
1206 #endif
1207
1208
1209 //------------------------------Value------------------------------------------
1210 // Compute the type of the PhiNode
1211 const Type* PhiNode::Value(PhaseGVN* phase) const {
1212 Node *r = in(0); // RegionNode
1213 if( !r ) // Copy or dead
1214 return in(1) ? phase->type(in(1)) : Type::TOP;
1215
1216 // Note: During parsing, phis are often transformed before their regions.
1217 // This means we have to use type_or_null to defend against untyped regions.
1218 if( phase->type_or_null(r) == Type::TOP ) // Dead code?
1219 return Type::TOP;
1220
1221 // Check for trip-counted loop. If so, be smarter.
1222 BaseCountedLoopNode* l = r->is_BaseCountedLoop() ? r->as_BaseCountedLoop() : nullptr;
1223 if (l && ((const Node*)l->phi() == this)) { // Trip counted loop!
1224 // protect against init_trip() or limit() returning null
1225 if (l->can_be_counted_loop(phase)) {
1226 const Node* init = l->init_trip();
1227 const Node* limit = l->limit();
1228 const Node* stride = l->stride();
1229 if (init != nullptr && limit != nullptr && stride != nullptr) {
1230 const TypeInteger* lo = phase->type(init)->isa_integer(l->bt());
1231 const TypeInteger* hi = phase->type(limit)->isa_integer(l->bt());
1232 const TypeInteger* stride_t = phase->type(stride)->isa_integer(l->bt());
1233 if (lo != nullptr && hi != nullptr && stride_t != nullptr) { // Dying loops might have TOP here
1234 assert(stride_t->is_con(), "bad stride type");
1235 BoolTest::mask bt = l->loopexit()->test_trip();
1236 // If the loop exit condition is "not equal", the condition
1237 // would not trigger if init > limit (if stride > 0) or if
1238 // init < limit (if stride < 0), so we can't deduce bounds
1239 // for the iv from the exit condition.
1240 if (bt != BoolTest::ne) {
1241 jlong stride_con = stride_t->get_con_as_long(l->bt());
1242 if (stride_con < 0) { // Down-counter loop
1243 swap(lo, hi);
1244 jlong iv_range_lower_limit = lo->lo_as_long();
1245 // Prevent overflow when adding one below
1246 if (iv_range_lower_limit < max_signed_integer(l->bt())) {
1247 // The loop exit condition is: iv + stride > limit (iv is this Phi). So the loop iterates until
1248 // iv + stride <= limit
1249 // We know that: limit >= lo->lo_as_long() and stride <= -1
1250 // So when the loop exits, iv has to be at most lo->lo_as_long() + 1
1251 iv_range_lower_limit += 1; // lo is after decrement
1252 // Exact bounds for the phi can be computed when ABS(stride) is greater than 1 and the bounds are constant.
1253 if (lo->is_con() && hi->is_con() && hi->lo_as_long() > lo->hi_as_long() && stride_con != -1) {
1254 julong uhi = static_cast<julong>(hi->lo_as_long());
1255 julong ulo = static_cast<julong>(lo->hi_as_long());
1256 julong diff = ((uhi - ulo - 1) / (-stride_con)) * (-stride_con);
1257 julong ufirst = hi->lo_as_long() - diff;
1258 iv_range_lower_limit = reinterpret_cast<jlong &>(ufirst);
1259 assert(iv_range_lower_limit >= lo->lo_as_long() + 1, "should end up with narrower range");
1260 }
1261 }
1262 return TypeInteger::make(MIN2(iv_range_lower_limit, hi->lo_as_long()), hi->hi_as_long(), 3, l->bt())->filter_speculative(_type);
1263 } else if (stride_con >= 0) {
1264 jlong iv_range_upper_limit = hi->hi_as_long();
1265 // Prevent overflow when subtracting one below
1266 if (iv_range_upper_limit > min_signed_integer(l->bt())) {
1267 // The loop exit condition is: iv + stride < limit (iv is this Phi). So the loop iterates until
1268 // iv + stride >= limit
1269 // We know that: limit <= hi->hi_as_long() and stride >= 1
1270 // So when the loop exits, iv has to be at most hi->hi_as_long() - 1
1271 iv_range_upper_limit -= 1;
1272 // Exact bounds for the phi can be computed when ABS(stride) is greater than 1 and the bounds are constant.
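// For example (illustrative, constant bounds): lo == 0, hi == 10, stride == 3
// gives diff == ((10 - 0 - 1) / 3) * 3 == 9, so the last value the phi takes
// inside the loop is 0 + 9 == 9 and its range tightens to [0, 9].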
1273 if (lo->is_con() && hi->is_con() && hi->lo_as_long() > lo->hi_as_long() && stride_con != 1) {
1274 julong uhi = static_cast<julong>(hi->lo_as_long());
1275 julong ulo = static_cast<julong>(lo->hi_as_long());
1276 julong diff = ((uhi - ulo - 1) / stride_con) * stride_con;
1277 julong ulast = lo->hi_as_long() + diff;
1278 iv_range_upper_limit = reinterpret_cast<jlong &>(ulast);
1279 assert(iv_range_upper_limit <= hi->hi_as_long() - 1, "should end up with narrower range");
1280 }
1281 }
1282 return TypeInteger::make(lo->lo_as_long(), MAX2(lo->hi_as_long(), iv_range_upper_limit), 3, l->bt())->filter_speculative(_type);
1283 }
1284 }
1285 }
1286 }
1287 } else if (l->in(LoopNode::LoopBackControl) != nullptr &&
1288 in(LoopNode::EntryControl) != nullptr &&
1289 phase->type(l->in(LoopNode::LoopBackControl)) == Type::TOP) {
1290 // During CCP, if we saturate the type of a counted loop's Phi
1291 // before the special code for counted loop above has a chance
1292 // to run (that is as long as the type of the backedge's control
1293 // is top), we might end up with non monotonic types
1294 return phase->type(in(LoopNode::EntryControl))->filter_speculative(_type);
1295 }
1296 }
1297
1298 // Default case: merge all inputs
1299 const Type *t = Type::TOP; // Merged type starting value
1300 for (uint i = 1; i < req(); ++i) {// For all paths in
1301 // Reachable control path?
1302 if (r->in(i) && phase->type(r->in(i)) == Type::CONTROL) {
1303 const Type* ti = phase->type(in(i));
1304 t = t->meet_speculative(ti);
1305 }
1306 }
1307
1308 // The worst-case type (from ciTypeFlow) should be consistent with "t".
1309 // That is, we expect that "t->higher_equal(_type)" holds true.
1310 // There are various exceptions:
1311 // - Inputs which are phis might in fact be widened unnecessarily.
1312 // For example, an input might be a widened int while the phi is a short.
1313 // - Inputs might be BotPtrs but this phi is dependent on a null check,
1314 // and postCCP has removed the cast which encodes the result of the check.
1315 // - The type of this phi is an interface, and the inputs are classes.
1316 // - Value calls on inputs might produce fuzzy results.
1317 // (Occurrences of this case suggest improvements to Value methods.)
1318 //
1319 // It is not possible to see Type::BOTTOM values as phi inputs,
1320 // because the ciTypeFlow pre-pass produces verifier-quality types.
1321 const Type* ft = t->filter_speculative(_type); // Worst case type
1322
1323 #ifdef ASSERT
1324 // The following logic has been moved into TypeOopPtr::filter.
1325 const Type* jt = t->join_speculative(_type);
1326 if (jt->empty()) { // Emptied out???
1327 // Otherwise it's something stupid like non-overlapping int ranges
1328 // found on dying counted loops.
1329 assert(ft == Type::TOP, ""); // Canonical empty value
1330 }
1331
1332 else {
1333
1334 if (jt != ft && jt->base() == ft->base()) {
1335 if (jt->isa_int() &&
1336 jt->is_int()->_lo == ft->is_int()->_lo &&
1337 jt->is_int()->_hi == ft->is_int()->_hi)
1338 jt = ft;
1339 if (jt->isa_long() &&
1340 jt->is_long()->_lo == ft->is_long()->_lo &&
1341 jt->is_long()->_hi == ft->is_long()->_hi)
1342 jt = ft;
1343 }
1344 if (jt != ft) {
1345 tty->print("merge type: "); t->dump(); tty->cr();
1346 tty->print("kill type: "); _type->dump(); tty->cr();
1347 tty->print("join type: "); jt->dump(); tty->cr();
1348 tty->print("filter type: "); ft->dump(); tty->cr();
1349 }
1350 assert(jt == ft, "");
1351 }
1352 #endif //ASSERT
1353
1354 // Deal with conversion problems found in data loops.
1355 ft = phase->saturate_and_maybe_push_to_igvn_worklist(this, ft);
1356 return ft;
1357 }
1358
1359 // Does this Phi represent a simple well-shaped diamond merge? Return the
1360 // index of the true path or 0 otherwise.
1361 int PhiNode::is_diamond_phi() const {
1362 Node* region = in(0);
1363 assert(region != nullptr && region->is_Region(), "phi must have region");
1364 if (!region->as_Region()->is_diamond()) {
1365 return 0;
1366 }
1367
1368 if (region->in(1)->is_IfTrue()) {
1369 assert(region->in(2)->is_IfFalse(), "bad If");
1370 return 1;
1371 } else {
1372 // Flipped projections.
1373 assert(region->in(2)->is_IfTrue(), "bad If");
1374 return 2;
1375 }
1376 }
1377
1378 // Do the following transformation if we find the corresponding graph shape, remove the involved memory phi and return
1379 // true. Otherwise, return false if the transformation cannot be applied.
1380 //
1381 // If If
1382 // / \ / \
1383 // IfFalse IfTrue /- Some Node IfFalse IfTrue
1384 // \ / / / \ / Some Node
1385 // Region / /-MergeMem ===> Region |
1386 // / \---Phi | MergeMem
1387 // [other phis] \ [other phis] |
1388 // use use
1389 bool PhiNode::try_clean_memory_phi(PhaseIterGVN* igvn) {
1390 if (_type != Type::MEMORY) {
1391 return false;
1392 }
1393 assert(is_diamond_phi() > 0, "sanity");
1394 assert(req() == 3, "same as region");
1395 const Node* region = in(0);
1396 for (uint i = 1; i < 3; i++) {
1397 Node* phi_input = in(i);
1398 if (phi_input != nullptr && phi_input->is_MergeMem() && region->in(i)->outcnt() == 1) {
1399 // Nothing is control-dependent on path #i except the region itself.
1400 MergeMemNode* merge_mem = phi_input->as_MergeMem();
1401 uint j = 3 - i;
1402 Node* other_phi_input = in(j);
1403 if (other_phi_input != nullptr && other_phi_input == merge_mem->base_memory()) {
1404 // merge_mem is a successor memory to other_phi_input, and is not pinned inside the diamond, so push it out.
1405 // This will allow the diamond to collapse completely if there are no other phis left.
1406 igvn->replace_node(this, merge_mem);
1407 return true;
1408 }
1409 }
1410 }
1411 return false;
1412 }
1413 //----------------------------check_cmove_id-----------------------------------
1414 // Check for CMove'ing a constant after comparing against the constant.
1415 // Happens all the time now, since if we compare equality vs a constant in
1416 // the parser, we "know" the variable is constant on one path and we force
1417 // it. Thus code like "if( x==0 ) {/*EMPTY*/}" ends up inserting a
1418 // conditional move: "x = (x==0)?0:x;". Yucko. This fix is slightly more
1419 // general in that we don't need constants. Since CMove's are only inserted
1420 // in very special circumstances, we do it here on generic Phi's.
1421 Node* PhiNode::is_cmove_id(PhaseTransform* phase, int true_path) {
1422 assert(true_path !=0, "only diamond shape graph expected");
1423
1424 // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
1425 // phi->region->if_proj->ifnode->bool->cmp
1426 Node* region = in(0);
1427 Node* iff = region->in(1)->in(0);
1428 BoolNode* b = iff->in(1)->as_Bool();
1429 Node* cmp = b->in(1);
1430 Node* tval = in(true_path);
1431 Node* fval = in(3-true_path);
1432 Node* id = CMoveNode::is_cmove_id(phase, cmp, tval, fval, b);
1433 if (id == nullptr)
1434 return nullptr;
1435
1436 // Either value might be a cast that depends on a branch of 'iff'.
1437 // Since the 'id' value will float free of the diamond, either
1438 // decast or return failure.
1439 Node* ctl = id->in(0);
1440 if (ctl != nullptr && ctl->in(0) == iff) {
1441 if (id->is_ConstraintCast()) {
1442 return id->in(1);
1443 } else {
1444 // Don't know how to disentangle this value.
1445 return nullptr;
1446 }
1447 }
1448
1449 return id;
1450 }
1451
1452 //------------------------------Identity---------------------------------------
1453 // Check for Region being Identity.
1454 Node* PhiNode::Identity(PhaseGVN* phase) {
1455 if (must_wait_for_region_in_irreducible_loop(phase)) {
1456 return this;
1457 }
1458 // Check for no merging going on
1459 // (There used to be special-case code here when this->region->is_Loop.
1460 // It would check for a tributary phi on the backedge that fed the main phi
1461 // trivially, perhaps through a single cast. The unique_input method
1462 // does all this and more, by reducing such tributaries to 'this'.)
1463 Node* uin = unique_input(phase, false);
1464 if (uin != nullptr) {
1465 return uin;
1466 }
1467
1468 int true_path = is_diamond_phi();
1469 // Delay CMove'ing identity if Ideal has not had the chance to handle unsafe cases, yet.
1470 if (true_path != 0 && !(phase->is_IterGVN() && wait_for_region_igvn(phase))) {
1471 Node* id = is_cmove_id(phase, true_path);
1472 if (id != nullptr) {
1473 return id;
1474 }
1475 }
1476
1477 // Looking for phis with identical inputs. If we find one that has
1478 // type TypePtr::BOTTOM, replace the current phi with the bottom phi.
1479 if (phase->is_IterGVN() && type() == Type::MEMORY && adr_type() !=
1480 TypePtr::BOTTOM && !adr_type()->is_known_instance()) {
1481 uint phi_len = req();
1482 Node* phi_reg = region();
1483 for (DUIterator_Fast imax, i = phi_reg->fast_outs(imax); i < imax; i++) {
1484 Node* u = phi_reg->fast_out(i);
1485 if (u->is_Phi() && u->as_Phi()->type() == Type::MEMORY &&
1486 u->adr_type() == TypePtr::BOTTOM && u->in(0) == phi_reg &&
1487 u->req() == phi_len) {
1488 for (uint j = 1; j < phi_len; j++) {
1489 if (in(j) != u->in(j)) {
1490 u = nullptr;
1491 break;
1492 }
1493 }
1494 if (u != nullptr) {
1495 return u;
1496 }
1497 }
1498 }
1499 }
1500
1501 return this; // No identity
1502 }
1503
1504 //-----------------------------unique_input------------------------------------
// Find the unique value, discounting top, self-loops, and casts.
// Return top if there are no inputs, and null if there are multiple distinct
// inputs.
1507 Node* PhiNode::unique_input(PhaseValues* phase, bool uncast) {
1508 // 1) One unique direct input,
1509 // or if uncast is true:
1510 // 2) some of the inputs have an intervening ConstraintCast
1511 // 3) an input is a self loop
1512 //
  //  1) input   or   2) input     or    3) input __
  //     /   \           /   \               \  /  \
  //     \   /          |    cast             phi  cast
  //      phi            \   /                 \  /
  //                      phi                   --
1518
1519 Node* r = in(0); // RegionNode
1520 Node* input = nullptr; // The unique direct input (maybe uncasted = ConstraintCasts removed)
1521
1522 for (uint i = 1, cnt = req(); i < cnt; ++i) {
1523 Node* rc = r->in(i);
1524 if (rc == nullptr || phase->type(rc) == Type::TOP)
1525 continue; // ignore unreachable control path
1526 Node* n = in(i);
1527 if (n == nullptr)
1528 continue;
1529 Node* un = n;
1530 if (uncast) {
1531 #ifdef ASSERT
1532 Node* m = un->uncast();
1533 #endif
1534 while (un != nullptr && un->req() == 2 && un->is_ConstraintCast()) {
1535 Node* next = un->in(1);
1536 if (phase->type(next)->isa_rawptr() && phase->type(un)->isa_oopptr()) {
1537 // risk exposing raw ptr at safepoint
1538 break;
1539 }
1540 un = next;
1541 }
1542 assert(m == un || un->in(1) == m, "Only expected at CheckCastPP from allocation");
1543 }
1544 if (un == nullptr || un == this || phase->type(un) == Type::TOP) {
1545 continue; // ignore if top, or in(i) and "this" are in a data cycle
1546 }
1547 // Check for a unique input (maybe uncasted)
1548 if (input == nullptr) {
1549 input = un;
1550 } else if (input != un) {
1551 input = NodeSentinel; // no unique input
1552 }
1553 }
1554 if (input == nullptr) {
1555 return phase->C->top(); // no inputs
1556 }
1557
1558 if (input != NodeSentinel) {
1559 return input; // one unique direct input
1560 }
1561
1562 // Nothing.
1563 return nullptr;
1564 }
1565
1566 //------------------------------is_x2logic-------------------------------------
1567 // Check for simple convert-to-boolean pattern
1568 // If:(C Bool) Region:(IfF IfT) Phi:(Region 0 1)
// Convert the Phi to a Conv2B.
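// Illustrative examples (a sketch; 'x' and 'r' are made-up names):
//   int r = (x != 0) ? 1 : 0;   ==>   r = Conv2B(x);
//   int r = (x == 0) ? 1 : 0;   ==>   r = Conv2B(x) ^ 1;
// The same applies to pointer-vs-null tests, and to compares against 1 when
// the other compare input is already known to be in the range 0..1.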
1570 static Node *is_x2logic( PhaseGVN *phase, PhiNode *phi, int true_path ) {
1571 assert(true_path !=0, "only diamond shape graph expected");
1572
1573 // If we're late in the optimization process, we may have already expanded Conv2B nodes
1574 if (phase->C->post_loop_opts_phase() && !Matcher::match_rule_supported(Op_Conv2B)) {
1575 return nullptr;
1576 }
1577
1578 // Convert the true/false index into an expected 0/1 return.
1579 // Map 2->0 and 1->1.
1580 int flipped = 2-true_path;
1581
1582 // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
1583 // phi->region->if_proj->ifnode->bool->cmp
1584 Node *region = phi->in(0);
1585 Node *iff = region->in(1)->in(0);
1586 BoolNode *b = (BoolNode*)iff->in(1);
1587 const CmpNode *cmp = (CmpNode*)b->in(1);
1588
1589 Node *zero = phi->in(1);
1590 Node *one = phi->in(2);
1591 const Type *tzero = phase->type( zero );
1592 const Type *tone = phase->type( one );
1593
1594 // Check for compare vs 0
1595 const Type *tcmp = phase->type(cmp->in(2));
1596 if( tcmp != TypeInt::ZERO && tcmp != TypePtr::NULL_PTR ) {
1597 // Allow cmp-vs-1 if the other input is bounded by 0-1
1598 if( !(tcmp == TypeInt::ONE && phase->type(cmp->in(1)) == TypeInt::BOOL) )
1599 return nullptr;
1600 flipped = 1-flipped; // Test is vs 1 instead of 0!
1601 }
1602
1603 // Check for setting zero/one opposite expected
1604 if( tzero == TypeInt::ZERO ) {
1605 if( tone == TypeInt::ONE ) {
1606 } else return nullptr;
1607 } else if( tzero == TypeInt::ONE ) {
1608 if( tone == TypeInt::ZERO ) {
1609 flipped = 1-flipped;
1610 } else return nullptr;
1611 } else return nullptr;
1612
1613 // Check for boolean test backwards
1614 if( b->_test._test == BoolTest::ne ) {
1615 } else if( b->_test._test == BoolTest::eq ) {
1616 flipped = 1-flipped;
1617 } else return nullptr;
1618
1619 // Build int->bool conversion
1620 Node* n = new Conv2BNode(cmp->in(1));
1621 if (flipped) {
1622 n = new XorINode(phase->transform(n), phase->intcon(1));
1623 }
1624
1625 return n;
1626 }
1627
1628 //------------------------------is_cond_add------------------------------------
1629 // Check for simple conditional add pattern: "(P < Q) ? X+Y : X;"
1630 // To be profitable the control flow has to disappear; there can be no other
1631 // values merging here. We replace the test-and-branch with:
1632 // "(sgn(P-Q))&Y) + X". Basically, convert "(P < Q)" into 0 or -1 by
1633 // moving the carry bit from (P-Q) into a register with 'sbb EAX,EAX'.
1634 // Then convert Y to 0-or-Y and finally add.
1635 // This is a key transform for SpecJava _201_compress.
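// Illustrative example (a sketch; names are made up):
//   r = (p < q) ? x + y : x;   ==>   r = (CmpLTMask(p, q) & y) + x;
// CmpLTMask(p, q) is -1 when p < q and 0 otherwise, so the AND selects either
// 'y' or 0 and the control flow disappears.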
1636 static Node* is_cond_add(PhaseGVN *phase, PhiNode *phi, int true_path) {
1637 assert(true_path !=0, "only diamond shape graph expected");
1638
1639 // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
1640 // phi->region->if_proj->ifnode->bool->cmp
1641 RegionNode *region = (RegionNode*)phi->in(0);
1642 Node *iff = region->in(1)->in(0);
1643 BoolNode* b = iff->in(1)->as_Bool();
1644 const CmpNode *cmp = (CmpNode*)b->in(1);
1645
1646 // Make sure only merging this one phi here
1647 if (region->has_unique_phi() != phi) return nullptr;
1648
1649 // Make sure each arm of the diamond has exactly one output, which we assume
1650 // is the region. Otherwise, the control flow won't disappear.
1651 if (region->in(1)->outcnt() != 1) return nullptr;
1652 if (region->in(2)->outcnt() != 1) return nullptr;
1653
1654 // Check for "(P < Q)" of type signed int
1655 if (b->_test._test != BoolTest::lt) return nullptr;
1656 if (cmp->Opcode() != Op_CmpI) return nullptr;
1657
1658 Node *p = cmp->in(1);
1659 Node *q = cmp->in(2);
1660 Node *n1 = phi->in( true_path);
1661 Node *n2 = phi->in(3-true_path);
1662
1663 int op = n1->Opcode();
1664 if( op != Op_AddI // Need zero as additive identity
1665 /*&&op != Op_SubI &&
1666 op != Op_AddP &&
1667 op != Op_XorI &&
1668 op != Op_OrI*/ )
1669 return nullptr;
1670
1671 Node *x = n2;
1672 Node *y = nullptr;
1673 if( x == n1->in(1) ) {
1674 y = n1->in(2);
1675 } else if( x == n1->in(2) ) {
1676 y = n1->in(1);
1677 } else return nullptr;
1678
1679 // Not so profitable if compare and add are constants
1680 if( q->is_Con() && phase->type(q) != TypeInt::ZERO && y->is_Con() )
1681 return nullptr;
1682
1683 Node *cmplt = phase->transform( new CmpLTMaskNode(p,q) );
1684 Node *j_and = phase->transform( new AndINode(cmplt,y) );
1685 return new AddINode(j_and,x);
1686 }
1687
1688 //------------------------------is_absolute------------------------------------
1689 // Check for absolute value.
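// Illustrative examples (a sketch; names are made up):
//   float  r = (x > 0.0f) ? x : 0.0f - x;   ==>   r = AbsF(x);
//   double r = (x <= 0.0) ? 0.0 - x : x;    ==>   r = AbsD(x);
//   int    r = (x < 0)    ? 0 - x   : x;    ==>   r = AbsI(x);  (if the matcher supports AbsI)
// Only Sub(zero, x) is accepted as the negated arm. If the zero sits on the
// other compare input, the result is re-negated (the 'flip' case below).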
1690 static Node* is_absolute( PhaseGVN *phase, PhiNode *phi_root, int true_path) {
1691 assert(true_path !=0, "only diamond shape graph expected");
1692
1693 int cmp_zero_idx = 0; // Index of compare input where to look for zero
1694 int phi_x_idx = 0; // Index of phi input where to find naked x
1695
1696 // ABS ends with the merge of 2 control flow paths.
1697 // Find the false path from the true path. With only 2 inputs, 3 - x works nicely.
1698 int false_path = 3 - true_path;
1699
1700 // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
1701 // phi->region->if_proj->ifnode->bool->cmp
1702 BoolNode *bol = phi_root->in(0)->in(1)->in(0)->in(1)->as_Bool();
1703 Node *cmp = bol->in(1);
1704
1705 // Check bool sense
1706 if (cmp->Opcode() == Op_CmpF || cmp->Opcode() == Op_CmpD) {
1707 switch (bol->_test._test) {
1708 case BoolTest::lt: cmp_zero_idx = 1; phi_x_idx = true_path; break;
1709 case BoolTest::le: cmp_zero_idx = 2; phi_x_idx = false_path; break;
1710 case BoolTest::gt: cmp_zero_idx = 2; phi_x_idx = true_path; break;
1711 case BoolTest::ge: cmp_zero_idx = 1; phi_x_idx = false_path; break;
1712 default: return nullptr; break;
1713 }
1714 } else if (cmp->Opcode() == Op_CmpI || cmp->Opcode() == Op_CmpL) {
1715 switch (bol->_test._test) {
1716 case BoolTest::lt:
1717 case BoolTest::le: cmp_zero_idx = 2; phi_x_idx = false_path; break;
1718 case BoolTest::gt:
1719 case BoolTest::ge: cmp_zero_idx = 2; phi_x_idx = true_path; break;
1720 default: return nullptr; break;
1721 }
1722 }
1723
1724 // Test is next
1725 const Type *tzero = nullptr;
1726 switch (cmp->Opcode()) {
1727 case Op_CmpI: tzero = TypeInt::ZERO; break; // Integer ABS
1728 case Op_CmpL: tzero = TypeLong::ZERO; break; // Long ABS
1729 case Op_CmpF: tzero = TypeF::ZERO; break; // Float ABS
1730 case Op_CmpD: tzero = TypeD::ZERO; break; // Double ABS
1731 default: return nullptr;
1732 }
1733
1734 // Find zero input of compare; the other input is being abs'd
1735 Node *x = nullptr;
1736 bool flip = false;
1737 if( phase->type(cmp->in(cmp_zero_idx)) == tzero ) {
1738 x = cmp->in(3 - cmp_zero_idx);
1739 } else if( phase->type(cmp->in(3 - cmp_zero_idx)) == tzero ) {
    // The test is inverted; we should invert the result...
1741 x = cmp->in(cmp_zero_idx);
1742 flip = true;
1743 } else {
1744 return nullptr;
1745 }
1746
  // Next get the 2 pieces being selected: one is the original value
  // and the other is the negated value.
1749 if( phi_root->in(phi_x_idx) != x ) return nullptr;
1750
1751 // Check other phi input for subtract node
1752 Node *sub = phi_root->in(3 - phi_x_idx);
1753
1754 bool is_sub = sub->Opcode() == Op_SubF || sub->Opcode() == Op_SubD ||
1755 sub->Opcode() == Op_SubI || sub->Opcode() == Op_SubL;
1756
1757 // Allow only Sub(0,X) and fail out for all others; Neg is not OK
1758 if (!is_sub || phase->type(sub->in(1)) != tzero || sub->in(2) != x) return nullptr;
1759
1760 if (tzero == TypeF::ZERO) {
1761 x = new AbsFNode(x);
1762 if (flip) {
1763 x = new SubFNode(sub->in(1), phase->transform(x));
1764 }
1765 } else if (tzero == TypeD::ZERO) {
1766 x = new AbsDNode(x);
1767 if (flip) {
1768 x = new SubDNode(sub->in(1), phase->transform(x));
1769 }
1770 } else if (tzero == TypeInt::ZERO && Matcher::match_rule_supported(Op_AbsI)) {
1771 x = new AbsINode(x);
1772 if (flip) {
1773 x = new SubINode(sub->in(1), phase->transform(x));
1774 }
1775 } else if (tzero == TypeLong::ZERO && Matcher::match_rule_supported(Op_AbsL)) {
1776 x = new AbsLNode(x);
1777 if (flip) {
1778 x = new SubLNode(sub->in(1), phase->transform(x));
1779 }
1780 } else return nullptr;
1781
1782 return x;
1783 }
1784
1785 //------------------------------split_once-------------------------------------
1786 // Helper for split_flow_path
1787 static void split_once(PhaseIterGVN *igvn, Node *phi, Node *val, Node *n, Node *newn) {
1788 igvn->hash_delete(n); // Remove from hash before hacking edges
1789
1790 uint j = 1;
1791 for (uint i = phi->req()-1; i > 0; i--) {
1792 if (phi->in(i) == val) { // Found a path with val?
1793 // Add to NEW Region/Phi, no DU info
1794 newn->set_req( j++, n->in(i) );
1795 // Remove from OLD Region/Phi
1796 n->del_req(i);
1797 }
1798 }
1799
1800 // Register the new node but do not transform it. Cannot transform until the
1801 // entire Region/Phi conglomerate has been hacked as a single huge transform.
1802 igvn->register_new_node_with_optimizer( newn );
1803
1804 // Now I can point to the new node.
1805 n->add_req(newn);
1806 igvn->_worklist.push(n);
1807 }
1808
1809 //------------------------------split_flow_path--------------------------------
1810 // Check for merging identical values and split flow paths
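// Illustrative sketch (c1..c4 and the values are made-up names; input order in
// the result is not significant):
//
//   Region(c1, c2, c3, c4)                Region(c2, c4, Region(c1, c3))
//   Phi   (con, x,  con, y)     ==>       Phi   (x,  y,  con)
//
// The paths merging the same constant are peeled off into their own Region, so
// this Phi sees the constant on a single edge; any other Phis on the original
// Region get a matching new Phi on the peeled-off Region.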
1811 static Node* split_flow_path(PhaseGVN *phase, PhiNode *phi) {
  // This optimization tries to find two or more inputs of the phi with the same constant value.
  // It then splits them into a separate Phi and a corresponding Region. If this is a loop entry,
  // and the loop entry has multiple fall-in edges, and some of those fall-in edges have that
  // constant, and others not, we may split the fall-in edges into separate Phis, and create
  // an irreducible loop. For reducible loops, this never seems to happen, as the multiple
  // fall-in edges are already merged before the loop head during parsing. But with irreducible
  // loops present, the order of merging during parsing can sometimes prevent this.
1819 if (phase->C->has_irreducible_loop()) {
1820 // Avoid this optimization if any irreducible loops are present. Else we may create
1821 // an irreducible loop that we do not detect.
1822 return nullptr;
1823 }
1824 BasicType bt = phi->type()->basic_type();
1825 if( bt == T_ILLEGAL || type2size[bt] <= 0 )
1826 return nullptr; // Bail out on funny non-value stuff
1827 if( phi->req() <= 3 ) // Need at least 2 matched inputs and a
1828 return nullptr; // third unequal input to be worth doing
1829
1830 // Scan for a constant
1831 uint i;
1832 for( i = 1; i < phi->req()-1; i++ ) {
1833 Node *n = phi->in(i);
1834 if( !n ) return nullptr;
1835 if( phase->type(n) == Type::TOP ) return nullptr;
1836 if( n->Opcode() == Op_ConP || n->Opcode() == Op_ConN || n->Opcode() == Op_ConNKlass )
1837 break;
1838 }
1839 if( i >= phi->req() ) // Only split for constants
1840 return nullptr;
1841
1842 Node *val = phi->in(i); // Constant to split for
1843 uint hit = 0; // Number of times it occurs
1844 Node *r = phi->region();
1845
1846 for( ; i < phi->req(); i++ ){ // Count occurrences of constant
1847 Node *n = phi->in(i);
1848 if( !n ) return nullptr;
1849 if( phase->type(n) == Type::TOP ) return nullptr;
1850 if( phi->in(i) == val ) {
1851 hit++;
1852 if (Node::may_be_loop_entry(r->in(i))) {
1853 return nullptr; // don't split loop entry path
1854 }
1855 }
1856 }
1857
1858 if( hit <= 1 || // Make sure we find 2 or more
1859 hit == phi->req()-1 ) // and not ALL the same value
1860 return nullptr;
1861
1862 // Now start splitting out the flow paths that merge the same value.
1863 // Split first the RegionNode.
1864 PhaseIterGVN *igvn = phase->is_IterGVN();
1865 RegionNode *newr = new RegionNode(hit+1);
1866 split_once(igvn, phi, val, r, newr);
1867
1868 // Now split all other Phis than this one
1869 for (DUIterator_Fast kmax, k = r->fast_outs(kmax); k < kmax; k++) {
1870 Node* phi2 = r->fast_out(k);
1871 if( phi2->is_Phi() && phi2->as_Phi() != phi ) {
1872 PhiNode *newphi = PhiNode::make_blank(newr, phi2);
1873 split_once(igvn, phi, val, phi2, newphi);
1874 }
1875 }
1876
1877 // Clean up this guy
1878 igvn->hash_delete(phi);
1879 for( i = phi->req()-1; i > 0; i-- ) {
1880 if( phi->in(i) == val ) {
1881 phi->del_req(i);
1882 }
1883 }
1884 phi->add_req(val);
1885
1886 return phi;
1887 }
1888
// Returns the BasicType of a given convert node and a type, with special handling to ensure that conversions to
// and from half float report the SHORT basic type, which plain TypeInt would not.
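// For example (a sketch): a ConvHF2F input carries half-float bits in an int, so the pre-conversion
// side is reported as T_SHORT rather than the T_INT that TypeInt alone would suggest; this lets the
// convert be rebuilt correctly after a Phi of such converts is de-duplicated below.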
1891 static BasicType get_convert_type(Node* convert, const Type* type) {
1892 int convert_op = convert->Opcode();
1893 if (type->isa_int() && (convert_op == Op_ConvHF2F || convert_op == Op_ConvF2HF)) {
1894 return T_SHORT;
1895 }
1896
1897 return type->basic_type();
1898 }
1899
1900 //=============================================================================
1901 //------------------------------simple_data_loop_check-------------------------
// Try to determine whether the phi node is in a simple safe/unsafe data loop.
// Returns:
// enum LoopSafety { Safe = 0, Unsafe, UnsafeLoop };
// Safe - safe case when the phi and its inputs reference only safe data
// nodes;
// Unsafe - the phi and its inputs reference unsafe data nodes but there
// is no reference back to the phi - need a graph walk
// to determine if it is in a loop;
// UnsafeLoop - unsafe case when the phi references itself directly or through
// an unsafe data node.
// Note: a safe data node is a node which can never reference itself during
// GVN transformations. For now it is Con, Proj, Phi, CastPP, CheckCastPP.
// Phi nodes are marked as safe nodes not because they cannot reference
// themselves, but to prevent mistaking the fall-through case inside an outer
// loop for a dead loop when a phi references itself through another phi.
1917 PhiNode::LoopSafety PhiNode::simple_data_loop_check(Node *in) const {
1918 // It is unsafe loop if the phi node references itself directly.
1919 if (in == (Node*)this)
1920 return UnsafeLoop; // Unsafe loop
1921 // Unsafe loop if the phi node references itself through an unsafe data node.
1922 // Exclude cases with null inputs or data nodes which could reference
1923 // itself (safe for dead loops).
1924 if (in != nullptr && !in->is_dead_loop_safe()) {
    // Check the inputs of the phi's inputs also.
    // It is much less expensive than a full graph walk.
1927 uint cnt = in->req();
1928 uint i = (in->is_Proj() && !in->is_CFG()) ? 0 : 1;
1929 for (; i < cnt; ++i) {
1930 Node* m = in->in(i);
1931 if (m == (Node*)this)
1932 return UnsafeLoop; // Unsafe loop
1933 if (m != nullptr && !m->is_dead_loop_safe()) {
1934 // Check the most common case (about 30% of all cases):
1935 // phi->Load/Store->AddP->(ConP ConP Con)/(Parm Parm Con).
1936 Node *m1 = (m->is_AddP() && m->req() > 3) ? m->in(1) : nullptr;
1937 if (m1 == (Node*)this)
1938 return UnsafeLoop; // Unsafe loop
1939 if (m1 != nullptr && m1 == m->in(2) &&
1940 m1->is_dead_loop_safe() && m->in(3)->is_Con()) {
1941 continue; // Safe case
1942 }
1943 // The phi references an unsafe node - need full analysis.
1944 return Unsafe;
1945 }
1946 }
1947 }
1948 return Safe; // Safe case - we can optimize the phi node.
1949 }
1950
1951 //------------------------------is_unsafe_data_reference-----------------------
1952 // If phi can be reached through the data input - it is data loop.
1953 bool PhiNode::is_unsafe_data_reference(Node *in) const {
1954 assert(req() > 1, "");
  // First, check simple cases when the phi references itself directly or
  // through another node.
1957 LoopSafety safety = simple_data_loop_check(in);
1958 if (safety == UnsafeLoop)
1959 return true; // phi references itself - unsafe loop
1960 else if (safety == Safe)
1961 return false; // Safe case - phi could be replaced with the unique input.
1962
1963 // Unsafe case when we should go through data graph to determine
1964 // if the phi references itself.
1965
1966 ResourceMark rm;
1967
1968 Node_List nstack;
1969 VectorSet visited;
1970
1971 nstack.push(in); // Start with unique input.
1972 visited.set(in->_idx);
1973 while (nstack.size() != 0) {
1974 Node* n = nstack.pop();
1975 uint cnt = n->req();
1976 uint i = (n->is_Proj() && !n->is_CFG()) ? 0 : 1;
1977 for (; i < cnt; i++) {
1978 Node* m = n->in(i);
1979 if (m == (Node*)this) {
1980 return true; // Data loop
1981 }
1982 if (m != nullptr && !m->is_dead_loop_safe()) { // Only look for unsafe cases.
1983 if (!visited.test_set(m->_idx))
1984 nstack.push(m);
1985 }
1986 }
1987 }
1988 return false; // The phi is not reachable from its inputs
1989 }
1990
1991 // Is this Phi's region or some inputs to the region enqueued for IGVN
1992 // and so could cause the region to be optimized out?
1993 bool PhiNode::wait_for_region_igvn(PhaseGVN* phase) {
1994 PhaseIterGVN* igvn = phase->is_IterGVN();
1995 Unique_Node_List& worklist = igvn->_worklist;
1996 bool delay = false;
1997 Node* r = in(0);
1998 for (uint j = 1; j < req(); j++) {
1999 Node* rc = r->in(j);
2000 Node* n = in(j);
2001
2002 if (rc == nullptr || !rc->is_Proj()) { continue; }
2003 if (worklist.member(rc)) {
2004 delay = true;
2005 break;
2006 }
2007
2008 if (rc->in(0) == nullptr || !rc->in(0)->is_If()) { continue; }
2009 if (worklist.member(rc->in(0))) {
2010 delay = true;
2011 break;
2012 }
2013
2014 if (rc->in(0)->in(1) == nullptr || !rc->in(0)->in(1)->is_Bool()) { continue; }
2015 if (worklist.member(rc->in(0)->in(1))) {
2016 delay = true;
2017 break;
2018 }
2019
2020 if (rc->in(0)->in(1)->in(1) == nullptr || !rc->in(0)->in(1)->in(1)->is_Cmp()) { continue; }
2021 if (worklist.member(rc->in(0)->in(1)->in(1))) {
2022 delay = true;
2023 break;
2024 }
2025 }
2026
2027 if (delay) {
2028 worklist.push(this);
2029 }
2030 return delay;
2031 }
2032
2033 // If the Phi's Region is in an irreducible loop, and the Region
2034 // has had an input removed, but not yet transformed, it could be
2035 // that the Region (and this Phi) are not reachable from Root.
// If we allow the Phi to collapse before the Region, this may lead
// to a dead data loop. Wait for the Region to check for reachability,
// and potentially remove the dead code.
2039 bool PhiNode::must_wait_for_region_in_irreducible_loop(PhaseGVN* phase) const {
2040 RegionNode* region = in(0)->as_Region();
2041 if (region->loop_status() == RegionNode::LoopStatus::MaybeIrreducibleEntry) {
2042 Node* top = phase->C->top();
2043 for (uint j = 1; j < req(); j++) {
2044 Node* rc = region->in(j); // for each control input
2045 if (rc == nullptr || phase->type(rc) == Type::TOP) {
2046 // Region is missing a control input
2047 Node* n = in(j);
2048 if (n != nullptr && n != top) {
2049 // Phi still has its input, so region just lost its input
2050 return true;
2051 }
2052 }
2053 }
2054 }
2055 return false;
2056 }
2057
2058 // Check if splitting a bot memory Phi through a parent MergeMem may lead to
2059 // non-termination. For more details, see comments at the call site in
2060 // PhiNode::Ideal.
2061 bool PhiNode::is_split_through_mergemem_terminating() const {
2062 ResourceMark rm;
2063 VectorSet visited;
2064 GrowableArray<const Node*> worklist;
2065 worklist.push(this);
2066 visited.set(this->_idx);
2067 auto maybe_add_to_worklist = [&](Node* input) {
2068 if (input != nullptr &&
2069 (input->is_MergeMem() || input->is_memory_phi()) &&
2070 !visited.test_set(input->_idx)) {
2071 worklist.push(input);
2072 assert(input->adr_type() == TypePtr::BOTTOM,
2073 "should only visit bottom memory");
2074 }
2075 };
2076 while (worklist.length() > 0) {
2077 const Node* n = worklist.pop();
2078 if (n->is_MergeMem()) {
2079 Node* input = n->as_MergeMem()->base_memory();
2080 if (input == this) {
2081 return false;
2082 }
2083 maybe_add_to_worklist(input);
2084 } else {
2085 assert(n->is_memory_phi(), "invariant");
2086 for (uint i = PhiNode::Input; i < n->req(); i++) {
2087 Node* input = n->in(i);
2088 if (input == this) {
2089 return false;
2090 }
2091 maybe_add_to_worklist(input);
2092 }
2093 }
2094 }
2095 return true;
2096 }
2097
2098 //------------------------------Ideal------------------------------------------
2099 // Return a node which is more "ideal" than the current node. Must preserve
2100 // the CFG, but we can still strip out dead paths.
2101 Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2102 Node *r = in(0); // RegionNode
2103 assert(r != nullptr && r->is_Region(), "this phi must have a region");
2104 assert(r->in(0) == nullptr || !r->in(0)->is_Root(), "not a specially hidden merge");
2105
2106 // Note: During parsing, phis are often transformed before their regions.
2107 // This means we have to use type_or_null to defend against untyped regions.
2108 if( phase->type_or_null(r) == Type::TOP ) // Dead code?
2109 return nullptr; // No change
2110
2111 Node *top = phase->C->top();
2112 bool new_phi = (outcnt() == 0); // transforming new Phi
2113 // No change for igvn if new phi is not hooked
2114 if (new_phi && can_reshape)
2115 return nullptr;
2116
2117 if (must_wait_for_region_in_irreducible_loop(phase)) {
2118 return nullptr;
2119 }
2120
  // There are 2 situations when only one valid phi input is left
  // (in addition to the Region input).
2123 // One: region is not loop - replace phi with this input.
2124 // Two: region is loop - replace phi with top since this data path is dead
2125 // and we need to break the dead data loop.
2126 Node* progress = nullptr; // Record if any progress made
2127 for( uint j = 1; j < req(); ++j ){ // For all paths in
2128 // Check unreachable control paths
2129 Node* rc = r->in(j);
2130 Node* n = in(j); // Get the input
2131 if (rc == nullptr || phase->type(rc) == Type::TOP) {
2132 if (n != top) { // Not already top?
2133 PhaseIterGVN *igvn = phase->is_IterGVN();
2134 if (can_reshape && igvn != nullptr) {
2135 igvn->_worklist.push(r);
2136 }
2137 // Nuke it down
2138 set_req_X(j, top, phase);
2139 progress = this; // Record progress
2140 }
2141 }
2142 }
2143
2144 if (can_reshape && outcnt() == 0) {
2145 // set_req() above may kill outputs if Phi is referenced
2146 // only by itself on the dead (top) control path.
2147 return top;
2148 }
2149
2150 bool uncasted = false;
2151 Node* uin = unique_input(phase, false);
2152 if (uin == nullptr && can_reshape &&
2153 // If there is a chance that the region can be optimized out do
2154 // not add a cast node that we can't remove yet.
2155 !wait_for_region_igvn(phase)) {
2156 uncasted = true;
2157 uin = unique_input(phase, true);
2158 }
2159 if (uin == top) { // Simplest case: no alive inputs.
2160 if (can_reshape) // IGVN transformation
2161 return top;
2162 else
2163 return nullptr; // Identity will return TOP
2164 } else if (uin != nullptr) {
2165 // Only one not-null unique input path is left.
2166 // Determine if this input is backedge of a loop.
2167 // (Skip new phis which have no uses and dead regions).
2168 if (outcnt() > 0 && r->in(0) != nullptr) {
2169 if (is_data_loop(r->as_Region(), uin, phase)) {
2170 // Break this data loop to avoid creation of a dead loop.
2171 if (can_reshape) {
2172 return top;
2173 } else {
          // We can't return top if we are in Parse phase - cut inputs only and
          // let Identity handle the case.
2176 replace_edge(uin, top, phase);
2177 return nullptr;
2178 }
2179 }
2180 }
2181
2182 if (uncasted) {
2183 // Add cast nodes between the phi to be removed and its unique input.
2184 // Wait until after parsing for the type information to propagate from the casts.
2185 assert(can_reshape, "Invalid during parsing");
2186 const Type* phi_type = bottom_type();
2187 // Add casts to carry the control dependency of the Phi that is
2188 // going away
2189 Node* cast = nullptr;
2190 const TypeTuple* extra_types = collect_types(phase);
2191 if (phi_type->isa_ptr()) {
2192 const Type* uin_type = phase->type(uin);
2193 if (!phi_type->isa_oopptr() && !uin_type->isa_oopptr()) {
2194 cast = new CastPPNode(r, uin, phi_type, ConstraintCastNode::StrongDependency, extra_types);
2195 } else {
2196 // Use a CastPP for a cast to not null and a CheckCastPP for
2197 // a cast to a new klass (and both if both null-ness and
2198 // klass change).
2199
        // If the type of the phi is not null but the type of uin may be
        // null, uin's type must be cast to not-null
2202 if (phi_type->join(TypePtr::NOTNULL) == phi_type->remove_speculative() &&
2203 uin_type->join(TypePtr::NOTNULL) != uin_type->remove_speculative()) {
2204 cast = new CastPPNode(r, uin, TypePtr::NOTNULL, ConstraintCastNode::StrongDependency, extra_types);
2205 }
2206
        // If the types of the phi and of uin, both cast to not-null,
        // differ, the klass of uin must be (check)cast to match
        // that of the phi
2210 if (phi_type->join_speculative(TypePtr::NOTNULL) != uin_type->join_speculative(TypePtr::NOTNULL)) {
2211 Node* n = uin;
2212 if (cast != nullptr) {
2213 cast = phase->transform(cast);
2214 n = cast;
2215 }
2216 cast = new CheckCastPPNode(r, n, phi_type, ConstraintCastNode::StrongDependency, extra_types);
2217 }
2218 if (cast == nullptr) {
2219 cast = new CastPPNode(r, uin, phi_type, ConstraintCastNode::StrongDependency, extra_types);
2220 }
2221 }
2222 } else {
2223 cast = ConstraintCastNode::make_cast_for_type(r, uin, phi_type, ConstraintCastNode::StrongDependency, extra_types);
2224 }
2225 assert(cast != nullptr, "cast should be set");
2226 cast = phase->transform(cast);
2227 // set all inputs to the new cast(s) so the Phi is removed by Identity
2228 PhaseIterGVN* igvn = phase->is_IterGVN();
2229 for (uint i = 1; i < req(); i++) {
2230 set_req_X(i, cast, igvn);
2231 }
2232 uin = cast;
2233 }
2234
2235 // One unique input.
2236 DEBUG_ONLY(Node* ident = Identity(phase));
2237 // The unique input must eventually be detected by the Identity call.
2238 #ifdef ASSERT
2239 if (ident != uin && !ident->is_top() && !must_wait_for_region_in_irreducible_loop(phase)) {
2240 // print this output before failing assert
2241 r->dump(3);
2242 this->dump(3);
2243 ident->dump();
2244 uin->dump();
2245 }
2246 #endif
    // Identity may not return the expected uin if it has to wait for the region, in the irreducible case
2248 assert(ident == uin || ident->is_top() || must_wait_for_region_in_irreducible_loop(phase), "Identity must clean this up");
2249 return nullptr;
2250 }
2251
2252 Node* opt = nullptr;
2253 int true_path = is_diamond_phi();
2254 if (true_path != 0 &&
      // If one of the diamond's branches is in the process of dying then the Phi's input for that branch might transform
      // to top. If that happens, replacing the Phi with an operation that consumes the Phi's inputs will cause the Phi
      // to be replaced by top. To prevent that, delay the transformation until the branch has had a chance to be removed.
2258 !(can_reshape && wait_for_region_igvn(phase))) {
2259 // Check for CMove'ing identity. If it would be unsafe,
2260 // handle it here. In the safe case, let Identity handle it.
2261 Node* unsafe_id = is_cmove_id(phase, true_path);
2262 if( unsafe_id != nullptr && is_unsafe_data_reference(unsafe_id) )
2263 opt = unsafe_id;
2264
2265 // Check for simple convert-to-boolean pattern
2266 if( opt == nullptr )
2267 opt = is_x2logic(phase, this, true_path);
2268
2269 // Check for absolute value
2270 if( opt == nullptr )
2271 opt = is_absolute(phase, this, true_path);
2272
2273 // Check for conditional add
2274 if( opt == nullptr && can_reshape )
2275 opt = is_cond_add(phase, this, true_path);
2276
2277 // These 4 optimizations could subsume the phi:
2278 // have to check for a dead data loop creation.
2279 if( opt != nullptr ) {
2280 if( opt == unsafe_id || is_unsafe_data_reference(opt) ) {
2281 // Found dead loop.
2282 if( can_reshape )
2283 return top;
2284 // We can't return top if we are in Parse phase - cut inputs only
2285 // to stop further optimizations for this phi. Identity will return TOP.
2286 assert(req() == 3, "only diamond merge phi here");
2287 set_req(1, top);
2288 set_req(2, top);
2289 return nullptr;
2290 } else {
2291 return opt;
2292 }
2293 }
2294 }
2295
2296 // Check for merging identical values and split flow paths
2297 if (can_reshape) {
2298 opt = split_flow_path(phase, this);
2299 // This optimization only modifies phi - don't need to check for dead loop.
2300 assert(opt == nullptr || opt == this, "do not elide phi");
2301 if (opt != nullptr) return opt;
2302 }
2303
2304 if (in(1) != nullptr && in(1)->Opcode() == Op_AddP && can_reshape) {
2305 // Try to undo Phi of AddP:
2306 // (Phi (AddP base address offset) (AddP base2 address2 offset2))
2307 // becomes:
2308 // newbase := (Phi base base2)
2309 // newaddress := (Phi address address2)
2310 // newoffset := (Phi offset offset2)
2311 // (AddP newbase newaddress newoffset)
2312 //
2313 // This occurs as a result of unsuccessful split_thru_phi and
2314 // interferes with taking advantage of addressing modes. See the
2315 // clone_shift_expressions code in matcher.cpp
2316 Node* addp = in(1);
2317 Node* base = addp->in(AddPNode::Base);
2318 Node* address = addp->in(AddPNode::Address);
2319 Node* offset = addp->in(AddPNode::Offset);
2320 if (base != nullptr && address != nullptr && offset != nullptr &&
2321 !base->is_top() && !address->is_top() && !offset->is_top()) {
2322 const Type* base_type = base->bottom_type();
2323 const Type* address_type = address->bottom_type();
2324 // make sure that all the inputs are similar to the first one,
2325 // i.e. AddP with base == address and same offset as first AddP
2326 bool doit = true;
2327 for (uint i = 2; i < req(); i++) {
2328 if (in(i) == nullptr ||
2329 in(i)->Opcode() != Op_AddP ||
2330 in(i)->in(AddPNode::Base) == nullptr ||
2331 in(i)->in(AddPNode::Address) == nullptr ||
2332 in(i)->in(AddPNode::Offset) == nullptr ||
2333 in(i)->in(AddPNode::Base)->is_top() ||
2334 in(i)->in(AddPNode::Address)->is_top() ||
2335 in(i)->in(AddPNode::Offset)->is_top()) {
2336 doit = false;
2337 break;
2338 }
2339 if (in(i)->in(AddPNode::Base) != base) {
2340 base = nullptr;
2341 }
2342 if (in(i)->in(AddPNode::Offset) != offset) {
2343 offset = nullptr;
2344 }
2345 if (in(i)->in(AddPNode::Address) != address) {
2346 address = nullptr;
2347 }
2348 // Accumulate type for resulting Phi
2349 base_type = base_type->meet_speculative(in(i)->in(AddPNode::Base)->bottom_type());
2350 address_type = address_type->meet_speculative(in(i)->in(AddPNode::Address)->bottom_type());
2351 }
2352 if (doit && base == nullptr) {
      // Check for neighboring AddP nodes in a tree.
      // If they have a base, use it.
2355 for (DUIterator_Fast kmax, k = this->fast_outs(kmax); k < kmax; k++) {
2356 Node* u = this->fast_out(k);
2357 if (u->is_AddP()) {
2358 Node* base2 = u->in(AddPNode::Base);
2359 if (base2 != nullptr && !base2->is_top()) {
2360 if (base == nullptr)
2361 base = base2;
2362 else if (base != base2)
2363 { doit = false; break; }
2364 }
2365 }
2366 }
2367 }
2368 if (doit) {
2369 if (base == nullptr) {
2370 base = new PhiNode(in(0), base_type, nullptr);
2371 for (uint i = 1; i < req(); i++) {
2372 base->init_req(i, in(i)->in(AddPNode::Base));
2373 }
2374 phase->is_IterGVN()->register_new_node_with_optimizer(base);
2375 }
2376 if (address == nullptr) {
2377 address = new PhiNode(in(0), address_type, nullptr);
2378 for (uint i = 1; i < req(); i++) {
2379 address->init_req(i, in(i)->in(AddPNode::Address));
2380 }
2381 phase->is_IterGVN()->register_new_node_with_optimizer(address);
2382 }
2383 if (offset == nullptr) {
2384 offset = new PhiNode(in(0), TypeX_X, nullptr);
2385 for (uint i = 1; i < req(); i++) {
2386 offset->init_req(i, in(i)->in(AddPNode::Offset));
2387 }
2388 phase->is_IterGVN()->register_new_node_with_optimizer(offset);
2389 }
2390 return new AddPNode(base, address, offset);
2391 }
2392 }
2393 }
2394
2395 // Split phis through memory merges, so that the memory merges will go away.
2396 // Piggy-back this transformation on the search for a unique input....
2397 // It will be as if the merged memory is the unique value of the phi.
2398 // (Do not attempt this optimization unless parsing is complete.
2399 // It would make the parser's memory-merge logic sick.)
2400 // (MergeMemNode is not dead_loop_safe - need to check for dead loop.)
2401 if (progress == nullptr && can_reshape && type() == Type::MEMORY) {
2402
2403 // See if this Phi should be sliced. Determine the merge width of input
2404 // MergeMems and check if there is a direct loop to self, as illustrated
2405 // below.
2406 //
2407 // +-------------+
2408 // | |
2409 // (base_memory) v |
2410 // MergeMem |
2411 // | |
2412 // v |
2413 // Phi (this) |
2414 // | |
2415 // +-----------+
2416 //
2417 // Generally, there are issues with non-termination with such circularity
2418 // (see comment further below). However, if there is a direct loop to self,
2419 // splitting the Phi through the MergeMem will result in the below.
2420 //
2421 // +---+
2422 // | |
2423 // v |
2424 // Phi |
2425 // |\ |
2426 // | +-+
2427 // (base_memory) v
2428 // MergeMem
2429 //
2430 // This split breaks the circularity and consequently does not lead to
2431 // non-termination.
2432 uint merge_width = 0;
2433 bool split_always_terminates = false; // Is splitting guaranteed to terminate?
2434 for( uint i=1; i<req(); ++i ) {// For all paths in
2435 Node *ii = in(i);
2436 // TOP inputs should not be counted as safe inputs because if the
2437 // Phi references itself through all other inputs then splitting the
      // Phi through memory merges would create a dead loop at a later stage.
2439 if (ii == top) {
2440 return nullptr; // Delay optimization until graph is cleaned.
2441 }
2442 if (ii->is_MergeMem()) {
2443 MergeMemNode* n = ii->as_MergeMem();
2444 merge_width = MAX2(merge_width, n->req());
2445 if (n->base_memory() == this) {
2446 split_always_terminates = true;
2447 }
2448 }
2449 }
2450
2451 // There are cases with circular dependencies between bottom Phis
2452 // and MergeMems. Below is a minimal example.
2453 //
2454 // +------------+
2455 // | |
2456 // (base_memory) v |
2457 // MergeMem |
2458 // | |
2459 // v |
2460 // Phi (this) |
2461 // | |
2462 // v |
2463 // Phi |
2464 // | |
2465 // +----------+
2466 //
2467 // Here, we cannot break the circularity through a self-loop as there
2468 // are two Phis involved. Repeatedly splitting the Phis through the
2469 // MergeMem leads to non-termination. We check for non-termination below.
2470 // Only check for non-termination if necessary.
2471 if (!split_always_terminates && adr_type() == TypePtr::BOTTOM &&
2472 merge_width > Compile::AliasIdxRaw) {
2473 split_always_terminates = is_split_through_mergemem_terminating();
2474 }
2475
2476 if (merge_width > Compile::AliasIdxRaw) {
2477 // found at least one non-empty MergeMem
2478 const TypePtr* at = adr_type();
2479 if (at != TypePtr::BOTTOM) {
2480 // Patch the existing phi to select an input from the merge:
2481 // Phi:AT1(...MergeMem(m0, m1, m2)...) into
2482 // Phi:AT1(...m1...)
2483 int alias_idx = phase->C->get_alias_index(at);
2484 for (uint i=1; i<req(); ++i) {
2485 Node *ii = in(i);
2486 if (ii->is_MergeMem()) {
2487 MergeMemNode* n = ii->as_MergeMem();
2488 // compress paths and change unreachable cycles to TOP
          // If we don't, we could keep updating the input forever along a MergeMem cycle
2490 // Equivalent code is in MemNode::Ideal_common
2491 Node *m = phase->transform(n);
2492 if (outcnt() == 0) { // Above transform() may kill us!
2493 return top;
2494 }
2495 // If transformed to a MergeMem, get the desired slice
2496 // Otherwise the returned node represents memory for every slice
2497 Node *new_mem = (m->is_MergeMem()) ?
2498 m->as_MergeMem()->memory_at(alias_idx) : m;
2499 // Update input if it is progress over what we have now
2500 if (new_mem != ii) {
2501 set_req_X(i, new_mem, phase->is_IterGVN());
2502 progress = this;
2503 }
2504 }
2505 }
2506 } else if (split_always_terminates) {
2507 // If all inputs reference this phi (directly or through data nodes) -
2508 // it is a dead loop.
2509 bool saw_safe_input = false;
2510 for (uint j = 1; j < req(); ++j) {
2511 Node* n = in(j);
2512 if (n->is_MergeMem()) {
2513 MergeMemNode* mm = n->as_MergeMem();
2514 if (mm->base_memory() == this || mm->base_memory() == mm->empty_memory()) {
2515 // Skip this input if it references back to this phi or if the memory path is dead
2516 continue;
2517 }
2518 }
2519 if (!is_unsafe_data_reference(n)) {
2520 saw_safe_input = true; // found safe input
2521 break;
2522 }
2523 }
2524 if (!saw_safe_input) {
2525 // There is a dead loop: All inputs are either dead or reference back to this phi
2526 return top;
2527 }
2528
2529 // Phi(...MergeMem(m0, m1:AT1, m2:AT2)...) into
2530 // MergeMem(Phi(...m0...), Phi:AT1(...m1...), Phi:AT2(...m2...))
2531 PhaseIterGVN* igvn = phase->is_IterGVN();
2532 assert(igvn != nullptr, "sanity check");
2533 PhiNode* new_base = (PhiNode*) clone();
2534 // Must eagerly register phis, since they participate in loops.
2535 igvn->register_new_node_with_optimizer(new_base);
2536
2537 MergeMemNode* result = MergeMemNode::make(new_base);
2538 for (uint i = 1; i < req(); ++i) {
2539 Node *ii = in(i);
2540 if (ii->is_MergeMem()) {
2541 MergeMemNode* n = ii->as_MergeMem();
2542 for (MergeMemStream mms(result, n); mms.next_non_empty2(); ) {
2543 // If we have not seen this slice yet, make a phi for it.
2544 bool made_new_phi = false;
2545 if (mms.is_empty()) {
2546 Node* new_phi = new_base->slice_memory(mms.adr_type(phase->C));
2547 made_new_phi = true;
2548 igvn->register_new_node_with_optimizer(new_phi);
2549 mms.set_memory(new_phi);
2550 }
2551 Node* phi = mms.memory();
2552 assert(made_new_phi || phi->in(i) == n, "replace the i-th merge by a slice");
2553 phi->set_req(i, mms.memory2());
2554 }
2555 }
2556 }
2557 // Distribute all self-loops.
2558 { // (Extra braces to hide mms.)
2559 for (MergeMemStream mms(result); mms.next_non_empty(); ) {
2560 Node* phi = mms.memory();
2561 for (uint i = 1; i < req(); ++i) {
2562 if (phi->in(i) == this) phi->set_req(i, phi);
2563 }
2564 }
2565 }
2566
2567 // We could immediately transform the new Phi nodes here, but that can
2568 // result in creating an excessive number of new nodes within a single
2569 // IGVN iteration. We have put the Phi nodes on the IGVN worklist, so
2570 // they are transformed later on in any case.
2571
2572 // Replace self with the result.
2573 return result;
2574 }
2575 }
2576 //
2577 // Other optimizations on the memory chain
2578 //
2579 const TypePtr* at = adr_type();
2580 for( uint i=1; i<req(); ++i ) {// For all paths in
2581 Node *ii = in(i);
2582 Node *new_in = MemNode::optimize_memory_chain(ii, at, nullptr, phase);
2583 if (ii != new_in ) {
2584 set_req(i, new_in);
2585 progress = this;
2586 }
2587 }
2588 }
2589
2590 #ifdef _LP64
  // Push DecodeN/DecodeNKlass down through the phi.
  // The rest of the phi graph will then be transformed by splitting EncodeP nodes up through the phis.
2593 if ((UseCompressedOops || UseCompressedClassPointers) && can_reshape && progress == nullptr) {
2594 bool may_push = true;
2595 bool has_decodeN = false;
2596 bool is_decodeN = false;
2597 for (uint i=1; i<req(); ++i) {// For all paths in
2598 Node *ii = in(i);
2599 if (ii->is_DecodeNarrowPtr() && ii->bottom_type() == bottom_type()) {
        // Do the optimization only if a non-dead path exists.
2601 if (ii->in(1)->bottom_type() != Type::TOP) {
2602 has_decodeN = true;
2603 is_decodeN = ii->is_DecodeN();
2604 }
2605 } else if (!ii->is_Phi()) {
2606 may_push = false;
2607 }
2608 }
2609
2610 if (has_decodeN && may_push) {
2611 PhaseIterGVN *igvn = phase->is_IterGVN();
2612 // Make narrow type for new phi.
2613 const Type* narrow_t;
2614 if (is_decodeN) {
2615 narrow_t = TypeNarrowOop::make(this->bottom_type()->is_ptr());
2616 } else {
2617 narrow_t = TypeNarrowKlass::make(this->bottom_type()->is_ptr());
2618 }
2619 PhiNode* new_phi = new PhiNode(r, narrow_t);
2620 uint orig_cnt = req();
2621 for (uint i=1; i<req(); ++i) {// For all paths in
2622 Node *ii = in(i);
2623 Node* new_ii = nullptr;
2624 if (ii->is_DecodeNarrowPtr()) {
2625 assert(ii->bottom_type() == bottom_type(), "sanity");
2626 new_ii = ii->in(1);
2627 } else {
2628 assert(ii->is_Phi(), "sanity");
2629 if (ii->as_Phi() == this) {
2630 new_ii = new_phi;
2631 } else {
2632 if (is_decodeN) {
2633 new_ii = new EncodePNode(ii, narrow_t);
2634 } else {
2635 new_ii = new EncodePKlassNode(ii, narrow_t);
2636 }
2637 igvn->register_new_node_with_optimizer(new_ii);
2638 }
2639 }
2640 new_phi->set_req(i, new_ii);
2641 }
2642 igvn->register_new_node_with_optimizer(new_phi, this);
2643 if (is_decodeN) {
2644 progress = new DecodeNNode(new_phi, bottom_type());
2645 } else {
2646 progress = new DecodeNKlassNode(new_phi, bottom_type());
2647 }
2648 }
2649 }
2650 #endif
2651
  // Try to convert a Phi whose inputs are all the same kind of Convert node into a Phi of the pre-conversion type
  // followed by a single Convert node, to de-duplicate the converts and compact the IR.
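  // Illustrative example (a sketch; names are made up):
  //   Phi(r, ConvI2L(a), ConvI2L(b))   ==>   ConvI2L(Phi(r, a, b))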
2654 if (can_reshape && progress == nullptr) {
2655 ConvertNode* convert = in(1)->isa_Convert();
2656 if (convert != nullptr) {
2657 int conv_op = convert->Opcode();
2658 bool ok = true;
2659
2660 // Check the rest of the inputs
2661 for (uint i = 2; i < req(); i++) {
2662 // Make sure that all inputs are of the same type of convert node
2663 if (in(i)->Opcode() != conv_op) {
2664 ok = false;
2665 break;
2666 }
2667 }
2668
2669 if (ok) {
2670 // Find the local bottom type to set as the type of the phi
2671 const Type* source_type = Type::get_const_basic_type(convert->in_type()->basic_type());
2672 const Type* dest_type = convert->bottom_type();
2673
2674 PhiNode* newphi = new PhiNode(in(0), source_type, nullptr);
      // Set the inputs of the new phi to be the inputs of the converts
2676 for (uint i = 1; i < req(); i++) {
2677 newphi->init_req(i, in(i)->in(1));
2678 }
2679
2680 phase->is_IterGVN()->register_new_node_with_optimizer(newphi, this);
2681
2682 return ConvertNode::create_convert(get_convert_type(convert, source_type), get_convert_type(convert, dest_type), newphi);
2683 }
2684 }
2685 }
2686
2687 // Phi (VB ... VB) => VB (Phi ...) (Phi ...)
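  // Illustrative sketch (names are made up):
  //   Phi(r, VectorBox(box1, vec1), VectorBox(box2, vec2))
  //     ==>  VectorBox(Phi(r, box1, box2), Phi(r, vec1, vec2))
  // All VectorBox inputs (possibly reached through nested Phis) must agree on
  // box type and vector type.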
2688 if (EnableVectorReboxing && can_reshape && progress == nullptr && type()->isa_oopptr()) {
2689 progress = merge_through_phi(this, phase->is_IterGVN());
2690 }
2691
2692 return progress; // Return any progress
2693 }
2694
2695 static int compare_types(const Type* const& e1, const Type* const& e2) {
2696 return (intptr_t)e1 - (intptr_t)e2;
2697 }
2698
// Collect types at casts that are going to be eliminated at this Phi and store them in a TypeTuple.
// Sort the types using an arbitrary order so that a given list of types always hashes to the same TypeTuple (and
// TypeTuple pointer comparison is then enough to tell whether two lists of types are the same or not)
2702 const TypeTuple* PhiNode::collect_types(PhaseGVN* phase) const {
2703 const Node* region = in(0);
2704 const Type* phi_type = bottom_type();
2705 ResourceMark rm;
2706 GrowableArray<const Type*> types;
2707 for (uint i = 1; i < req(); i++) {
2708 if (region->in(i) == nullptr || phase->type(region->in(i)) == Type::TOP) {
2709 continue;
2710 }
2711 Node* in = Node::in(i);
2712 const Type* t = phase->type(in);
2713 if (in == nullptr || in == this || t == Type::TOP) {
2714 continue;
2715 }
2716 if (t != phi_type && t->higher_equal_speculative(phi_type)) {
2717 types.insert_sorted<compare_types>(t);
2718 }
2719 while (in != nullptr && in->is_ConstraintCast()) {
2720 Node* next = in->in(1);
2721 if (phase->type(next)->isa_rawptr() && phase->type(in)->isa_oopptr()) {
2722 break;
2723 }
2724 ConstraintCastNode* cast = in->as_ConstraintCast();
2725 for (int j = 0; j < cast->extra_types_count(); ++j) {
2726 const Type* extra_t = cast->extra_type_at(j);
2727 if (extra_t != phi_type && extra_t->higher_equal_speculative(phi_type)) {
2728 types.insert_sorted<compare_types>(extra_t);
2729 }
2730 }
2731 in = next;
2732 }
2733 }
2734 const Type **flds = (const Type **)(phase->C->type_arena()->AmallocWords(types.length()*sizeof(Type*)));
2735 for (int i = 0; i < types.length(); ++i) {
2736 flds[i] = types.at(i);
2737 }
2738 return TypeTuple::make(types.length(), flds);
2739 }
2740
2741 Node* PhiNode::clone_through_phi(Node* root_phi, const Type* t, uint c, PhaseIterGVN* igvn) {
2742 Node_Stack stack(1);
2743 VectorSet visited;
2744 Node_List node_map;
2745
2746 stack.push(root_phi, 1); // ignore control
2747 visited.set(root_phi->_idx);
2748
2749 Node* new_phi = new PhiNode(root_phi->in(0), t);
2750 node_map.map(root_phi->_idx, new_phi);
2751
2752 while (stack.is_nonempty()) {
2753 Node* n = stack.node();
2754 uint idx = stack.index();
2755 assert(n->is_Phi(), "not a phi");
2756 if (idx < n->req()) {
2757 stack.set_index(idx + 1);
2758 Node* def = n->in(idx);
2759 if (def == nullptr) {
2760 continue; // ignore dead path
2761 } else if (def->is_Phi()) { // inner node
2762 Node* new_phi = node_map[n->_idx];
2763 if (!visited.test_set(def->_idx)) { // not visited yet
2764 node_map.map(def->_idx, new PhiNode(def->in(0), t));
2765 stack.push(def, 1); // ignore control
2766 }
2767 Node* new_in = node_map[def->_idx];
2768 new_phi->set_req(idx, new_in);
2769 } else if (def->Opcode() == Op_VectorBox) { // leaf
2770 assert(n->is_Phi(), "not a phi");
2771 Node* new_phi = node_map[n->_idx];
2772 new_phi->set_req(idx, def->in(c));
2773 } else {
2774 assert(false, "not optimizeable");
2775 return nullptr;
2776 }
2777 } else {
2778 Node* new_phi = node_map[n->_idx];
2779 igvn->register_new_node_with_optimizer(new_phi, n);
2780 stack.pop();
2781 }
2782 }
2783 return new_phi;
2784 }
2785
2786 Node* PhiNode::merge_through_phi(Node* root_phi, PhaseIterGVN* igvn) {
2787 Node_Stack stack(1);
2788 VectorSet visited;
2789
2790 stack.push(root_phi, 1); // ignore control
2791 visited.set(root_phi->_idx);
2792
2793 VectorBoxNode* cached_vbox = nullptr;
2794 while (stack.is_nonempty()) {
2795 Node* n = stack.node();
2796 uint idx = stack.index();
2797 if (idx < n->req()) {
2798 stack.set_index(idx + 1);
2799 Node* in = n->in(idx);
2800 if (in == nullptr) {
2801 continue; // ignore dead path
2802 } else if (in->isa_Phi()) {
2803 if (!visited.test_set(in->_idx)) {
2804 stack.push(in, 1); // ignore control
2805 }
2806 } else if (in->Opcode() == Op_VectorBox) {
2807 VectorBoxNode* vbox = static_cast<VectorBoxNode*>(in);
2808 if (cached_vbox == nullptr) {
2809 cached_vbox = vbox;
2810 } else if (vbox->vec_type() != cached_vbox->vec_type()) {
2811 // TODO: vector type mismatch can be handled with additional reinterpret casts
2812 assert(!Type::equals(vbox->vec_type(), cached_vbox->vec_type()), "inconsistent");
2813 return nullptr; // not optimizable: vector type mismatch
2814 } else if (vbox->box_type() != cached_vbox->box_type()) {
2815 assert(!Type::equals(vbox->box_type(), cached_vbox->box_type()), "inconsistent");
2816 return nullptr; // not optimizable: box type mismatch
2817 }
2818 } else {
2819 return nullptr; // not optimizable: neither Phi nor VectorBox
2820 }
2821 } else {
2822 stack.pop();
2823 }
2824 }
2825 if (cached_vbox == nullptr) {
2826 // We have a Phi dead-loop (no data-input). Phi nodes are considered safe,
2827 // so just avoid this optimization.
2828 return nullptr;
2829 }
2830 const TypeInstPtr* btype = cached_vbox->box_type();
2831 const TypeVect* vtype = cached_vbox->vec_type();
2832 Node* new_vbox_phi = clone_through_phi(root_phi, btype, VectorBoxNode::Box, igvn);
2833 Node* new_vect_phi = clone_through_phi(root_phi, vtype, VectorBoxNode::Value, igvn);
2834 return new VectorBoxNode(igvn->C, new_vbox_phi, new_vect_phi, btype, vtype);
2835 }
2836
2837 bool PhiNode::is_data_loop(RegionNode* r, Node* uin, const PhaseGVN* phase) {
2838 // First, take the short cut when we know it is a loop and the EntryControl data path is dead.
2839 // The loop node may only have one input because the entry path was removed in PhaseIdealLoop::Dominators().
2840 // Then, check if there is a data loop when the phi references itself directly or through other data nodes.
2841 assert(!r->is_Loop() || r->req() <= 3, "Loop node should have 3 or less inputs");
2842 const bool is_loop = (r->is_Loop() && r->req() == 3);
2843 const Node* top = phase->C->top();
2844 if (is_loop) {
2845 return !uin->eqv_uncast(in(LoopNode::EntryControl));
2846 } else {
2847 // We have a data loop either with an unsafe data reference or if a region is unreachable.
2848 return is_unsafe_data_reference(uin)
2849 || (r->req() == 3 && (r->in(1) != top && r->in(2) == top && r->is_unreachable_region(phase)));
2850 }
2851 }
2852
2853 //------------------------------is_tripcount-----------------------------------
2854 bool PhiNode::is_tripcount(BasicType bt) const {
2855 return (in(0) != nullptr && in(0)->is_BaseCountedLoop() &&
2856 in(0)->as_BaseCountedLoop()->bt() == bt &&
2857 in(0)->as_BaseCountedLoop()->phi() == this);
2858 }
2859
2860 //------------------------------out_RegMask------------------------------------
2861 const RegMask &PhiNode::in_RegMask(uint i) const {
2862 return i ? out_RegMask() : RegMask::Empty;
2863 }
2864
2865 const RegMask &PhiNode::out_RegMask() const {
2866 uint ideal_reg = _type->ideal_reg();
2867 assert( ideal_reg != Node::NotAMachineReg, "invalid type at Phi" );
2868 if( ideal_reg == 0 ) return RegMask::Empty;
2869 assert(ideal_reg != Op_RegFlags, "flags register is not spillable");
2870 return *(Compile::current()->matcher()->idealreg2spillmask[ideal_reg]);
2871 }
2872
2873 #ifndef PRODUCT
2874 void PhiNode::dump_spec(outputStream *st) const {
2875 TypeNode::dump_spec(st);
2876 if (is_tripcount(T_INT) || is_tripcount(T_LONG)) {
2877 st->print(" #tripcount");
2878 }
2879 }
2880 #endif
2881
2882
2883 //=============================================================================
2884 const Type* GotoNode::Value(PhaseGVN* phase) const {
2885 // If the input is reachable, then we are executed.
2886 // If the input is not reachable, then we are not executed.
2887 return phase->type(in(0));
2888 }
2889
2890 Node* GotoNode::Identity(PhaseGVN* phase) {
2891 return in(0); // Simple copy of incoming control
2892 }
2893
2894 const RegMask &GotoNode::out_RegMask() const {
2895 return RegMask::Empty;
2896 }
2897
2898 //=============================================================================
2899 const RegMask &JumpNode::out_RegMask() const {
2900 return RegMask::Empty;
2901 }
2902
2903 //=============================================================================
2904 const RegMask &JProjNode::out_RegMask() const {
2905 return RegMask::Empty;
2906 }
2907
2908 //=============================================================================
2909 const RegMask &CProjNode::out_RegMask() const {
2910 return RegMask::Empty;
2911 }
2912
2913
2914
2915 //=============================================================================
2916
2917 uint PCTableNode::hash() const { return Node::hash() + _size; }
2918 bool PCTableNode::cmp( const Node &n ) const
2919 { return _size == ((PCTableNode&)n)._size; }
2920
2921 const Type *PCTableNode::bottom_type() const {
2922 const Type** f = TypeTuple::fields(_size);
2923 for( uint i = 0; i < _size; i++ ) f[i] = Type::CONTROL;
2924 return TypeTuple::make(_size, f);
2925 }
2926
2927 //------------------------------Value------------------------------------------
// Compute the type of the PCTableNode. If reachable, it is a tuple of
// Control; otherwise the table targets are not reachable.
2930 const Type* PCTableNode::Value(PhaseGVN* phase) const {
2931 if( phase->type(in(0)) == Type::CONTROL )
2932 return bottom_type();
2933 return Type::TOP; // All paths dead? Then so are we
2934 }
2935
2936 //------------------------------Ideal------------------------------------------
2937 // Return a node which is more "ideal" than the current node. Strip out
2938 // control copies
2939 Node *PCTableNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2940 return remove_dead_region(phase, can_reshape) ? this : nullptr;
2941 }
2942
2943 //=============================================================================
2944 uint JumpProjNode::hash() const {
2945 return Node::hash() + _dest_bci;
2946 }
2947
2948 bool JumpProjNode::cmp( const Node &n ) const {
2949 return ProjNode::cmp(n) &&
2950 _dest_bci == ((JumpProjNode&)n)._dest_bci;
2951 }
2952
2953 #ifndef PRODUCT
2954 void JumpProjNode::dump_spec(outputStream *st) const {
2955 ProjNode::dump_spec(st);
2956 st->print("@bci %d ",_dest_bci);
2957 }
2958
2959 void JumpProjNode::dump_compact_spec(outputStream *st) const {
2960 ProjNode::dump_compact_spec(st);
2961 st->print("(%d)%d@%d", _switch_val, _proj_no, _dest_bci);
2962 }
2963 #endif
2964
2965 //=============================================================================
2966 //------------------------------Value------------------------------------------
// Check for being unreachable, or for coming from a Rethrow. Rethrows cannot
// have the default "fall_through_index" path.
2969 const Type* CatchNode::Value(PhaseGVN* phase) const {
2970 // Unreachable? Then so are all paths from here.
2971 if( phase->type(in(0)) == Type::TOP ) return Type::TOP;
2972 // First assume all paths are reachable
2973 const Type** f = TypeTuple::fields(_size);
2974 for( uint i = 0; i < _size; i++ ) f[i] = Type::CONTROL;
2975 // Identify cases that will always throw an exception
2976 // () rethrow call
2977 // () virtual or interface call with null receiver
2978 // () call is a check cast with incompatible arguments
  if( in(1)->is_Proj() ) {
    Node *i10 = in(1)->in(0);
    if( i10->is_Call() ) {
      CallNode *call = i10->as_Call();
      // Rethrows always throw exceptions, never return
      if (call->entry_point() == OptoRuntime::rethrow_stub()) {
        f[CatchProjNode::fall_through_index] = Type::TOP;
      } else if (call->is_AllocateArray()) {
        Node* klass_node = call->in(AllocateNode::KlassNode);
        Node* length = call->in(AllocateNode::ALength);
        const Type* length_type = phase->type(length);
        const Type* klass_type = phase->type(klass_node);
        Node* valid_length_test = call->in(AllocateNode::ValidLengthTest);
        const Type* valid_length_test_t = phase->type(valid_length_test);
        if (length_type == Type::TOP || klass_type == Type::TOP || valid_length_test_t == Type::TOP ||
            valid_length_test_t->is_int()->is_con(0)) {
          f[CatchProjNode::fall_through_index] = Type::TOP;
        }
      } else if( call->req() > TypeFunc::Parms ) {
        const Type *arg0 = phase->type( call->in(TypeFunc::Parms) );
        // Check for null receiver to virtual or interface calls
        if( call->is_CallDynamicJava() &&
            arg0->higher_equal(TypePtr::NULL_PTR) ) {
          f[CatchProjNode::fall_through_index] = Type::TOP;
        }
      } // End of if not a runtime stub
    } // End of if have call above me
  } // End of if slot 1 is a projection
  return TypeTuple::make(_size, f);
}

//=============================================================================
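// Like JumpProj, a CatchProj folds its handler bytecode index into hash()
// and cmp() so projections to different exception handlers are never
// value-numbered together.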
uint CatchProjNode::hash() const {
  return Node::hash() + _handler_bci;
}


bool CatchProjNode::cmp( const Node &n ) const {
  return ProjNode::cmp(n) &&
         _handler_bci == ((CatchProjNode&)n)._handler_bci;
}


//------------------------------Identity---------------------------------------
// If only 1 target is possible, choose it if it is the main control
Node* CatchProjNode::Identity(PhaseGVN* phase) {
  // If my value is control and no other value is, then treat as ID
  const TypeTuple *t = phase->type(in(0))->is_tuple();
  if (t->field_at(_con) != Type::CONTROL) return this;
  // If we remove the last CatchProj and elide the Catch/CatchProj, then we
  // also remove any exception table entry. Thus we must know the call
  // feeding the Catch will not really throw an exception. This is ok for
  // the main fall-thru control (happens when we know a call can never throw
  // an exception) or for "rethrow", because a further optimization will
  // yank the rethrow (happens when we inline a function that can throw an
  // exception and the caller has no handler). Not legal, e.g., for passing
  // a null receiver to a v-call, or passing bad types to a slow-check-cast.
  // These cases MUST throw an exception via the runtime system, so the VM
  // will be looking for a table entry.
  Node *proj = in(0)->in(1);    // Expect a proj feeding CatchNode
  CallNode *call;
  if (_con != TypeFunc::Control && // Bail out if not the main control.
      !(proj->is_Proj() &&      // AND NOT a rethrow
        proj->in(0)->is_Call() &&
        (call = proj->in(0)->as_Call()) &&
        call->entry_point() == OptoRuntime::rethrow_stub()))
    return this;

  // Search for any other path being control
  for (uint i = 0; i < t->cnt(); i++) {
    if (i != _con && t->field_at(i) == Type::CONTROL)
      return this;
  }
  // Only my path is possible; I am identity on control to the jump
  return in(0)->in(0);
}


#ifndef PRODUCT
void CatchProjNode::dump_spec(outputStream *st) const {
  ProjNode::dump_spec(st);
  st->print("@bci %d ",_handler_bci);
}
#endif

//=============================================================================
//------------------------------Identity---------------------------------------
// Check for CreateEx being Identity.
Node* CreateExNode::Identity(PhaseGVN* phase) {
  if( phase->type(in(1)) == Type::TOP ) return in(1);
  if( phase->type(in(0)) == Type::TOP ) return in(0);
  if (phase->type(in(0)->in(0)) == Type::TOP) {
    assert(in(0)->is_CatchProj(), "control is CatchProj");
    return phase->C->top(); // dead code
  }
  // We only come from CatchProj, unless the CatchProj goes away.
  // If the CatchProj is optimized away, then we just carry the
  // exception oop through.
  CallNode *call = in(1)->in(0)->as_Call();

  return (in(0)->is_CatchProj() && in(0)->in(0)->is_Catch() &&
          in(0)->in(0)->in(1) == in(1)) ? this : call->in(TypeFunc::Parms);
}

//=============================================================================
//------------------------------Value------------------------------------------
// Check for being unreachable.
const Type* NeverBranchNode::Value(PhaseGVN* phase) const {
  if (!in(0) || in(0)->is_top()) return Type::TOP;
  return bottom_type();
}

//------------------------------Ideal------------------------------------------
// Check for no longer being part of a loop
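// A NeverBranch is only needed while its input Region still heads an
// infinite loop; once that Region is gone we forward the fall-through
// projection to our control input and let this node die.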
Node *NeverBranchNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (can_reshape && !in(0)->is_Region()) {
    // Dead code elimination can sometimes delete this projection so
    // if it's not there, there's nothing to do.
    Node* fallthru = proj_out_or_null(0);
    if (fallthru != nullptr) {
      phase->is_IterGVN()->replace_node(fallthru, in(0));
    }
    return phase->C->top();
  }
  return nullptr;
}

#ifndef PRODUCT
void NeverBranchNode::format( PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("%s", Name());
}
#endif

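// A blackhole exists only to keep its inputs alive; the sole Ideal transform
// it performs is the usual removal of a dead control region.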
Node* BlackholeNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}

#ifndef PRODUCT
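// Print the blackhole together with the register assigned to each input that
// received one from the register allocator.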
void BlackholeNode::format(PhaseRegAlloc* ra, outputStream* st) const {
  st->print("blackhole ");
  bool first = true;
  for (uint i = 0; i < req(); i++) {
    Node* n = in(i);
    if (n != nullptr && OptoReg::is_valid(ra->get_reg_first(n))) {
      if (first) {
        first = false;
      } else {
        st->print(", ");
      }
      char buf[128];
      ra->dump_register(n, buf, sizeof(buf));
      st->print("%s", buf);
    }
  }
  st->cr();
}
#endif