1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "gc/shared/barrierSet.hpp"
26 #include "gc/shared/c2/barrierSetC2.hpp"
27 #include "memory/allocation.inline.hpp"
28 #include "memory/resourceArea.hpp"
29 #include "oops/objArrayKlass.hpp"
30 #include "opto/addnode.hpp"
31 #include "opto/castnode.hpp"
32 #include "opto/cfgnode.hpp"
33 #include "opto/connode.hpp"
34 #include "opto/convertnode.hpp"
35 #include "opto/inlinetypenode.hpp"
36 #include "opto/loopnode.hpp"
37 #include "opto/machnode.hpp"
38 #include "opto/movenode.hpp"
39 #include "opto/mulnode.hpp"
40 #include "opto/narrowptrnode.hpp"
41 #include "opto/phaseX.hpp"
42 #include "opto/regalloc.hpp"
43 #include "opto/regmask.hpp"
44 #include "opto/runtime.hpp"
45 #include "opto/subnode.hpp"
46 #include "opto/vectornode.hpp"
47 #include "utilities/vmError.hpp"
48
49 // Portions of code courtesy of Clifford Click
50
51 // Optimization - Graph Style
52
53 //=============================================================================
54 //------------------------------Value------------------------------------------
55 // Compute the type of the RegionNode.
56 const Type* RegionNode::Value(PhaseGVN* phase) const {
57 for( uint i=1; i<req(); ++i ) { // For all paths in
58 Node *n = in(i); // Get Control source
59 if( !n ) continue; // Missing inputs are TOP
60 if( phase->type(n) == Type::CONTROL )
61 return Type::CONTROL;
62 }
63 return Type::TOP; // All paths dead? Then so are we
64 }
65
66 //------------------------------Identity---------------------------------------
67 // Check for Region being Identity.
68 Node* RegionNode::Identity(PhaseGVN* phase) {
69 // Cannot have Region be an identity, even if it has only 1 input.
70 // Phi users cannot have their Region input folded away for them,
71 // since they need to select the proper data input
72 return this;
73 }
74
75 //------------------------------merge_region-----------------------------------
// If a Region flows into a Region, merge into one big happy merge. This is
// hard to do if there is stuff that has to happen at the merge point (such as
// Phis), so we only flatten when there are no Phi users.
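//
// A rough sketch of the flattening (illustrative only), assuming no Phi users
// on either Region:
//
//   a   b                      a   b   c
//    \ /                        \  |  /
//   Region   c      ==>          Region
//        \  /
//       Region
//
// The inner Region's inputs are appended to the outer one, and the edge to the
// now dead inner Region is replaced by top.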
78 static Node *merge_region(RegionNode *region, PhaseGVN *phase) {
79 if( region->Opcode() != Op_Region ) // Do not do to LoopNodes
80 return nullptr;
81 Node *progress = nullptr; // Progress flag
82 PhaseIterGVN *igvn = phase->is_IterGVN();
83
84 uint rreq = region->req();
85 for( uint i = 1; i < rreq; i++ ) {
86 Node *r = region->in(i);
87 if( r && r->Opcode() == Op_Region && // Found a region?
88 r->in(0) == r && // Not already collapsed?
89 r != region && // Avoid stupid situations
90 r->outcnt() == 2 ) { // Self user and 'region' user only?
91 assert(!r->as_Region()->has_phi(), "no phi users");
92 if( !progress ) { // No progress
93 if (region->has_phi()) {
94 return nullptr; // Only flatten if no Phi users
95 // igvn->hash_delete( phi );
96 }
97 igvn->hash_delete( region );
98 progress = region; // Making progress
99 }
100 igvn->hash_delete( r );
101
102 // Append inputs to 'r' onto 'region'
103 for( uint j = 1; j < r->req(); j++ ) {
104 // Move an input from 'r' to 'region'
105 region->add_req(r->in(j));
106 r->set_req(j, phase->C->top());
107 // Update phis of 'region'
108 //for( uint k = 0; k < max; k++ ) {
109 // Node *phi = region->out(k);
110 // if( phi->is_Phi() ) {
111 // phi->add_req(phi->in(i));
112 // }
113 //}
114
115 rreq++; // One more input to Region
116 } // Found a region to merge into Region
117 igvn->_worklist.push(r);
118 // Clobber pointer to the now dead 'r'
119 region->set_req(i, phase->C->top());
120 }
121 }
122
123 return progress;
124 }
125
126
127
128 //--------------------------------has_phi--------------------------------------
129 // Helper function: Return any PhiNode that uses this region or null
130 PhiNode* RegionNode::has_phi() const {
131 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
132 Node* phi = fast_out(i);
133 if (phi->is_Phi()) { // Check for Phi users
134 assert(phi->in(0) == (Node*)this, "phi uses region only via in(0)");
135 return phi->as_Phi(); // this one is good enough
136 }
137 }
138
139 return nullptr;
140 }
141
142
143 //-----------------------------has_unique_phi----------------------------------
144 // Helper function: Return the only PhiNode that uses this region or null
145 PhiNode* RegionNode::has_unique_phi() const {
146 // Check that only one use is a Phi
147 PhiNode* only_phi = nullptr;
148 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
149 Node* phi = fast_out(i);
150 if (phi->is_Phi()) { // Check for Phi users
151 assert(phi->in(0) == (Node*)this, "phi uses region only via in(0)");
152 if (only_phi == nullptr) {
153 only_phi = phi->as_Phi();
154 } else {
155 return nullptr; // multiple phis
156 }
157 }
158 }
159
160 return only_phi;
161 }
162
163
164 //------------------------------check_phi_clipping-----------------------------
165 // Helper function for RegionNode's identification of FP clipping
166 // Check inputs to the Phi
167 static bool check_phi_clipping( PhiNode *phi, ConNode * &min, uint &min_idx, ConNode * &max, uint &max_idx, Node * &val, uint &val_idx ) {
168 min = nullptr;
169 max = nullptr;
170 val = nullptr;
171 min_idx = 0;
172 max_idx = 0;
173 val_idx = 0;
174 uint phi_max = phi->req();
175 if( phi_max == 4 ) {
176 for( uint j = 1; j < phi_max; ++j ) {
177 Node *n = phi->in(j);
178 int opcode = n->Opcode();
179 switch( opcode ) {
180 case Op_ConI:
181 {
182 if( min == nullptr ) {
183 min = n->Opcode() == Op_ConI ? (ConNode*)n : nullptr;
184 min_idx = j;
185 } else {
186 max = n->Opcode() == Op_ConI ? (ConNode*)n : nullptr;
187 max_idx = j;
188 if( min->get_int() > max->get_int() ) {
189 // Swap min and max
190 ConNode *temp;
191 uint temp_idx;
192 temp = min; min = max; max = temp;
193 temp_idx = min_idx; min_idx = max_idx; max_idx = temp_idx;
194 }
195 }
196 }
197 break;
198 default:
199 {
200 val = n;
201 val_idx = j;
202 }
203 break;
204 }
205 }
206 }
207 return ( min && max && val && (min->get_int() <= 0) && (max->get_int() >=0) );
208 }
209
210
211 //------------------------------check_if_clipping------------------------------
212 // Helper function for RegionNode's identification of FP clipping
213 // Check that inputs to Region come from two IfNodes,
214 //
215 // If
216 // False True
217 // If |
218 // False True |
219 // | | |
220 // RegionNode_inputs
221 //
222 static bool check_if_clipping( const RegionNode *region, IfNode * &bot_if, IfNode * &top_if ) {
223 top_if = nullptr;
224 bot_if = nullptr;
225
226 // Check control structure above RegionNode for (if ( if ) )
227 Node *in1 = region->in(1);
228 Node *in2 = region->in(2);
229 Node *in3 = region->in(3);
230 // Check that all inputs are projections
231 if( in1->is_Proj() && in2->is_Proj() && in3->is_Proj() ) {
232 Node *in10 = in1->in(0);
233 Node *in20 = in2->in(0);
234 Node *in30 = in3->in(0);
235 // Check that #1 and #2 are ifTrue and ifFalse from same If
236 if( in10 != nullptr && in10->is_If() &&
237 in20 != nullptr && in20->is_If() &&
238 in30 != nullptr && in30->is_If() && in10 == in20 &&
239 (in1->Opcode() != in2->Opcode()) ) {
240 Node *in100 = in10->in(0);
241 Node *in1000 = (in100 != nullptr && in100->is_Proj()) ? in100->in(0) : nullptr;
242 // Check that control for in10 comes from other branch of IF from in3
243 if( in1000 != nullptr && in1000->is_If() &&
244 in30 == in1000 && (in3->Opcode() != in100->Opcode()) ) {
245 // Control pattern checks
246 top_if = (IfNode*)in1000;
247 bot_if = (IfNode*)in10;
248 }
249 }
250 }
251
252 return (top_if != nullptr);
253 }
254
255
256 //------------------------------check_convf2i_clipping-------------------------
257 // Helper function for RegionNode's identification of FP clipping
258 // Verify that the value input to the phi comes from "ConvF2I; LShift; RShift"
259 static bool check_convf2i_clipping( PhiNode *phi, uint idx, ConvF2INode * &convf2i, Node *min, Node *max) {
260 convf2i = nullptr;
261
262 // Check for the RShiftNode
263 Node *rshift = phi->in(idx);
264 assert( rshift, "Previous checks ensure phi input is present");
265 if( rshift->Opcode() != Op_RShiftI ) { return false; }
266
267 // Check for the LShiftNode
268 Node *lshift = rshift->in(1);
269 assert( lshift, "Previous checks ensure phi input is present");
270 if( lshift->Opcode() != Op_LShiftI ) { return false; }
271
272 // Check for the ConvF2INode
273 Node *conv = lshift->in(1);
274 if( conv->Opcode() != Op_ConvF2I ) { return false; }
275
276 // Check that shift amounts are only to get sign bits set after F2I
277 jint max_cutoff = max->get_int();
278 jint min_cutoff = min->get_int();
279 jint left_shift = lshift->in(2)->get_int();
280 jint right_shift = rshift->in(2)->get_int();
281 jint max_post_shift = nth_bit(BitsPerJavaInteger - left_shift - 1);
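// Worked example (illustrative only): with left_shift == right_shift == 16,
// max_post_shift == 1 << 15 == 32768. The check below then rejects the
// transformation unless max <= 32768 and -min <= 32768, since otherwise
// dropping the shift pair could change the clipped value.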
282 if( left_shift != right_shift ||
283 0 > left_shift || left_shift >= BitsPerJavaInteger ||
284 max_post_shift < max_cutoff ||
285 max_post_shift < -min_cutoff ) {
286 // Shifts are necessary but current transformation eliminates them
287 return false;
288 }
289
290 // OK to return the result of ConvF2I without shifting
291 convf2i = (ConvF2INode*)conv;
292 return true;
293 }
294
295
296 //------------------------------check_compare_clipping-------------------------
297 // Helper function for RegionNode's identification of FP clipping
298 static bool check_compare_clipping( bool less_than, IfNode *iff, ConNode *limit, Node * & input ) {
299 Node *i1 = iff->in(1);
300 if ( !i1->is_Bool() ) { return false; }
301 BoolNode *bool1 = i1->as_Bool();
302 if( less_than && bool1->_test._test != BoolTest::le ) { return false; }
303 else if( !less_than && bool1->_test._test != BoolTest::lt ) { return false; }
304 const Node *cmpF = bool1->in(1);
305 if( cmpF->Opcode() != Op_CmpF ) { return false; }
306 // Test that the float value being compared against
307 // is equivalent to the int value used as a limit
308 Node *nodef = cmpF->in(2);
309 if( nodef->Opcode() != Op_ConF ) { return false; }
310 jfloat conf = nodef->getf();
311 jint coni = limit->get_int();
312 if( ((int)conf) != coni ) { return false; }
313 input = cmpF->in(1);
314 return true;
315 }
316
317 //------------------------------is_unreachable_region--------------------------
318 // Check if the RegionNode is part of an unsafe loop and unreachable from root.
319 bool RegionNode::is_unreachable_region(const PhaseGVN* phase) {
320 Node* top = phase->C->top();
321 assert(req() == 2 || (req() == 3 && in(1) != nullptr && in(2) == top), "sanity check arguments");
322 if (_is_unreachable_region) {
323 // Return cached result from previous evaluation which should still be valid
assert(is_unreachable_from_root(phase), "walk the graph again and check if it's indeed unreachable");
325 return true;
326 }
327
328 // First, cut the simple case of fallthrough region when NONE of
329 // region's phis references itself directly or through a data node.
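// A phi can keep such a subgraph alive through a data-only cycle, e.g.
// (illustrative only):
//
//   Phi --> AddI --+
//    ^             |
//    +-------------+
//
// which has no users outside the cycle itself.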
330 if (is_possible_unsafe_loop()) {
331 // If we have a possible unsafe loop, check if the region node is actually unreachable from root.
332 if (is_unreachable_from_root(phase)) {
333 _is_unreachable_region = true;
334 return true;
335 }
336 }
337 return false;
338 }
339
340 bool RegionNode::is_possible_unsafe_loop() const {
341 uint max = outcnt();
342 uint i;
343 for (i = 0; i < max; i++) {
344 Node* n = raw_out(i);
345 if (n != nullptr && n->is_Phi()) {
346 PhiNode* phi = n->as_Phi();
347 assert(phi->in(0) == this, "sanity check phi");
348 if (phi->outcnt() == 0) {
349 continue; // Safe case - no loops
350 }
351 if (phi->outcnt() == 1) {
352 Node* u = phi->raw_out(0);
// Skip if the only use is another Phi or Call or Uncommon trap.
354 // It is safe to consider this case as fallthrough.
355 if (u != nullptr && (u->is_Phi() || u->is_CFG())) {
356 continue;
357 }
358 }
// Check whether the phi references itself directly or through another node.
360 if (phi->as_Phi()->simple_data_loop_check(phi->in(1)) >= PhiNode::Unsafe) {
361 break; // Found possible unsafe data loop.
362 }
363 }
364 }
365 if (i >= max) {
366 return false; // An unsafe case was NOT found - don't need graph walk.
367 }
368 return true;
369 }
370
371 bool RegionNode::is_unreachable_from_root(const PhaseGVN* phase) const {
372 ResourceMark rm;
373 Node_List nstack;
374 VectorSet visited;
375
376 // Mark all control nodes reachable from root outputs
377 Node* n = (Node*)phase->C->root();
378 nstack.push(n);
379 visited.set(n->_idx);
380 while (nstack.size() != 0) {
381 n = nstack.pop();
382 uint max = n->outcnt();
383 for (uint i = 0; i < max; i++) {
384 Node* m = n->raw_out(i);
385 if (m != nullptr && m->is_CFG()) {
386 if (m == this) {
387 return false; // We reached the Region node - it is not dead.
388 }
389 if (!visited.test_set(m->_idx))
390 nstack.push(m);
391 }
392 }
393 }
394 return true; // The Region node is unreachable - it is dead.
395 }
396
397 #ifdef ASSERT
398 // Is this region in an infinite subgraph?
399 // (no path to root except through false NeverBranch exit)
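// Such subgraphs typically come from endless loops in the source, e.g.
// "for (;;) {}": the NeverBranch that C2 inserts for them is the only
// connection back towards Root (illustrative example, not an exhaustive
// characterization).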
400 bool RegionNode::is_in_infinite_subgraph() {
401 ResourceMark rm;
402 Unique_Node_List worklist;
403 worklist.push(this);
404 return RegionNode::are_all_nodes_in_infinite_subgraph(worklist);
405 }
406
407 // Are all nodes in worklist in infinite subgraph?
408 // (no path to root except through false NeverBranch exit)
409 // worklist is directly used for the traversal
410 bool RegionNode::are_all_nodes_in_infinite_subgraph(Unique_Node_List& worklist) {
411 // BFS traversal down the CFG, except through NeverBranch exits
412 for (uint i = 0; i < worklist.size(); ++i) {
413 Node* n = worklist.at(i);
414 assert(n->is_CFG(), "only traverse CFG");
415 if (n->is_Root()) {
416 // Found root -> there was an exit!
417 return false;
418 } else if (n->is_NeverBranch()) {
419 // Only follow the loop-internal projection, not the NeverBranch exit
420 ProjNode* proj = n->as_NeverBranch()->proj_out_or_null(0);
421 assert(proj != nullptr, "must find loop-internal projection of NeverBranch");
422 worklist.push(proj);
423 } else {
424 // Traverse all CFG outputs
425 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
426 Node* use = n->fast_out(i);
427 if (use->is_CFG()) {
428 worklist.push(use);
429 }
430 }
431 }
432 }
433 // No exit found for any loop -> all are infinite
434 return true;
435 }
436 #endif //ASSERT
437
438 void RegionNode::set_loop_status(RegionNode::LoopStatus status) {
439 assert(loop_status() == RegionNode::LoopStatus::NeverIrreducibleEntry, "why set our status again?");
440 assert(status != RegionNode::LoopStatus::MaybeIrreducibleEntry || !is_Loop(), "LoopNode is never irreducible entry.");
441 _loop_status = status;
442 }
443
444 // A Region can only be an irreducible entry if:
445 // - It is marked as "maybe irreducible entry". Any other loop status would guarantee
446 // that it is never an irreducible loop entry.
447 // - And it is not a LoopNode, those are guaranteed to be reducible loop entries.
448 bool RegionNode::can_be_irreducible_entry() const {
449 return loop_status() == RegionNode::LoopStatus::MaybeIrreducibleEntry &&
450 !is_Loop();
451 }
452
453 void RegionNode::try_clean_mem_phis(PhaseIterGVN* igvn) {
454 // Incremental inlining + PhaseStringOpts sometimes produce:
455 //
456 // cmpP with 1 top input
457 // |
458 // If
459 // / \
460 // IfFalse IfTrue /- Some Node
461 // \ / / /
462 // Region / /-MergeMem
463 // \---Phi
464 //
465 //
466 // It's expected by PhaseStringOpts that the Region goes away and is
467 // replaced by If's control input but because there's still a Phi,
468 // the Region stays in the graph. The top input from the cmpP is
469 // propagated forward and a subgraph that is useful goes away. The
470 // code in PhiNode::try_clean_memory_phi() replaces the Phi with the
471 // MergeMem in order to remove the Region if its last phi dies.
472
473 if (!is_diamond()) {
474 return;
475 }
476
477 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
478 Node* phi = fast_out(i);
479 if (phi->is_Phi() && phi->as_Phi()->try_clean_memory_phi(igvn)) {
480 --i;
481 --imax;
482 }
483 }
484 }
485
486 // Does this region merge a simple diamond formed by a proper IfNode?
487 //
488 // Cmp
489 // /
490 // ctrl Bool
491 // \ /
492 // IfNode
493 // / \
494 // IfFalse IfTrue
495 // \ /
496 // Region
497 bool RegionNode::is_diamond() const {
498 if (req() != 3) {
499 return false;
500 }
501
502 Node* left_path = in(1);
503 Node* right_path = in(2);
504 if (left_path == nullptr || right_path == nullptr) {
505 return false;
506 }
507 Node* diamond_if = left_path->in(0);
508 if (diamond_if == nullptr || !diamond_if->is_If() || diamond_if != right_path->in(0)) {
509 // Not an IfNode merging a diamond or TOP.
510 return false;
511 }
512
513 // Check for a proper bool/cmp
514 const Node* bol = diamond_if->in(1);
515 if (!bol->is_Bool()) {
516 return false;
517 }
518 const Node* cmp = bol->in(1);
519 if (!cmp->is_Cmp()) {
520 return false;
521 }
522 return true;
523 }
524
525 //------------------------------Ideal------------------------------------------
526 // Return a node which is more "ideal" than the current node. Must preserve
527 // the CFG, but we can still strip out dead paths.
528 Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) {
529 if( !can_reshape && !in(0) ) return nullptr; // Already degraded to a Copy
530 assert(!in(0) || !in(0)->is_Root(), "not a specially hidden merge");
531
532 // Check for RegionNode with no Phi users and both inputs come from either
533 // arm of the same IF. If found, then the control-flow split is useless.
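//
// Roughly (illustrative only):
//
//          If
//         /  \
//   IfFalse  IfTrue     ==>   in(i) is rewired to the If's own control input
//         \  /                and in(j) is cleared; the If and its projections
//     Region (no Phis)        die if nothing else uses them.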
534 bool has_phis = false;
535 if (can_reshape) { // Need DU info to check for Phi users
536 try_clean_mem_phis(phase->is_IterGVN());
537 has_phis = (has_phi() != nullptr); // Cache result
538
539 if (!has_phis) { // No Phi users? Nothing merging?
540 for (uint i = 1; i < req()-1; i++) {
541 Node *if1 = in(i);
542 if( !if1 ) continue;
543 Node *iff = if1->in(0);
544 if( !iff || !iff->is_If() ) continue;
545 for( uint j=i+1; j<req(); j++ ) {
546 if( in(j) && in(j)->in(0) == iff &&
547 if1->Opcode() != in(j)->Opcode() ) {
548 // Add the IF Projections to the worklist. They (and the IF itself)
549 // will be eliminated if dead.
550 phase->is_IterGVN()->add_users_to_worklist(iff);
551 set_req(i, iff->in(0));// Skip around the useless IF diamond
552 set_req(j, nullptr);
553 return this; // Record progress
554 }
555 }
556 }
557 }
558 }
559
560 // Remove TOP or null input paths. If only 1 input path remains, this Region
561 // degrades to a copy.
562 bool add_to_worklist = true;
563 bool modified = false;
564 int cnt = 0; // Count of values merging
565 DEBUG_ONLY( int cnt_orig = req(); ) // Save original inputs count
566 DEBUG_ONLY( uint outcnt_orig = outcnt(); )
567 int del_it = 0; // The last input path we delete
568 bool found_top = false; // irreducible loops need to check reachability if we find TOP
569 // For all inputs...
570 for( uint i=1; i<req(); ++i ){// For all paths in
571 Node *n = in(i); // Get the input
572 if( n != nullptr ) {
573 // Remove useless control copy inputs
574 if( n->is_Region() && n->as_Region()->is_copy() ) {
575 set_req(i, n->nonnull_req());
576 modified = true;
577 i--;
578 continue;
579 }
580 if( n->is_Proj() ) { // Remove useless rethrows
581 Node *call = n->in(0);
582 if (call->is_Call() && call->as_Call()->entry_point() == OptoRuntime::rethrow_stub()) {
583 set_req(i, call->in(0));
584 modified = true;
585 i--;
586 continue;
587 }
588 }
589 if( phase->type(n) == Type::TOP ) {
590 set_req_X(i, nullptr, phase); // Ignore TOP inputs
591 modified = true;
592 found_top = true;
593 i--;
594 continue;
595 }
596 cnt++; // One more value merging
597 } else if (can_reshape) { // Else found dead path with DU info
598 PhaseIterGVN *igvn = phase->is_IterGVN();
599 del_req(i); // Yank path from self
600 del_it = i;
601
602 for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) {
603 Node* use = fast_out(j);
604
605 if (use->req() != req() && use->is_Phi()) {
606 assert(use->in(0) == this, "unexpected control input");
607 igvn->hash_delete(use); // Yank from hash before hacking edges
608 use->set_req_X(i, nullptr, igvn);// Correct DU info
609 use->del_req(i); // Yank path from Phis
610 }
611 }
612
613 if (add_to_worklist) {
614 igvn->add_users_to_worklist(this);
615 add_to_worklist = false;
616 }
617
618 i--;
619 }
620 }
621
assert(outcnt() == outcnt_orig, "did not expect to remove any use");
623
624 if (can_reshape && found_top && loop_status() == RegionNode::LoopStatus::MaybeIrreducibleEntry) {
625 // Is it a dead irreducible loop?
626 // If an irreducible loop loses one of the multiple entries
627 // that went into the loop head, or any secondary entries,
628 // we need to verify if the irreducible loop is still reachable,
629 // as the special logic in is_unreachable_region only works
630 // for reducible loops.
631 if (is_unreachable_from_root(phase)) {
632 // The irreducible loop is dead - must remove it
633 PhaseIterGVN* igvn = phase->is_IterGVN();
634 remove_unreachable_subgraph(igvn);
635 return nullptr;
636 }
637 } else if (can_reshape && cnt == 1) {
// Is it a dead loop?
639 // If it is LoopNode it had 2 (+1 itself) inputs and
640 // one of them was cut. The loop is dead if it was EntryControl.
641 // Loop node may have only one input because entry path
642 // is removed in PhaseIdealLoop::Dominators().
643 assert(!this->is_Loop() || cnt_orig <= 3, "Loop node should have 3 or less inputs");
644 if ((this->is_Loop() && (del_it == LoopNode::EntryControl ||
645 (del_it == 0 && is_unreachable_region(phase)))) ||
646 (!this->is_Loop() && has_phis && is_unreachable_region(phase))) {
647 PhaseIterGVN* igvn = phase->is_IterGVN();
648 remove_unreachable_subgraph(igvn);
649 return nullptr;
650 }
651 }
652
653 if( cnt <= 1 ) { // Only 1 path in?
654 set_req(0, nullptr); // Null control input for region copy
655 if( cnt == 0 && !can_reshape) { // Parse phase - leave the node as it is.
656 // No inputs or all inputs are null.
657 return nullptr;
658 } else if (can_reshape) { // Optimization phase - remove the node
659 PhaseIterGVN *igvn = phase->is_IterGVN();
660 // Strip mined (inner) loop is going away, remove outer loop.
661 if (is_CountedLoop() &&
662 as_Loop()->is_strip_mined()) {
663 Node* outer_sfpt = as_CountedLoop()->outer_safepoint();
664 Node* outer_out = as_CountedLoop()->outer_loop_exit();
665 if (outer_sfpt != nullptr && outer_out != nullptr) {
666 Node* in = outer_sfpt->in(0);
667 igvn->replace_node(outer_out, in);
668 LoopNode* outer = as_CountedLoop()->outer_loop();
669 igvn->replace_input_of(outer, LoopNode::LoopBackControl, igvn->C->top());
670 }
671 }
672 if (is_CountedLoop()) {
673 Node* opaq = as_CountedLoop()->is_canonical_loop_entry();
674 if (opaq != nullptr) {
675 // This is not a loop anymore. No need to keep the Opaque1 node on the test that guards the loop as it won't be
676 // subject to further loop opts.
677 assert(opaq->Opcode() == Op_OpaqueZeroTripGuard, "");
678 igvn->replace_node(opaq, opaq->in(1));
679 }
680 }
681 Node *parent_ctrl;
682 if( cnt == 0 ) {
683 assert( req() == 1, "no inputs expected" );
684 // During IGVN phase such region will be subsumed by TOP node
685 // so region's phis will have TOP as control node.
686 // Kill phis here to avoid it.
687 // Also set other user's input to top.
688 parent_ctrl = phase->C->top();
689 } else {
690 // The fallthrough case since we already checked dead loops above.
691 parent_ctrl = in(1);
692 assert(parent_ctrl != nullptr, "Region is a copy of some non-null control");
693 assert(parent_ctrl != this, "Close dead loop");
694 }
695 if (add_to_worklist) {
696 igvn->add_users_to_worklist(this); // Check for further allowed opts
697 }
698 for (DUIterator_Last imin, i = last_outs(imin); i >= imin; --i) {
699 Node* n = last_out(i);
700 igvn->hash_delete(n); // Remove from worklist before modifying edges
701 if (n->outcnt() == 0) {
702 int uses_found = n->replace_edge(this, phase->C->top(), igvn);
703 if (uses_found > 1) { // (--i) done at the end of the loop.
704 i -= (uses_found - 1);
705 }
706 continue;
707 }
708 if( n->is_Phi() ) { // Collapse all Phis
709 // Eagerly replace phis to avoid regionless phis.
710 Node* in;
711 if( cnt == 0 ) {
712 assert( n->req() == 1, "No data inputs expected" );
713 in = parent_ctrl; // replaced by top
714 } else {
715 assert( n->req() == 2 && n->in(1) != nullptr, "Only one data input expected" );
716 in = n->in(1); // replaced by unique input
717 if( n->as_Phi()->is_unsafe_data_reference(in) )
718 in = phase->C->top(); // replaced by top
719 }
720 igvn->replace_node(n, in);
721 }
722 else if( n->is_Region() ) { // Update all incoming edges
723 assert(n != this, "Must be removed from DefUse edges");
724 int uses_found = n->replace_edge(this, parent_ctrl, igvn);
725 if (uses_found > 1) { // (--i) done at the end of the loop.
726 i -= (uses_found - 1);
727 }
728 }
729 else {
730 assert(n->in(0) == this, "Expect RegionNode to be control parent");
731 n->set_req(0, parent_ctrl);
732 }
733 #ifdef ASSERT
734 for( uint k=0; k < n->req(); k++ ) {
735 assert(n->in(k) != this, "All uses of RegionNode should be gone");
736 }
737 #endif
738 }
739 // Remove the RegionNode itself from DefUse info
740 igvn->remove_dead_node(this);
741 return nullptr;
742 }
743 return this; // Record progress
744 }
745
746
747 // If a Region flows into a Region, merge into one big happy merge.
748 if (can_reshape) {
749 Node *m = merge_region(this, phase);
750 if (m != nullptr) return m;
751 }
752
753 // Check if this region is the root of a clipping idiom on floats
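// The idiom corresponds roughly to source that clamps a float-to-int
// conversion into a fixed range, e.g. (illustrative only; the exact shape is
// what the helpers above establish):
//
//   int i = ((int)f << 16) >> 16;     // ConvF2I; LShiftI; RShiftI
//   int res = clamp(i, min, max);     // selected via two float compares
//                                     // against constants equal to min/max,
//                                     // merged by this Region and its Phi
//
// The transformation replaces the float compares with integer compares on the
// ConvF2I result and drops the now redundant shift pair.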
754 if( ConvertFloat2IntClipping && can_reshape && req() == 4 ) {
// Check that only one use is a Phi and that its inputs are two clipping constants plus a value
756 PhiNode* phi = has_unique_phi();
757 if (phi != nullptr) { // One Phi user
758 // Check inputs to the Phi
759 ConNode *min;
760 ConNode *max;
761 Node *val;
762 uint min_idx;
763 uint max_idx;
764 uint val_idx;
765 if( check_phi_clipping( phi, min, min_idx, max, max_idx, val, val_idx ) ) {
766 IfNode *top_if;
767 IfNode *bot_if;
768 if( check_if_clipping( this, bot_if, top_if ) ) {
769 // Control pattern checks, now verify compares
770 Node *top_in = nullptr; // value being compared against
771 Node *bot_in = nullptr;
772 if( check_compare_clipping( true, bot_if, min, bot_in ) &&
773 check_compare_clipping( false, top_if, max, top_in ) ) {
774 if( bot_in == top_in ) {
775 PhaseIterGVN *gvn = phase->is_IterGVN();
776 assert( gvn != nullptr, "Only had DefUse info in IterGVN");
777 // Only remaining check is that bot_in == top_in == (Phi's val + mods)
778
779 // Check for the ConvF2INode
780 ConvF2INode *convf2i;
781 if( check_convf2i_clipping( phi, val_idx, convf2i, min, max ) &&
782 convf2i->in(1) == bot_in ) {
783 // Matched pattern, including LShiftI; RShiftI, replace with integer compares
784 // max test
785 Node *cmp = gvn->register_new_node_with_optimizer(new CmpINode( convf2i, min ));
786 Node *boo = gvn->register_new_node_with_optimizer(new BoolNode( cmp, BoolTest::lt ));
787 IfNode *iff = (IfNode*)gvn->register_new_node_with_optimizer(new IfNode( top_if->in(0), boo, PROB_UNLIKELY_MAG(5), top_if->_fcnt ));
788 Node *if_min= gvn->register_new_node_with_optimizer(new IfTrueNode (iff));
789 Node *ifF = gvn->register_new_node_with_optimizer(new IfFalseNode(iff));
790 // min test
791 cmp = gvn->register_new_node_with_optimizer(new CmpINode( convf2i, max ));
792 boo = gvn->register_new_node_with_optimizer(new BoolNode( cmp, BoolTest::gt ));
793 iff = (IfNode*)gvn->register_new_node_with_optimizer(new IfNode( ifF, boo, PROB_UNLIKELY_MAG(5), bot_if->_fcnt ));
794 Node *if_max= gvn->register_new_node_with_optimizer(new IfTrueNode (iff));
795 ifF = gvn->register_new_node_with_optimizer(new IfFalseNode(iff));
796 // update input edges to region node
797 set_req_X( min_idx, if_min, gvn );
798 set_req_X( max_idx, if_max, gvn );
799 set_req_X( val_idx, ifF, gvn );
800 // remove unnecessary 'LShiftI; RShiftI' idiom
801 gvn->hash_delete(phi);
802 phi->set_req_X( val_idx, convf2i, gvn );
803 gvn->hash_find_insert(phi);
804 // Return transformed region node
805 return this;
806 }
807 }
808 }
809 }
810 }
811 }
812 }
813
814 if (can_reshape) {
815 modified |= optimize_trichotomy(phase->is_IterGVN());
816 }
817
818 return modified ? this : nullptr;
819 }
820
821 //--------------------------remove_unreachable_subgraph----------------------
822 // This region and therefore all nodes on the input control path(s) are unreachable
823 // from root. To avoid incomplete removal of unreachable subgraphs, walk up the CFG
824 // and aggressively replace all nodes by top.
// If a control node "def" with a single control output "use" gets replaced with top, then
// "use" removes itself. This has the consequence that when we visit "use", it already
// has all inputs removed. They are lost and we cannot traverse them. This is
// why we first find all unreachable nodes, and then remove
829 // them in a second step.
830 void RegionNode::remove_unreachable_subgraph(PhaseIterGVN* igvn) {
831 Node* top = igvn->C->top();
832 ResourceMark rm;
833 Unique_Node_List unreachable; // visit each only once
834 unreachable.push(this);
835 // Recursively find all control inputs.
836 for (uint i = 0; i < unreachable.size(); i++) {
837 Node* n = unreachable.at(i);
838 for (uint i = 0; i < n->req(); ++i) {
839 Node* m = n->in(i);
840 assert(m == nullptr || !m->is_Root(), "Should be unreachable from root");
841 if (m != nullptr && m->is_CFG()) {
842 unreachable.push(m);
843 }
844 }
845 }
846 // Remove all unreachable nodes.
847 for (uint i = 0; i < unreachable.size(); i++) {
848 Node* n = unreachable.at(i);
849 if (n->is_Region()) {
850 // Eagerly replace phis with top to avoid regionless phis.
851 n->set_req(0, nullptr);
852 bool progress = true;
853 uint max = n->outcnt();
854 DUIterator j;
855 while (progress) {
856 progress = false;
857 for (j = n->outs(); n->has_out(j); j++) {
858 Node* u = n->out(j);
859 if (u->is_Phi()) {
860 igvn->replace_node(u, top);
861 if (max != n->outcnt()) {
862 progress = true;
863 j = n->refresh_out_pos(j);
864 max = n->outcnt();
865 }
866 }
867 }
868 }
869 }
870 igvn->replace_node(n, top);
871 }
872 }
873
874 //------------------------------optimize_trichotomy--------------------------
875 // Optimize nested comparisons of the following kind:
876 //
877 // int compare(int a, int b) {
878 // return (a < b) ? -1 : (a == b) ? 0 : 1;
879 // }
880 //
881 // Shape 1:
882 // if (compare(a, b) == 1) { ... } -> if (a > b) { ... }
883 //
884 // Shape 2:
885 // if (compare(a, b) == 0) { ... } -> if (a == b) { ... }
886 //
// The above code leads to the following IR shapes where both Ifs compare the
888 // same value and two out of three region inputs idx1 and idx2 map to
889 // the same value and control flow.
890 //
891 // (1) If (2) If
892 // / \ / \
893 // Proj Proj Proj Proj
894 // | \ | \
895 // | If | If If
896 // | / \ | / \ / \
897 // | Proj Proj | Proj Proj ==> Proj Proj
898 // | / / \ | / | /
899 // Region / \ | / | /
900 // \ / \ | / | /
901 // Region Region Region
902 //
903 // The method returns true if 'this' is modified and false otherwise.
904 bool RegionNode::optimize_trichotomy(PhaseIterGVN* igvn) {
905 int idx1 = 1, idx2 = 2;
906 Node* region = nullptr;
907 if (req() == 3 && in(1) != nullptr && in(2) != nullptr) {
908 // Shape 1: Check if one of the inputs is a region that merges two control
909 // inputs and has no other users (especially no Phi users).
910 region = in(1)->isa_Region() ? in(1) : in(2)->isa_Region();
911 if (region == nullptr || region->outcnt() != 2 || region->req() != 3) {
912 return false; // No suitable region input found
913 }
914 } else if (req() == 4) {
915 // Shape 2: Check if two control inputs map to the same value of the unique phi
916 // user and treat these as if they would come from another region (shape (1)).
917 PhiNode* phi = has_unique_phi();
918 if (phi == nullptr) {
919 return false; // No unique phi user
920 }
921 if (phi->in(idx1) != phi->in(idx2)) {
922 idx2 = 3;
923 if (phi->in(idx1) != phi->in(idx2)) {
924 idx1 = 2;
925 if (phi->in(idx1) != phi->in(idx2)) {
926 return false; // No equal phi inputs found
927 }
928 }
929 }
930 assert(phi->in(idx1) == phi->in(idx2), "must be"); // Region is merging same value
931 region = this;
932 }
933 if (region == nullptr || region->in(idx1) == nullptr || region->in(idx2) == nullptr) {
934 return false; // Region does not merge two control inputs
935 }
// At this point we know that region->in(idx1) and region->in(idx2) map to the same
937 // value and control flow. Now search for ifs that feed into these region inputs.
938 IfProjNode* proj1 = region->in(idx1)->isa_IfProj();
939 IfProjNode* proj2 = region->in(idx2)->isa_IfProj();
940 if (proj1 == nullptr || proj1->outcnt() != 1 ||
941 proj2 == nullptr || proj2->outcnt() != 1) {
942 return false; // No projection inputs with region as unique user found
943 }
944 assert(proj1 != proj2, "should be different projections");
945 IfNode* iff1 = proj1->in(0)->isa_If();
946 IfNode* iff2 = proj2->in(0)->isa_If();
947 if (iff1 == nullptr || iff1->outcnt() != 2 ||
948 iff2 == nullptr || iff2->outcnt() != 2) {
949 return false; // No ifs found
950 }
951 if (iff1 == iff2) {
952 igvn->add_users_to_worklist(iff1); // Make sure dead if is eliminated
953 igvn->replace_input_of(region, idx1, iff1->in(0));
954 igvn->replace_input_of(region, idx2, igvn->C->top());
955 return (region == this); // Remove useless if (both projections map to the same control/value)
956 }
957 BoolNode* bol1 = iff1->in(1)->isa_Bool();
958 BoolNode* bol2 = iff2->in(1)->isa_Bool();
959 if (bol1 == nullptr || bol2 == nullptr) {
960 return false; // No bool inputs found
961 }
962 Node* cmp1 = bol1->in(1);
963 Node* cmp2 = bol2->in(1);
964 bool commute = false;
965 if (!cmp1->is_Cmp() || !cmp2->is_Cmp()) {
966 return false; // No comparison
967 } else if (cmp1->Opcode() == Op_CmpF || cmp1->Opcode() == Op_CmpD ||
968 cmp2->Opcode() == Op_CmpF || cmp2->Opcode() == Op_CmpD ||
969 cmp1->Opcode() == Op_CmpP || cmp1->Opcode() == Op_CmpN ||
970 cmp2->Opcode() == Op_CmpP || cmp2->Opcode() == Op_CmpN ||
971 cmp1->is_SubTypeCheck() || cmp2->is_SubTypeCheck() ||
972 cmp1->is_FlatArrayCheck() || cmp2->is_FlatArrayCheck()) {
973 // Floats and pointers don't exactly obey trichotomy. To be on the safe side, don't transform their tests.
974 // SubTypeCheck is not commutative
975 return false;
976 } else if (cmp1 != cmp2) {
977 if (cmp1->in(1) == cmp2->in(2) &&
978 cmp1->in(2) == cmp2->in(1)) {
979 commute = true; // Same but swapped inputs, commute the test
980 } else {
981 return false; // Ifs are not comparing the same values
982 }
983 }
984 proj1 = proj1->other_if_proj();
985 proj2 = proj2->other_if_proj();
986 if (!((proj1->unique_ctrl_out_or_null() == iff2 &&
987 proj2->unique_ctrl_out_or_null() == this) ||
988 (proj2->unique_ctrl_out_or_null() == iff1 &&
989 proj1->unique_ctrl_out_or_null() == this))) {
990 return false; // Ifs are not connected through other projs
991 }
992 // Found 'iff -> proj -> iff -> proj -> this' shape where all other projs are merged
993 // through 'region' and map to the same value. Merge the boolean tests and replace
994 // the ifs by a single comparison.
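//
// For example (shape 1, illustrative only): if iff1 tests "a < b" and iff2 is
// reached through its false projection, test1 is "ge"; if iff2 tests "a == b"
// and this Region is reached through its false projection, test2 is "ne".
// Merging "ge" with "ne" yields "gt", i.e. a single "a > b" test.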
995 BoolTest test1 = (proj1->_con == 1) ? bol1->_test : bol1->_test.negate();
996 BoolTest test2 = (proj2->_con == 1) ? bol2->_test : bol2->_test.negate();
997 test1 = commute ? test1.commute() : test1;
998 // After possibly commuting test1, if we can merge test1 & test2, then proj2/iff2/bol2 are the nodes to refine.
999 BoolTest::mask res = test1.merge(test2);
1000 if (res == BoolTest::illegal) {
1001 return false; // Unable to merge tests
1002 }
1003 // Adjust iff1 to always pass (only iff2 will remain)
1004 igvn->replace_input_of(iff1, 1, igvn->intcon(proj1->_con));
1005 if (res == BoolTest::never) {
1006 // Merged test is always false, adjust iff2 to always fail
1007 igvn->replace_input_of(iff2, 1, igvn->intcon(1 - proj2->_con));
1008 } else {
1009 // Replace bool input of iff2 with merged test
1010 BoolNode* new_bol = new BoolNode(bol2->in(1), res);
1011 igvn->replace_input_of(iff2, 1, igvn->transform((proj2->_con == 1) ? new_bol : new_bol->negate(igvn)));
1012 if (new_bol->outcnt() == 0) {
1013 igvn->remove_dead_node(new_bol);
1014 }
1015 }
1016 return false;
1017 }
1018
1019 const RegMask &RegionNode::out_RegMask() const {
1020 return RegMask::EMPTY;
1021 }
1022
1023 #ifndef PRODUCT
1024 void RegionNode::dump_spec(outputStream* st) const {
1025 Node::dump_spec(st);
1026 switch (loop_status()) {
1027 case RegionNode::LoopStatus::MaybeIrreducibleEntry:
1028 st->print("#irreducible ");
1029 break;
1030 case RegionNode::LoopStatus::Reducible:
1031 st->print("#reducible ");
1032 break;
1033 case RegionNode::LoopStatus::NeverIrreducibleEntry:
1034 break; // nothing
1035 }
1036 }
1037 #endif
1038
1039 // Find the one non-null required input. RegionNode only
1040 Node *Node::nonnull_req() const {
1041 assert( is_Region(), "" );
1042 for( uint i = 1; i < _cnt; i++ )
1043 if( in(i) )
1044 return in(i);
1045 ShouldNotReachHere();
1046 return nullptr;
1047 }
1048
1049
1050 //=============================================================================
1051 // note that these functions assume that the _adr_type field is flat
1052 uint PhiNode::hash() const {
1053 const Type* at = _adr_type;
1054 return TypeNode::hash() + (at ? at->hash() : 0);
1055 }
1056 bool PhiNode::cmp( const Node &n ) const {
1057 return TypeNode::cmp(n) && _adr_type == ((PhiNode&)n)._adr_type;
1058 }
1059 static inline
1060 const TypePtr* flatten_phi_adr_type(const TypePtr* at) {
1061 if (at == nullptr || at == TypePtr::BOTTOM) return at;
1062 return Compile::current()->alias_type(at)->adr_type();
1063 }
1064
1065 //----------------------------make---------------------------------------------
1066 // create a new phi with edges matching r and set (initially) to x
1067 PhiNode* PhiNode::make(Node* r, Node* x, const Type *t, const TypePtr* at) {
1068 uint preds = r->req(); // Number of predecessor paths
1069 assert(t != Type::MEMORY || at == flatten_phi_adr_type(at) || (flatten_phi_adr_type(at) == TypeAryPtr::INLINES && Compile::current()->flat_accesses_share_alias()), "flatten at");
1070 PhiNode* p = new PhiNode(r, t, at);
1071 for (uint j = 1; j < preds; j++) {
1072 // Fill in all inputs, except those which the region does not yet have
1073 if (r->in(j) != nullptr)
1074 p->init_req(j, x);
1075 }
1076 return p;
1077 }
1078 PhiNode* PhiNode::make(Node* r, Node* x) {
1079 const Type* t = x->bottom_type();
1080 const TypePtr* at = nullptr;
1081 if (t == Type::MEMORY) at = flatten_phi_adr_type(x->adr_type());
1082 return make(r, x, t, at);
1083 }
1084 PhiNode* PhiNode::make_blank(Node* r, Node* x) {
1085 const Type* t = x->bottom_type();
1086 const TypePtr* at = nullptr;
1087 if (t == Type::MEMORY) at = flatten_phi_adr_type(x->adr_type());
1088 return new PhiNode(r, t, at);
1089 }
1090
1091
1092 //------------------------slice_memory-----------------------------------------
1093 // create a new phi with narrowed memory type
1094 PhiNode* PhiNode::slice_memory(const TypePtr* adr_type) const {
1095 PhiNode* mem = (PhiNode*) clone();
1096 *(const TypePtr**)&mem->_adr_type = adr_type;
1097 // convert self-loops, or else we get a bad graph
1098 for (uint i = 1; i < req(); i++) {
1099 if ((const Node*)in(i) == this) mem->set_req(i, mem);
1100 }
1101 mem->verify_adr_type();
1102 return mem;
1103 }
1104
1105 //------------------------split_out_instance-----------------------------------
1106 // Split out an instance type from a bottom phi.
1107 PhiNode* PhiNode::split_out_instance(const TypePtr* at, PhaseIterGVN *igvn) const {
1108 const TypeOopPtr *t_oop = at->isa_oopptr();
1109 assert(t_oop != nullptr && t_oop->is_known_instance(), "expecting instance oopptr");
1110
1111 // Check if an appropriate node already exists.
1112 Node *region = in(0);
1113 for (DUIterator_Fast kmax, k = region->fast_outs(kmax); k < kmax; k++) {
1114 Node* use = region->fast_out(k);
1115 if( use->is_Phi()) {
1116 PhiNode *phi2 = use->as_Phi();
1117 if (phi2->type() == Type::MEMORY && phi2->adr_type() == at) {
1118 return phi2;
1119 }
1120 }
1121 }
1122 Compile *C = igvn->C;
1123 ResourceMark rm;
1124 Node_Array node_map;
1125 Node_Stack stack(C->live_nodes() >> 4);
1126 PhiNode *nphi = slice_memory(at);
1127 igvn->register_new_node_with_optimizer( nphi );
1128 node_map.map(_idx, nphi);
1129 stack.push((Node *)this, 1);
1130 while(!stack.is_empty()) {
1131 PhiNode *ophi = stack.node()->as_Phi();
1132 uint i = stack.index();
1133 assert(i >= 1, "not control edge");
1134 stack.pop();
1135 nphi = node_map[ophi->_idx]->as_Phi();
1136 for (; i < ophi->req(); i++) {
1137 Node *in = ophi->in(i);
1138 if (in == nullptr || igvn->type(in) == Type::TOP)
1139 continue;
1140 Node *opt = MemNode::optimize_simple_memory_chain(in, t_oop, nullptr, igvn);
1141 PhiNode *optphi = opt->is_Phi() ? opt->as_Phi() : nullptr;
1142 if (optphi != nullptr && optphi->adr_type() == TypePtr::BOTTOM) {
1143 opt = node_map[optphi->_idx];
1144 if (opt == nullptr) {
1145 stack.push(ophi, i);
1146 nphi = optphi->slice_memory(at);
1147 igvn->register_new_node_with_optimizer( nphi );
1148 node_map.map(optphi->_idx, nphi);
1149 ophi = optphi;
1150 i = 0; // will get incremented at top of loop
1151 continue;
1152 }
1153 }
1154 nphi->set_req(i, opt);
1155 }
1156 }
1157 return nphi;
1158 }
1159
1160 //------------------------verify_adr_type--------------------------------------
1161 #ifdef ASSERT
1162 void PhiNode::verify_adr_type(VectorSet& visited, const TypePtr* at) const {
1163 if (visited.test_set(_idx)) return; //already visited
1164
1165 // recheck constructor invariants:
1166 verify_adr_type(false);
1167
1168 // recheck local phi/phi consistency:
1169 assert(_adr_type == at || _adr_type == TypePtr::BOTTOM,
1170 "adr_type must be consistent across phi nest");
1171
1172 // walk around
1173 for (uint i = 1; i < req(); i++) {
1174 Node* n = in(i);
1175 if (n == nullptr) continue;
1176 const Node* np = in(i);
1177 if (np->is_Phi()) {
1178 np->as_Phi()->verify_adr_type(visited, at);
1179 } else if (n->bottom_type() == Type::TOP
1180 || (n->is_Mem() && n->in(MemNode::Address)->bottom_type() == Type::TOP)) {
1181 // ignore top inputs
1182 } else {
1183 const TypePtr* nat = flatten_phi_adr_type(n->adr_type());
1184 // recheck phi/non-phi consistency at leaves:
1185 assert((nat != nullptr) == (at != nullptr), "");
1186 assert(nat == at || nat == TypePtr::BOTTOM,
1187 "adr_type must be consistent at leaves of phi nest");
1188 }
1189 }
1190 }
1191
1192 // Verify a whole nest of phis rooted at this one.
1193 void PhiNode::verify_adr_type(bool recursive) const {
1194 if (VMError::is_error_reported()) return; // muzzle asserts when debugging an error
1195 if (Node::in_dump()) return; // muzzle asserts when printing
1196
1197 assert((_type == Type::MEMORY) == (_adr_type != nullptr), "adr_type for memory phis only");
// Flat array elements shouldn't get their own memory slice until flat_accesses_share_alias is cleared.
// It could be that the graph has no loads/stores and flat_accesses_share_alias is never cleared. EA could still
// create per-element Phis but that wouldn't be a problem as there are no memory accesses for that array.
1201 assert(_adr_type == nullptr || _adr_type->isa_aryptr() == nullptr ||
1202 _adr_type->is_aryptr()->is_known_instance() ||
1203 !_adr_type->is_aryptr()->is_flat() ||
1204 !Compile::current()->flat_accesses_share_alias() ||
1205 _adr_type == TypeAryPtr::INLINES, "flat array element shouldn't get its own slice yet");
1206
1207 if (!VerifyAliases) return; // verify thoroughly only if requested
1208
1209 assert(_adr_type == flatten_phi_adr_type(_adr_type),
1210 "Phi::adr_type must be pre-normalized");
1211
1212 if (recursive) {
1213 VectorSet visited;
1214 verify_adr_type(visited, _adr_type);
1215 }
1216 }
1217 #endif
1218
1219
1220 //------------------------------Value------------------------------------------
1221 // Compute the type of the PhiNode
1222 const Type* PhiNode::Value(PhaseGVN* phase) const {
1223 Node *r = in(0); // RegionNode
1224 if( !r ) // Copy or dead
1225 return in(1) ? phase->type(in(1)) : Type::TOP;
1226
1227 // Note: During parsing, phis are often transformed before their regions.
1228 // This means we have to use type_or_null to defend against untyped regions.
1229 if( phase->type_or_null(r) == Type::TOP ) // Dead code?
1230 return Type::TOP;
1231
1232 // Check for trip-counted loop. If so, be smarter.
1233 BaseCountedLoopNode* l = r->is_BaseCountedLoop() ? r->as_BaseCountedLoop() : nullptr;
1234 if (l && ((const Node*)l->phi() == this)) { // Trip counted loop!
1235 // protect against init_trip() or limit() returning null
1236 if (l->can_be_counted_loop(phase)) {
1237 const Node* init = l->init_trip();
1238 const Node* limit = l->limit();
1239 const Node* stride = l->stride();
1240 if (init != nullptr && limit != nullptr && stride != nullptr) {
1241 const TypeInteger* lo = phase->type(init)->isa_integer(l->bt());
1242 const TypeInteger* hi = phase->type(limit)->isa_integer(l->bt());
1243 const TypeInteger* stride_t = phase->type(stride)->isa_integer(l->bt());
1244 if (lo != nullptr && hi != nullptr && stride_t != nullptr) { // Dying loops might have TOP here
1245 assert(stride_t->is_con(), "bad stride type");
1246 BoolTest::mask bt = l->loopexit()->test_trip();
1247 // If the loop exit condition is "not equal", the condition
1248 // would not trigger if init > limit (if stride > 0) or if
// init < limit (if stride < 0) so we can't deduce bounds
1250 // for the iv from the exit condition.
1251 if (bt != BoolTest::ne) {
1252 jlong stride_con = stride_t->get_con_as_long(l->bt());
1253 if (stride_con < 0) { // Down-counter loop
1254 swap(lo, hi);
1255 jlong iv_range_lower_limit = lo->lo_as_long();
1256 // Prevent overflow when adding one below
1257 if (iv_range_lower_limit < max_signed_integer(l->bt())) {
1258 // The loop exit condition is: iv + stride > limit (iv is this Phi). So the loop iterates until
1259 // iv + stride <= limit
1260 // We know that: limit >= lo->lo_as_long() and stride <= -1
// So when the loop exits, iv has to be at least lo->lo_as_long() + 1
1262 iv_range_lower_limit += 1; // lo is after decrement
// Exact bounds for the phi can be computed when ABS(stride) is greater than 1 and the bounds are constant.
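// Worked example (illustrative only): init = 11, limit = 0, stride = -3.
// The phi takes the values 11, 8, 5, 2, so its smallest value is
// 11 - ((11 - 0 - 1) / 3) * 3 = 2, tighter than the generic bound limit + 1 = 1.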
1264 if (lo->is_con() && hi->is_con() && hi->lo_as_long() > lo->hi_as_long() && stride_con != -1) {
1265 julong uhi = static_cast<julong>(hi->lo_as_long());
1266 julong ulo = static_cast<julong>(lo->hi_as_long());
1267 julong diff = ((uhi - ulo - 1) / (-stride_con)) * (-stride_con);
1268 julong ufirst = hi->lo_as_long() - diff;
1269 iv_range_lower_limit = reinterpret_cast<jlong &>(ufirst);
1270 assert(iv_range_lower_limit >= lo->lo_as_long() + 1, "should end up with narrower range");
1271 }
1272 }
1273 return TypeInteger::make(MIN2(iv_range_lower_limit, hi->lo_as_long()), hi->hi_as_long(), 3, l->bt())->filter_speculative(_type);
1274 } else if (stride_con >= 0) {
1275 jlong iv_range_upper_limit = hi->hi_as_long();
1276 // Prevent overflow when subtracting one below
1277 if (iv_range_upper_limit > min_signed_integer(l->bt())) {
1278 // The loop exit condition is: iv + stride < limit (iv is this Phi). So the loop iterates until
1279 // iv + stride >= limit
1280 // We know that: limit <= hi->hi_as_long() and stride >= 1
1281 // So when the loop exits, iv has to be at most hi->hi_as_long() - 1
1282 iv_range_upper_limit -= 1;
// Exact bounds for the phi can be computed when ABS(stride) is greater than 1 and the bounds are constant.
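// Worked example (illustrative only): init = 0, limit = 11, stride = 3.
// The phi takes the values 0, 3, 6, 9, so its largest value is
// 0 + ((11 - 0 - 1) / 3) * 3 = 9, tighter than the generic bound limit - 1 = 10.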
1284 if (lo->is_con() && hi->is_con() && hi->lo_as_long() > lo->hi_as_long() && stride_con != 1) {
1285 julong uhi = static_cast<julong>(hi->lo_as_long());
1286 julong ulo = static_cast<julong>(lo->hi_as_long());
1287 julong diff = ((uhi - ulo - 1) / stride_con) * stride_con;
1288 julong ulast = lo->hi_as_long() + diff;
1289 iv_range_upper_limit = reinterpret_cast<jlong &>(ulast);
1290 assert(iv_range_upper_limit <= hi->hi_as_long() - 1, "should end up with narrower range");
1291 }
1292 }
1293 return TypeInteger::make(lo->lo_as_long(), MAX2(lo->hi_as_long(), iv_range_upper_limit), 3, l->bt())->filter_speculative(_type);
1294 }
1295 }
1296 }
1297 }
1298 } else if (l->in(LoopNode::LoopBackControl) != nullptr &&
1299 in(LoopNode::EntryControl) != nullptr &&
1300 phase->type(l->in(LoopNode::LoopBackControl)) == Type::TOP) {
1301 // During CCP, if we saturate the type of a counted loop's Phi
1302 // before the special code for counted loop above has a chance
1303 // to run (that is as long as the type of the backedge's control
1304 // is top), we might end up with non monotonic types
1305 return phase->type(in(LoopNode::EntryControl))->filter_speculative(_type);
1306 }
1307 }
1308
1309 // Default case: merge all inputs
1310 const Type *t = Type::TOP; // Merged type starting value
1311 for (uint i = 1; i < req(); ++i) {// For all paths in
1312 // Reachable control path?
1313 if (r->in(i) && phase->type(r->in(i)) == Type::CONTROL) {
1314 const Type* ti = phase->type(in(i));
1315 t = t->meet_speculative(ti);
1316 }
1317 }
1318
1319 // The worst-case type (from ciTypeFlow) should be consistent with "t".
1320 // That is, we expect that "t->higher_equal(_type)" holds true.
1321 // There are various exceptions:
1322 // - Inputs which are phis might in fact be widened unnecessarily.
1323 // For example, an input might be a widened int while the phi is a short.
1324 // - Inputs might be BotPtrs but this phi is dependent on a null check,
1325 // and postCCP has removed the cast which encodes the result of the check.
1326 // - The type of this phi is an interface, and the inputs are classes.
1327 // - Value calls on inputs might produce fuzzy results.
1328 // (Occurrences of this case suggest improvements to Value methods.)
1329 //
1330 // It is not possible to see Type::BOTTOM values as phi inputs,
1331 // because the ciTypeFlow pre-pass produces verifier-quality types.
1332 const Type* ft = t->filter_speculative(_type); // Worst case type
1333
1334 #ifdef ASSERT
1335 // The following logic has been moved into TypeOopPtr::filter.
1336 const Type* jt = t->join_speculative(_type);
1337 if (jt->empty()) { // Emptied out???
1338 // Otherwise it's something stupid like non-overlapping int ranges
1339 // found on dying counted loops.
1340 assert(ft == Type::TOP, ""); // Canonical empty value
1341 }
1342
1343 else {
1344
1345 if (jt != ft && jt->base() == ft->base()) {
1346 if (jt->isa_int() &&
1347 jt->is_int()->_lo == ft->is_int()->_lo &&
1348 jt->is_int()->_hi == ft->is_int()->_hi)
1349 jt = ft;
1350 if (jt->isa_long() &&
1351 jt->is_long()->_lo == ft->is_long()->_lo &&
1352 jt->is_long()->_hi == ft->is_long()->_hi)
1353 jt = ft;
1354 }
1355 if (jt != ft) {
1356 tty->print("merge type: "); t->dump(); tty->cr();
1357 tty->print("kill type: "); _type->dump(); tty->cr();
1358 tty->print("join type: "); jt->dump(); tty->cr();
1359 tty->print("filter type: "); ft->dump(); tty->cr();
1360 }
1361 assert(jt == ft, "");
1362 }
1363 #endif //ASSERT
1364
// In rare cases, during an IGVN call to `PhiNode::Value`, `_type` and `t` have incompatible opinions on the speculative type,
// resulting in a too small intersection (such as AnyNull), which is removed in cleanup_speculative.
// As a result, `ft` has no speculative type (ft->speculative() == nullptr).
// At the end of the current `PhiNode::Value` call, the returned `ft` is stored into `_type`
// (see PhaseIterGVN::transform_old -> raise_bottom_type -> set_type).
//
// It is possible that verification happens immediately after, without any change to the current node or any of its inputs.
// In the verification invocation of `PhiNode::Value`, `t` would be the same as the IGVN `t` (the union of the unchanged input types),
// but the new `_type` is the value returned by the IGVN invocation of `PhiNode::Value`, the former `ft`, which has no speculative type.
// Thus, the result of `t->filter_speculative(_type)`, the new `ft`, gets the speculative type of `t`, which is not empty. Since the
// result of the verification invocation of `PhiNode::Value` has some speculative type, it is not the same as the previously returned type
// (which had no speculative type), making verification fail.
//
// In such a case, doing the filtering one more time allows us to reach a fixpoint.
1379 if (ft->speculative() == nullptr && t->speculative() != nullptr) {
1380 ft = t->filter_speculative(ft);
1381 }
1382 verify_type_stability(phase, t, ft);
1383
1384 // Deal with conversion problems found in data loops.
1385 ft = phase->saturate_and_maybe_push_to_igvn_worklist(this, ft);
1386 return ft;
1387 }
1388
1389 #ifdef ASSERT
1390 // Makes sure that a newly computed type is stable when filtered against the incoming types.
1391 // Otherwise, we may have IGVN verification failures. See PhiNode::Value, and the second
1392 // filtering (enforcing stability), for details.
1393 void PhiNode::verify_type_stability(const PhaseGVN* const phase, const Type* const union_of_input_types, const Type* const new_type) const {
1394 const Type* doubly_filtered_type = union_of_input_types->filter_speculative(new_type);
1395 if (Type::equals(new_type, doubly_filtered_type)) {
1396 return;
1397 }
1398
1399 stringStream ss;
1400
1401 ss.print_cr("At node:");
1402 this->dump("\n", false, &ss);
1403
1404 const Node* region = in(Region);
1405 for (uint i = 1; i < req(); ++i) {
1406 ss.print("in(%d): ", i);
1407 if (region->in(i) != nullptr && phase->type(region->in(i)) == Type::CONTROL) {
1408 const Type* ti = phase->type(in(i));
1409 ti->dump_on(&ss);
1410 }
1411 ss.print_cr("");
1412 }
1413
1414 ss.print("t: ");
1415 union_of_input_types->dump_on(&ss);
1416 ss.print_cr("");
1417
1418 ss.print("_type: ");
1419 _type->dump_on(&ss);
1420 ss.print_cr("");
1421
1422 ss.print("Filter once: ");
1423 new_type->dump_on(&ss);
1424 ss.print_cr("");
1425 ss.print("Filter twice: ");
1426 doubly_filtered_type->dump_on(&ss);
1427 ss.print_cr("");
1428 tty->print("%s", ss.base());
1429 tty->flush();
1430 assert(false, "computed type would not pass verification");
1431 }
1432 #endif
1433
1434 // Does this Phi represent a simple well-shaped diamond merge? Return the
1435 // index of the true path or 0 otherwise.
1436 int PhiNode::is_diamond_phi() const {
1437 Node* region = in(0);
1438 assert(region != nullptr && region->is_Region(), "phi must have region");
1439 if (!region->as_Region()->is_diamond()) {
1440 return 0;
1441 }
1442
1443 if (region->in(1)->is_IfTrue()) {
1444 assert(region->in(2)->is_IfFalse(), "bad If");
1445 return 1;
1446 } else {
1447 // Flipped projections.
1448 assert(region->in(2)->is_IfTrue(), "bad If");
1449 return 2;
1450 }
1451 }
1452
// If we find the corresponding graph shape, apply the following transformation, remove the
// involved memory phi and return true. Otherwise, return false.
1455 //
1456 // If If
1457 // / \ / \
1458 // IfFalse IfTrue /- Some Node IfFalse IfTrue
1459 // \ / / / \ / Some Node
1460 // Region / /-MergeMem ===> Region |
1461 // / \---Phi | MergeMem
1462 // [other phis] \ [other phis] |
1463 // use use
1464 bool PhiNode::try_clean_memory_phi(PhaseIterGVN* igvn) {
1465 if (_type != Type::MEMORY) {
1466 return false;
1467 }
1468 assert(is_diamond_phi() > 0, "sanity");
1469 assert(req() == 3, "same as region");
1470 RegionNode* region = in(0)->as_Region();
1471 for (uint i = 1; i < 3; i++) {
1472 Node* phi_input = in(i);
1473 if (phi_input != nullptr && phi_input->is_MergeMem() && region->in(i)->outcnt() == 1) {
1474 // Nothing is control-dependent on path #i except the region itself.
1475 MergeMemNode* merge_mem = phi_input->as_MergeMem();
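// With a diamond phi, req() == 3, so 3 - i maps input 1 <-> 2 to find the other phi input.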
1476 uint j = 3 - i;
1477 Node* other_phi_input = in(j);
1478 if (other_phi_input != nullptr && other_phi_input == merge_mem->base_memory() && !is_data_loop(region, phi_input, igvn)) {
1479 // merge_mem is a successor memory to other_phi_input, and is not pinned inside the diamond, so push it out.
1480 // Only proceed if the transformation doesn't create a data loop
1481 // This will allow the diamond to collapse completely if there are no other phis left.
1482 igvn->replace_node(this, merge_mem);
1483 return true;
1484 }
1485 }
1486 }
1487 return false;
1488 }
1489
1490 //----------------------------check_cmove_id-----------------------------------
1491 // Check for CMove'ing a constant after comparing against the constant.
1492 // Happens all the time now, since if we compare equality vs a constant in
1493 // the parser, we "know" the variable is constant on one path and we force
1494 // it. Thus code like "if( x==0 ) {/*EMPTY*/}" ends up inserting a
1495 // conditional move: "x = (x==0)?0:x;". Yucko. This fix is slightly more
1496 // general in that we don't need constants. Since CMove's are only inserted
1497 // in very special circumstances, we do it here on generic Phi's.
1498 Node* PhiNode::is_cmove_id(PhaseTransform* phase, int true_path) {
1499 assert(true_path !=0, "only diamond shape graph expected");
1500
1501 // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
1502 // phi->region->if_proj->ifnode->bool->cmp
1503 Node* region = in(0);
1504 Node* iff = region->in(1)->in(0);
1505 BoolNode* b = iff->in(1)->as_Bool();
1506 Node* cmp = b->in(1);
1507 Node* tval = in(true_path);
1508 Node* fval = in(3-true_path);
1509 Node* id = CMoveNode::is_cmove_id(phase, cmp, tval, fval, b);
1510 if (id == nullptr)
1511 return nullptr;
1512
1513 // Either value might be a cast that depends on a branch of 'iff'.
1514 // Since the 'id' value will float free of the diamond, either
1515 // decast or return failure.
1516 Node* ctl = id->in(0);
1517 if (ctl != nullptr && ctl->in(0) == iff) {
1518 if (id->is_ConstraintCast()) {
1519 return id->in(1);
1520 } else {
1521 // Don't know how to disentangle this value.
1522 return nullptr;
1523 }
1524 }
1525
1526 return id;
1527 }
1528
1529 //------------------------------Identity---------------------------------------
1530 // Check for Region being Identity.
1531 Node* PhiNode::Identity(PhaseGVN* phase) {
1532 if (must_wait_for_region_in_irreducible_loop(phase)) {
1533 return this;
1534 }
1535 // Check for no merging going on
1536 // (There used to be special-case code here when this->region->is_Loop.
1537 // It would check for a tributary phi on the backedge that trivially reduced to
1538 // the main phi, perhaps through a single cast. The unique_input method
1539 // does all this and more, by reducing such tributaries to 'this'.)
1540 Node* uin = unique_input(phase, false);
1541 if (uin != nullptr) {
1542 return uin;
1543 }
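// No unique direct input was found; check whether all inputs, looking recursively
// through input phis, reduce to a single constant.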
1544 uin = unique_constant_input_recursive(phase);
1545 if (uin != nullptr) {
1546 return uin;
1547 }
1548
1549 int true_path = is_diamond_phi();
1550 // Delay CMove'ing identity if Ideal has not had the chance to handle unsafe cases, yet.
1551 if (true_path != 0 && !(phase->is_IterGVN() && wait_for_region_igvn(phase))) {
1552 Node* id = is_cmove_id(phase, true_path);
1553 if (id != nullptr) {
1554 return id;
1555 }
1556 }
1557
1558 // Looking for phis with identical inputs. If we find one that has
1559 // type TypePtr::BOTTOM, replace the current phi with the bottom phi.
1560 if (phase->is_IterGVN() && type() == Type::MEMORY && adr_type() !=
1561 TypePtr::BOTTOM && !adr_type()->is_known_instance()) {
1562 uint phi_len = req();
1563 Node* phi_reg = region();
1564 for (DUIterator_Fast imax, i = phi_reg->fast_outs(imax); i < imax; i++) {
1565 Node* u = phi_reg->fast_out(i);
1566 if (u->is_Phi() && u->as_Phi()->type() == Type::MEMORY &&
1567 u->adr_type() == TypePtr::BOTTOM && u->in(0) == phi_reg &&
1568 u->req() == phi_len) {
1569 for (uint j = 1; j < phi_len; j++) {
1570 if (in(j) != u->in(j)) {
1571 u = nullptr;
1572 break;
1573 }
1574 }
1575 if (u != nullptr) {
1576 return u;
1577 }
1578 }
1579 }
1580 }
1581
1582 return this; // No identity
1583 }
1584
1585 //-----------------------------unique_input------------------------------------
1586 // Find the unique value, discounting top, self-loops, and casts.
1587 // Return top if there are no inputs, and self if there are multiple.
1588 Node* PhiNode::unique_input(PhaseValues* phase, bool uncast) {
1589 // 1) One unique direct input,
1590 // or if uncast is true:
1591 // 2) some of the inputs have an intervening ConstraintCast
1592 // 3) an input is a self loop
1593 //
1594 // 1) input or 2) input or 3) input __
1595 // / \ / \ \ / \
1596 // \ / | cast phi cast
1597 // phi \ / / \ /
1598 // phi / --
1599
1600 Node* r = in(0); // RegionNode
1601 Node* input = nullptr; // The unique direct input (maybe uncasted = ConstraintCasts removed)
1602
1603 for (uint i = 1, cnt = req(); i < cnt; ++i) {
1604 Node* rc = r->in(i);
1605 if (rc == nullptr || phase->type(rc) == Type::TOP)
1606 continue; // ignore unreachable control path
1607 Node* n = in(i);
1608 if (n == nullptr)
1609 continue;
1610 Node* un = n;
1611 if (uncast) {
1612 #ifdef ASSERT
1613 Node* m = un->uncast();
1614 #endif
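// Strip a chain of ConstraintCasts one level at a time, stopping before a cast
// whose input is a raw pointer while the cast itself produces an oop.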
1615 while (un != nullptr && un->req() == 2 && un->is_ConstraintCast()) {
1616 Node* next = un->in(1);
1617 if (phase->type(next)->isa_rawptr() && phase->type(un)->isa_oopptr()) {
1618 // risk exposing raw ptr at safepoint
1619 break;
1620 }
1621 un = next;
1622 }
1623 assert(m == un || un->in(1) == m, "Only expected at CheckCastPP from allocation");
1624 }
1625 if (un == nullptr || un == this || phase->type(un) == Type::TOP) {
1626 continue; // ignore if top, or in(i) and "this" are in a data cycle
1627 }
1628 // Check for a unique input (maybe uncasted)
1629 if (input == nullptr) {
1630 input = un;
1631 } else if (input != un) {
1632 input = NodeSentinel; // no unique input
1633 }
1634 }
1635 if (input == nullptr) {
1636 return phase->C->top(); // no inputs
1637 }
1638
1639 if (input != NodeSentinel) {
1640 return input; // one unique direct input
1641 }
1642
1643 // Nothing.
1644 return nullptr;
1645 }
1646
1647 // Find the unique input, try to look recursively through input Phis
1648 Node* PhiNode::unique_constant_input_recursive(PhaseGVN* phase) {
1649 if (!phase->is_IterGVN()) {
1650 return nullptr;
1651 }
1652
1653 ResourceMark rm;
1654 Node* unique = nullptr;
1655 Unique_Node_List visited;
1656 visited.push(this);
1657
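// Breadth-first walk over this phi and any input phis: bail out as soon as a
// non-constant, non-phi input or a second distinct constant is found.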
1658 for (uint visited_idx = 0; visited_idx < visited.size(); visited_idx++) {
1659 Node* current = visited.at(visited_idx);
1660 for (uint i = 1; i < current->req(); i++) {
1661 Node* phi_in = current->in(i);
1662 if (phi_in == nullptr) {
1663 continue;
1664 }
1665
1666 if (phi_in->is_Phi()) {
1667 visited.push(phi_in);
1668 } else {
1669 if (unique == nullptr) {
1670 if (!phi_in->is_Con()) {
1671 return nullptr;
1672 }
1673 unique = phi_in;
1674 } else if (unique != phi_in) {
1675 return nullptr;
1676 }
1677 }
1678 }
1679 }
1680 return unique;
1681 }
1682
1683 //------------------------------is_x2logic-------------------------------------
1684 // Check for simple convert-to-boolean pattern
1685 // If:(C Bool) Region:(IfF IfT) Phi:(Region 0 1)
1686 // Convert the Phi to a Conv2B node.
1687 static Node *is_x2logic( PhaseGVN *phase, PhiNode *phi, int true_path ) {
1688 assert(true_path !=0, "only diamond shape graph expected");
1689
1690 // If we're late in the optimization process, we may have already expanded Conv2B nodes
1691 if (phase->C->post_loop_opts_phase() && !Matcher::match_rule_supported(Op_Conv2B)) {
1692 return nullptr;
1693 }
1694
1695 // Convert the true/false index into an expected 0/1 return.
1696 // Map 2->0 and 1->1.
1697 int flipped = 2-true_path;
1698
1699 // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
1700 // phi->region->if_proj->ifnode->bool->cmp
1701 Node *region = phi->in(0);
1702 Node *iff = region->in(1)->in(0);
1703 BoolNode *b = (BoolNode*)iff->in(1);
1704 const CmpNode *cmp = (CmpNode*)b->in(1);
1705
1706 Node *zero = phi->in(1);
1707 Node *one = phi->in(2);
1708 const Type *tzero = phase->type( zero );
1709 const Type *tone = phase->type( one );
1710
1711 // Check for compare vs 0
1712 const Type *tcmp = phase->type(cmp->in(2));
1713 if( tcmp != TypeInt::ZERO && tcmp != TypePtr::NULL_PTR ) {
1714 // Allow cmp-vs-1 if the other input is bounded by 0-1
1715 if( !(tcmp == TypeInt::ONE && phase->type(cmp->in(1)) == TypeInt::BOOL) )
1716 return nullptr;
1717 flipped = 1-flipped; // Test is vs 1 instead of 0!
1718 }
1719
1720 // Check for setting zero/one opposite expected
1721 if( tzero == TypeInt::ZERO ) {
1722 if( tone == TypeInt::ONE ) {
1723 } else return nullptr;
1724 } else if( tzero == TypeInt::ONE ) {
1725 if( tone == TypeInt::ZERO ) {
1726 flipped = 1-flipped;
1727 } else return nullptr;
1728 } else return nullptr;
1729
1730 // Check for boolean test backwards
1731 if( b->_test._test == BoolTest::ne ) {
1732 } else if( b->_test._test == BoolTest::eq ) {
1733 flipped = 1-flipped;
1734 } else return nullptr;
1735
1736 // Build int->bool conversion
1737 Node* n = new Conv2BNode(cmp->in(1));
1738 if (flipped) {
1739 n = new XorINode(phase->transform(n), phase->intcon(1));
1740 }
1741
1742 return n;
1743 }
1744
1745 //------------------------------is_cond_add------------------------------------
1746 // Check for simple conditional add pattern: "(P < Q) ? X+Y : X;"
1747 // To be profitable the control flow has to disappear; there can be no other
1748 // values merging here. We replace the test-and-branch with:
1749 // "(sgn(P-Q))&Y) + X". Basically, convert "(P < Q)" into 0 or -1 by
1750 // moving the carry bit from (P-Q) into a register with 'sbb EAX,EAX'.
1751 // Then convert Y to 0-or-Y and finally add.
1752 // This is a key transform for SpecJava _201_compress.
1753 static Node* is_cond_add(PhaseGVN *phase, PhiNode *phi, int true_path) {
1754 assert(true_path !=0, "only diamond shape graph expected");
1755
1756 // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
1757 // phi->region->if_proj->ifnode->bool->cmp
1758 RegionNode *region = (RegionNode*)phi->in(0);
1759 Node *iff = region->in(1)->in(0);
1760 BoolNode* b = iff->in(1)->as_Bool();
1761 const CmpNode *cmp = (CmpNode*)b->in(1);
1762
1763 // Make sure only merging this one phi here
1764 if (region->has_unique_phi() != phi) return nullptr;
1765
1766 // Make sure each arm of the diamond has exactly one output, which we assume
1767 // is the region. Otherwise, the control flow won't disappear.
1768 if (region->in(1)->outcnt() != 1) return nullptr;
1769 if (region->in(2)->outcnt() != 1) return nullptr;
1770
1771 // Check for "(P < Q)" of type signed int
1772 if (b->_test._test != BoolTest::lt) return nullptr;
1773 if (cmp->Opcode() != Op_CmpI) return nullptr;
1774
1775 Node *p = cmp->in(1);
1776 Node *q = cmp->in(2);
1777 Node *n1 = phi->in( true_path);
1778 Node *n2 = phi->in(3-true_path);
1779
1780 int op = n1->Opcode();
1781 if( op != Op_AddI // Need zero as additive identity
1782 /*&&op != Op_SubI &&
1783 op != Op_AddP &&
1784 op != Op_XorI &&
1785 op != Op_OrI*/ )
1786 return nullptr;
1787
1788 Node *x = n2;
1789 Node *y = nullptr;
1790 if( x == n1->in(1) ) {
1791 y = n1->in(2);
1792 } else if( x == n1->in(2) ) {
1793 y = n1->in(1);
1794 } else return nullptr;
1795
1796 // Not so profitable if compare and add are constants
1797 if( q->is_Con() && phase->type(q) != TypeInt::ZERO && y->is_Con() )
1798 return nullptr;
1799
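// CmpLTMask yields -1 when p < q and 0 otherwise; AND-ing it with y gives y or 0,
// which is then added to x.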
1800 Node *cmplt = phase->transform( new CmpLTMaskNode(p,q) );
1801 Node *j_and = phase->transform( new AndINode(cmplt,y) );
1802 return new AddINode(j_and,x);
1803 }
1804
1805 //------------------------------is_absolute------------------------------------
1806 // Check for absolute value.
1807 static Node* is_absolute( PhaseGVN *phase, PhiNode *phi_root, int true_path) {
1808 assert(true_path !=0, "only diamond shape graph expected");
1809
1810 int cmp_zero_idx = 0; // Index of compare input where to look for zero
1811 int phi_x_idx = 0; // Index of phi input where to find naked x
1812
1813 // ABS ends with the merge of 2 control flow paths.
1814 // Find the false path from the true path. With only 2 inputs, 3 - x works nicely.
1815 int false_path = 3 - true_path;
1816
1817 // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
1818 // phi->region->if_proj->ifnode->bool->cmp
1819 BoolNode *bol = phi_root->in(0)->in(1)->in(0)->in(1)->as_Bool();
1820 Node *cmp = bol->in(1);
1821
1822 // Check bool sense
1823 if (cmp->Opcode() == Op_CmpF || cmp->Opcode() == Op_CmpD) {
1824 switch (bol->_test._test) {
1825 case BoolTest::lt: cmp_zero_idx = 1; phi_x_idx = true_path; break;
1826 case BoolTest::le: cmp_zero_idx = 2; phi_x_idx = false_path; break;
1827 case BoolTest::gt: cmp_zero_idx = 2; phi_x_idx = true_path; break;
1828 case BoolTest::ge: cmp_zero_idx = 1; phi_x_idx = false_path; break;
1829 default: return nullptr; break;
1830 }
1831 } else if (cmp->Opcode() == Op_CmpI || cmp->Opcode() == Op_CmpL) {
1832 switch (bol->_test._test) {
1833 case BoolTest::lt:
1834 case BoolTest::le: cmp_zero_idx = 2; phi_x_idx = false_path; break;
1835 case BoolTest::gt:
1836 case BoolTest::ge: cmp_zero_idx = 2; phi_x_idx = true_path; break;
1837 default: return nullptr; break;
1838 }
1839 }
1840
1841 // Test is next
1842 const Type *tzero = nullptr;
1843 switch (cmp->Opcode()) {
1844 case Op_CmpI: tzero = TypeInt::ZERO; break; // Integer ABS
1845 case Op_CmpL: tzero = TypeLong::ZERO; break; // Long ABS
1846 case Op_CmpF: tzero = TypeF::ZERO; break; // Float ABS
1847 case Op_CmpD: tzero = TypeD::ZERO; break; // Double ABS
1848 default: return nullptr;
1849 }
1850
1851 // Find zero input of compare; the other input is being abs'd
1852 Node *x = nullptr;
1853 bool flip = false;
1854 if( phase->type(cmp->in(cmp_zero_idx)) == tzero ) {
1855 x = cmp->in(3 - cmp_zero_idx);
1856 } else if( phase->type(cmp->in(3 - cmp_zero_idx)) == tzero ) {
1857 // The test is inverted, we should invert the result...
1858 x = cmp->in(cmp_zero_idx);
1859 flip = true;
1860 } else {
1861 return nullptr;
1862 }
1863
1864 // Next get the 2 pieces being selected, one is the original value
1865 // and the other is the negated value.
1866 if( phi_root->in(phi_x_idx) != x ) return nullptr;
1867
1868 // Check other phi input for subtract node
1869 Node *sub = phi_root->in(3 - phi_x_idx);
1870
1871 bool is_sub = sub->Opcode() == Op_SubF || sub->Opcode() == Op_SubD ||
1872 sub->Opcode() == Op_SubI || sub->Opcode() == Op_SubL;
1873
1874 // Allow only Sub(0,X) and fail out for all others; Neg is not OK
1875 if (!is_sub || phase->type(sub->in(1)) != tzero || sub->in(2) != x) return nullptr;
1876
1877 if (tzero == TypeF::ZERO) {
1878 x = new AbsFNode(x);
1879 if (flip) {
1880 x = new SubFNode(sub->in(1), phase->transform(x));
1881 }
1882 } else if (tzero == TypeD::ZERO) {
1883 x = new AbsDNode(x);
1884 if (flip) {
1885 x = new SubDNode(sub->in(1), phase->transform(x));
1886 }
1887 } else if (tzero == TypeInt::ZERO && Matcher::match_rule_supported(Op_AbsI)) {
1888 x = new AbsINode(x);
1889 if (flip) {
1890 x = new SubINode(sub->in(1), phase->transform(x));
1891 }
1892 } else if (tzero == TypeLong::ZERO && Matcher::match_rule_supported(Op_AbsL)) {
1893 x = new AbsLNode(x);
1894 if (flip) {
1895 x = new SubLNode(sub->in(1), phase->transform(x));
1896 }
1897 } else return nullptr;
1898
1899 return x;
1900 }
1901
1902 //------------------------------split_once-------------------------------------
1903 // Helper for split_flow_path
1904 static void split_once(PhaseIterGVN *igvn, Node *phi, Node *val, Node *n, Node *newn) {
1905 igvn->hash_delete(n); // Remove from hash before hacking edges
1906
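// 'phi' still holds its original inputs here, so phi->in(i) identifies the paths to
// move even when 'n' is a different node (the Region or another Phi). Walk backwards
// so that del_req(), which fills the deleted slot with the last input, only moves
// inputs that have already been examined.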
1907 uint j = 1;
1908 for (uint i = phi->req()-1; i > 0; i--) {
1909 if (phi->in(i) == val) { // Found a path with val?
1910 // Add to NEW Region/Phi, no DU info
1911 newn->set_req( j++, n->in(i) );
1912 // Remove from OLD Region/Phi
1913 n->del_req(i);
1914 }
1915 }
1916
1917 // Register the new node but do not transform it. Cannot transform until the
1918 // entire Region/Phi conglomerate has been hacked as a single huge transform.
1919 igvn->register_new_node_with_optimizer( newn );
1920
1921 // Now I can point to the new node.
1922 n->add_req(newn);
1923 igvn->_worklist.push(n);
1924 }
1925
1926 //------------------------------split_flow_path--------------------------------
1927 // Check for merging identical values and split flow paths
1928 static Node* split_flow_path(PhaseGVN *phase, PhiNode *phi) {
1929 // This optimization tries to find two or more inputs of phi with the same constant value
1930 // It then splits them out into a separate Phi and a corresponding Region. If this is a loop-entry,
1931 // and the loop entry has multiple fall-in edges, and some of those fall-in edges have that
1932 // constant, and others not, we may split the fall-in edges into separate Phi's, and create
1933 // an irreducible loop. For reducible loops, this never seems to happen, as the multiple
1934 // fall-in edges are already merged before the loop head during parsing. But with irreducible
1935 // loops present, the order of merging during parsing can sometimes prevent this.
1936 if (phase->C->has_irreducible_loop()) {
1937 // Avoid this optimization if any irreducible loops are present. Else we may create
1938 // an irreducible loop that we do not detect.
1939 return nullptr;
1940 }
1941 BasicType bt = phi->type()->basic_type();
1942 if( bt == T_ILLEGAL || type2size[bt] <= 0 )
1943 return nullptr; // Bail out on funny non-value stuff
1944 if( phi->req() <= 3 ) // Need at least 2 matched inputs and a
1945 return nullptr; // third unequal input to be worth doing
1946
1947 // Scan for a constant
1948 uint i;
1949 for( i = 1; i < phi->req()-1; i++ ) {
1950 Node *n = phi->in(i);
1951 if( !n ) return nullptr;
1952 if( phase->type(n) == Type::TOP ) return nullptr;
1953 if( n->Opcode() == Op_ConP || n->Opcode() == Op_ConN || n->Opcode() == Op_ConNKlass )
1954 break;
1955 }
1956 if( i >= phi->req() ) // Only split for constants
1957 return nullptr;
1958
1959 Node *val = phi->in(i); // Constant to split for
1960 uint hit = 0; // Number of times it occurs
1961 Node *r = phi->region();
1962
1963 for( ; i < phi->req(); i++ ){ // Count occurrences of constant
1964 Node *n = phi->in(i);
1965 if( !n ) return nullptr;
1966 if( phase->type(n) == Type::TOP ) return nullptr;
1967 if( phi->in(i) == val ) {
1968 hit++;
1969 if (Node::may_be_loop_entry(r->in(i))) {
1970 return nullptr; // don't split loop entry path
1971 }
1972 }
1973 }
1974
1975 if( hit <= 1 || // Make sure we find 2 or more
1976 hit == phi->req()-1 ) // and not ALL the same value
1977 return nullptr;
1978
1979 // Now start splitting out the flow paths that merge the same value.
1980 // Split first the RegionNode.
1981 PhaseIterGVN *igvn = phase->is_IterGVN();
1982 RegionNode *newr = new RegionNode(hit+1);
1983 split_once(igvn, phi, val, r, newr);
1984
1985 // Now split all other Phis than this one
1986 for (DUIterator_Fast kmax, k = r->fast_outs(kmax); k < kmax; k++) {
1987 Node* phi2 = r->fast_out(k);
1988 if( phi2->is_Phi() && phi2->as_Phi() != phi ) {
1989 PhiNode *newphi = PhiNode::make_blank(newr, phi2);
1990 split_once(igvn, phi, val, phi2, newphi);
1991 }
1992 }
1993
1994 // Clean up this guy
1995 igvn->hash_delete(phi);
1996 for( i = phi->req()-1; i > 0; i-- ) {
1997 if( phi->in(i) == val ) {
1998 phi->del_req(i);
1999 }
2000 }
2001 phi->add_req(val);
2002
2003 return phi;
2004 }
2005
2006 // Returns the BasicType of a given convert node and a type, with special handling to ensure that conversions to
2007 // and from half float return the SHORT basic type, as that would not typically be returned from TypeInt.
2008 static BasicType get_convert_type(Node* convert, const Type* type) {
2009 int convert_op = convert->Opcode();
2010 if (type->isa_int() && (convert_op == Op_ConvHF2F || convert_op == Op_ConvF2HF)) {
2011 return T_SHORT;
2012 }
2013
2014 return type->basic_type();
2015 }
2016
2017 //=============================================================================
2018 //------------------------------simple_data_loop_check-------------------------
2019 // Try to determine whether the phi node is in a simple safe/unsafe data loop.
2020 // Returns:
2021 // enum LoopSafety { Safe = 0, Unsafe, UnsafeLoop };
2022 // Safe - safe case when the phi and its inputs reference only safe data
2023 // nodes;
2024 // Unsafe - the phi and its inputs reference unsafe data nodes but there
2025 // is no reference back to the phi - need a graph walk
2026 // to determine if it is in a loop;
2027 // UnsafeLoop - unsafe case when the phi references itself directly or through
2028 // unsafe data node.
2029 // Note: a safe data node is a node which could never reference itself during
2030 // GVN transformations. For now these are Con, Proj, Phi, CastPP, CheckCastPP.
2031 // Phi nodes are marked as safe not only because they can reference themselves,
2032 // but also to prevent mistaking the fallthrough case inside an outer loop
2033 // for a dead loop when the phi references itself through another phi.
2034 PhiNode::LoopSafety PhiNode::simple_data_loop_check(Node *in) const {
2035 // It is unsafe loop if the phi node references itself directly.
2036 if (in == (Node*)this)
2037 return UnsafeLoop; // Unsafe loop
2038 // Unsafe loop if the phi node references itself through an unsafe data node.
2039 // Exclude cases with null inputs or data nodes which could reference
2040 // itself (safe for dead loops).
2041 if (in != nullptr && !in->is_dead_loop_safe()) {
2042 // Check inputs of phi's inputs also.
2043 // It is much less expensive than a full graph walk.
2044 uint cnt = in->req();
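// For data projections, start at in(0) so the MultiNode they project from is
// examined as well; otherwise skip the control input.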
2045 uint i = (in->is_Proj() && !in->is_CFG()) ? 0 : 1;
2046 for (; i < cnt; ++i) {
2047 Node* m = in->in(i);
2048 if (m == (Node*)this)
2049 return UnsafeLoop; // Unsafe loop
2050 if (m != nullptr && !m->is_dead_loop_safe()) {
2051 // Check the most common case (about 30% of all cases):
2052 // phi->Load/Store->AddP->(ConP ConP Con)/(Parm Parm Con).
2053 Node *m1 = (m->is_AddP() && m->req() > 3) ? m->in(1) : nullptr;
2054 if (m1 == (Node*)this)
2055 return UnsafeLoop; // Unsafe loop
2056 if (m1 != nullptr && m1 == m->in(2) &&
2057 m1->is_dead_loop_safe() && m->in(3)->is_Con()) {
2058 continue; // Safe case
2059 }
2060 // The phi references an unsafe node - need full analysis.
2061 return Unsafe;
2062 }
2063 }
2064 }
2065 return Safe; // Safe case - we can optimize the phi node.
2066 }
2067
2068 //------------------------------is_unsafe_data_reference-----------------------
2069 // If phi can be reached through the data input - it is data loop.
2070 bool PhiNode::is_unsafe_data_reference(Node *in) const {
2071 assert(req() > 1, "");
2072 // First, check simple cases when phi references itself directly or
2073 // through an other node.
2074 LoopSafety safety = simple_data_loop_check(in);
2075 if (safety == UnsafeLoop)
2076 return true; // phi references itself - unsafe loop
2077 else if (safety == Safe)
2078 return false; // Safe case - phi could be replaced with the unique input.
2079
2080 // Unsafe case when we should go through data graph to determine
2081 // if the phi references itself.
2082
2083 ResourceMark rm;
2084
2085 Node_List nstack;
2086 VectorSet visited;
2087
2088 nstack.push(in); // Start with unique input.
2089 visited.set(in->_idx);
2090 while (nstack.size() != 0) {
2091 Node* n = nstack.pop();
2092 uint cnt = n->req();
2093 uint i = (n->is_Proj() && !n->is_CFG()) ? 0 : 1;
2094 for (; i < cnt; i++) {
2095 Node* m = n->in(i);
2096 if (m == (Node*)this) {
2097 return true; // Data loop
2098 }
2099 if (m != nullptr && !m->is_dead_loop_safe()) { // Only look for unsafe cases.
2100 if (!visited.test_set(m->_idx))
2101 nstack.push(m);
2102 }
2103 }
2104 }
2105 return false; // The phi is not reachable from its inputs
2106 }
2107
2108 // Is this Phi's region, or are some inputs to the region, enqueued for IGVN
2109 // such that the region could be optimized out?
2110 bool PhiNode::wait_for_region_igvn(PhaseGVN* phase) {
2111 PhaseIterGVN* igvn = phase->is_IterGVN();
2112 Unique_Node_List& worklist = igvn->_worklist;
2113 bool delay = false;
2114 Node* r = in(0);
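// For each control input of the region, walk up the chain Proj -> If -> Bool -> Cmp;
// if any node on that chain is still on the IGVN worklist, the region might still be
// simplified, so delay this phi.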
2115 for (uint j = 1; j < req(); j++) {
2116 Node* rc = r->in(j);
2117 Node* n = in(j);
2118
2119 if (rc == nullptr || !rc->is_Proj()) { continue; }
2120 if (worklist.member(rc)) {
2121 delay = true;
2122 break;
2123 }
2124
2125 if (rc->in(0) == nullptr || !rc->in(0)->is_If()) { continue; }
2126 if (worklist.member(rc->in(0))) {
2127 delay = true;
2128 break;
2129 }
2130
2131 if (rc->in(0)->in(1) == nullptr || !rc->in(0)->in(1)->is_Bool()) { continue; }
2132 if (worklist.member(rc->in(0)->in(1))) {
2133 delay = true;
2134 break;
2135 }
2136
2137 if (rc->in(0)->in(1)->in(1) == nullptr || !rc->in(0)->in(1)->in(1)->is_Cmp()) { continue; }
2138 if (worklist.member(rc->in(0)->in(1)->in(1))) {
2139 delay = true;
2140 break;
2141 }
2142 }
2143
2144 if (delay) {
2145 worklist.push(this);
2146 }
2147 return delay;
2148 }
2149
2150 // Push inline type input nodes (and null) down through the phi recursively (can handle data loops).
2151 InlineTypeNode* PhiNode::push_inline_types_down(PhaseGVN* phase, bool can_reshape, ciInlineKlass* inline_klass) {
2152 assert(inline_klass != nullptr, "must be");
2153 InlineTypeNode* vt = InlineTypeNode::make_null(*phase, inline_klass, /* transform = */ false)->clone_with_phis(phase, in(0), nullptr, !_type->maybe_null(), true);
2154 if (can_reshape) {
2155 // Replace phi right away to be able to use the inline
2156 // type node when reaching the phi again through data loops.
2157 PhaseIterGVN* igvn = phase->is_IterGVN();
2158 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
2159 Node* u = fast_out(i);
2160 igvn->rehash_node_delayed(u);
2161 imax -= u->replace_edge(this, vt);
2162 --i;
2163 }
2164 igvn->rehash_node_delayed(this);
2165 assert(outcnt() == 0, "should be dead now");
2166 }
2167 ResourceMark rm;
2168 Node_List casts;
2169 for (uint i = 1; i < req(); ++i) {
2170 Node* n = in(i);
2171 while (n->is_ConstraintCast()) {
2172 casts.push(n);
2173 n = n->in(1);
2174 }
2175 if (phase->type(n)->is_zero_type()) {
2176 n = InlineTypeNode::make_null(*phase, inline_klass);
2177 } else if (n->is_Phi()) {
2178 assert(can_reshape, "can only handle phis during IGVN");
2179 n = phase->transform(n->as_Phi()->push_inline_types_down(phase, can_reshape, inline_klass));
2180 }
2181 while (casts.size() != 0) {
2182 // Push the cast(s) through the InlineTypeNode
2183 // TODO 8302217 Can we avoid cloning? See InlineTypeNode::clone_if_required
2184 Node* cast = casts.pop()->clone();
2185 cast->set_req_X(1, n->as_InlineType()->get_oop(), phase);
2186 n = n->clone();
2187 n->as_InlineType()->set_oop(*phase, phase->transform(cast));
2188 n = phase->transform(n);
2189 if (n->is_top()) {
2190 break;
2191 }
2192 }
2193 bool transform = !can_reshape && (i == (req()-1)); // Transform phis on last merge
2194 assert(n->is_top() || n->is_InlineType(), "Only InlineType or top at this point.");
2195 if (n->is_InlineType()) {
2196 vt->merge_with(phase, n->as_InlineType(), i, transform);
2197 } // else nothing to do: phis above vt created by clone_with_phis are initialized to top already.
2198 }
2199 return vt;
2200 }
2201
2202 // If the Phi's Region is in an irreducible loop, and the Region
2203 // has had an input removed but has not yet been transformed, it could be
2204 // that the Region (and this Phi) are not reachable from Root.
2205 // If we allow the Phi to collapse before the Region, this may lead
2206 // to dead-loop data. Wait for the Region to check for reachability,
2207 // and potentially remove the dead code.
2208 bool PhiNode::must_wait_for_region_in_irreducible_loop(PhaseGVN* phase) const {
2209 RegionNode* region = in(0)->as_Region();
2210 if (region->loop_status() == RegionNode::LoopStatus::MaybeIrreducibleEntry) {
2211 Node* top = phase->C->top();
2212 for (uint j = 1; j < req(); j++) {
2213 Node* rc = region->in(j); // for each control input
2214 if (rc == nullptr || phase->type(rc) == Type::TOP) {
2215 // Region is missing a control input
2216 Node* n = in(j);
2217 if (n != nullptr && n != top) {
2218 // Phi still has its input, so region just lost its input
2219 return true;
2220 }
2221 }
2222 }
2223 }
2224 return false;
2225 }
2226
2227 // Check if splitting a bot memory Phi through a parent MergeMem may lead to
2228 // non-termination. For more details, see comments at the call site in
2229 // PhiNode::Ideal.
2230 bool PhiNode::is_split_through_mergemem_terminating() const {
2231 ResourceMark rm;
2232 VectorSet visited;
2233 GrowableArray<const Node*> worklist;
2234 worklist.push(this);
2235 visited.set(this->_idx);
2236 auto maybe_add_to_worklist = [&](Node* input) {
2237 if (input != nullptr &&
2238 (input->is_MergeMem() || input->is_memory_phi()) &&
2239 !visited.test_set(input->_idx)) {
2240 worklist.push(input);
2241 assert(input->adr_type() == TypePtr::BOTTOM,
2242 "should only visit bottom memory");
2243 }
2244 };
2245 while (worklist.length() > 0) {
2246 const Node* n = worklist.pop();
2247 if (n->is_MergeMem()) {
2248 Node* input = n->as_MergeMem()->base_memory();
2249 if (input == this) {
2250 return false;
2251 }
2252 maybe_add_to_worklist(input);
2253 } else {
2254 assert(n->is_memory_phi(), "invariant");
2255 for (uint i = PhiNode::Input; i < n->req(); i++) {
2256 Node* input = n->in(i);
2257 if (input == this) {
2258 return false;
2259 }
2260 maybe_add_to_worklist(input);
2261 }
2262 }
2263 }
2264 return true;
2265 }
2266
2267 // Is one of the inputs a Cast that has not been processed by igvn yet?
2268 bool PhiNode::wait_for_cast_input_igvn(const PhaseIterGVN* igvn) const {
2269 for (uint i = 1, cnt = req(); i < cnt; ++i) {
2270 Node* n = in(i);
2271 while (n != nullptr && n->is_ConstraintCast()) {
2272 if (igvn->_worklist.member(n)) {
2273 return true;
2274 }
2275 n = n->in(1);
2276 }
2277 }
2278 return false;
2279 }
2280
2281 //------------------------------Ideal------------------------------------------
2282 // Return a node which is more "ideal" than the current node. Must preserve
2283 // the CFG, but we can still strip out dead paths.
2284 Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2285 Node *r = in(0); // RegionNode
2286 assert(r != nullptr && r->is_Region(), "this phi must have a region");
2287 assert(r->in(0) == nullptr || !r->in(0)->is_Root(), "not a specially hidden merge");
2288
2289 // Note: During parsing, phis are often transformed before their regions.
2290 // This means we have to use type_or_null to defend against untyped regions.
2291 if( phase->type_or_null(r) == Type::TOP ) // Dead code?
2292 return nullptr; // No change
2293
2294 Node *top = phase->C->top();
2295 bool new_phi = (outcnt() == 0); // transforming new Phi
2296 // No change for igvn if new phi is not hooked
2297 if (new_phi && can_reshape)
2298 return nullptr;
2299
2300 if (must_wait_for_region_in_irreducible_loop(phase)) {
2301 return nullptr;
2302 }
2303
2304 // There are 2 situations when only one valid phi input is left
2305 // (in addition to the Region input).
2306 // One: the region is not a loop - replace the phi with this input.
2307 // Two: the region is a loop - replace the phi with top since this data path is dead
2308 // and we need to break the dead data loop.
2309 Node* progress = nullptr; // Record if any progress made
2310 for( uint j = 1; j < req(); ++j ){ // For all paths in
2311 // Check unreachable control paths
2312 Node* rc = r->in(j);
2313 Node* n = in(j); // Get the input
2314 if (rc == nullptr || phase->type(rc) == Type::TOP) {
2315 if (n != top) { // Not already top?
2316 PhaseIterGVN *igvn = phase->is_IterGVN();
2317 if (can_reshape && igvn != nullptr) {
2318 igvn->_worklist.push(r);
2319 }
2320 // Nuke it down
2321 set_req_X(j, top, phase);
2322 progress = this; // Record progress
2323 }
2324 }
2325 }
2326
2327 if (can_reshape && outcnt() == 0) {
2328 // set_req() above may kill outputs if Phi is referenced
2329 // only by itself on the dead (top) control path.
2330 return top;
2331 }
2332
2333 bool uncasted = false;
2334 Node* uin = unique_input(phase, false);
2335 if (uin == nullptr && can_reshape &&
2336 // If there is a chance that the region can be optimized out do
2337 // not add a cast node that we can't remove yet.
2338 !wait_for_region_igvn(phase)) {
2339 // If one of the inputs is a cast that has yet to be processed by igvn, delay processing of this node to give the
2340 // inputs a chance to optimize and possibly end up with identical inputs (casts included).
2341 // Say we have:
2342 // (Phi region (Cast#1 c uin) (Cast#2 c uin))
2343 // and Cast#1 and Cast#2 have not had a chance to common yet
2344 // if the unique_input() transformation below proceeds, then PhiNode::Ideal returns:
2345 // (Cast#3 region uin) (1)
2346 // If PhiNode::Ideal is delayed until Cast#1 and Cast#2 common, then it returns:
2347 // (Cast#1 c uin) (2)
2348 //
2349 // In (1) the resulting cast is conservatively pinned at a later control and while Cast#3 and Cast#1/Cast#2 still
2350 // have a chance to common, that requires proving that c dominates region in ConstraintCastNode::dominating_cast()
2351 // which may not happen if control flow is too complicated and another pass of loop opts doesn't run. Delaying the
2352 // transformation here should allow a more optimal result.
2353 // Beyond the efficiency concern, there is a risk, if the casts are CastPPs, to end up with a chain of AddPs with
2354 // different base inputs (but a unique uncasted base input). This breaks an invariant in the shape of address
2355 // subtrees.
2356 PhaseIterGVN* igvn = phase->is_IterGVN();
2357 if (wait_for_cast_input_igvn(igvn)) {
2358 igvn->_worklist.push(this);
2359 return nullptr;
2360 }
2361 uncasted = true;
2362 uin = unique_input(phase, true);
2363 }
2364 if (uin == top) { // Simplest case: no alive inputs.
2365 if (can_reshape) // IGVN transformation
2366 return top;
2367 else
2368 return nullptr; // Identity will return TOP
2369 } else if (uin != nullptr) {
2370 // Only one not-null unique input path is left.
2371 // Determine if this input is backedge of a loop.
2372 // (Skip new phis, which have no uses, and dead regions).
2373 if (outcnt() > 0 && r->in(0) != nullptr) {
2374 if (is_data_loop(r->as_Region(), uin, phase)) {
2375 // Break this data loop to avoid creation of a dead loop.
2376 if (can_reshape) {
2377 return top;
2378 } else {
2379 // We can't return top if we are in Parse phase - cut inputs only and
2380 // let Identity handle the case.
2381 replace_edge(uin, top, phase);
2382 return nullptr;
2383 }
2384 }
2385 }
2386
2387 if (uncasted) {
2388 // Add cast nodes between the phi to be removed and its unique input.
2389 // Wait until after parsing for the type information to propagate from the casts.
2390 assert(can_reshape, "Invalid during parsing");
2391 const Type* phi_type = bottom_type();
2392 // Add casts to carry the control dependency of the Phi that is
2393 // going away
2394 Node* cast = nullptr;
2395 const TypeTuple* extra_types = collect_types(phase);
2396 if (phi_type->isa_ptr()) {
2397 const Type* uin_type = phase->type(uin);
2398 if (!phi_type->isa_oopptr() && !uin_type->isa_oopptr()) {
2399 cast = new CastPPNode(r, uin, phi_type, ConstraintCastNode::DependencyType::NonFloatingNarrowing, extra_types);
2400 } else {
2401 // Use a CastPP for a cast to not null and a CheckCastPP for
2402 // a cast to a new klass (and both if both null-ness and
2403 // klass change).
2404
2405 // If the type of phi is not null but the type of uin may be
2406 // null, uin's type must be casted to not null
2407 if (phi_type->join(TypePtr::NOTNULL) == phi_type->remove_speculative() &&
2408 uin_type->join(TypePtr::NOTNULL) != uin_type->remove_speculative()) {
2409 cast = new CastPPNode(r, uin, TypePtr::NOTNULL, ConstraintCastNode::DependencyType::NonFloatingNarrowing, extra_types);
2410 }
2411
2412 // If the type of phi and uin, both casted to not null,
2413 // differ the klass of uin must be (check)cast'ed to match
2414 // that of phi
2415 if (phi_type->join_speculative(TypePtr::NOTNULL) != uin_type->join_speculative(TypePtr::NOTNULL)) {
2416 Node* n = uin;
2417 if (cast != nullptr) {
2418 cast = phase->transform(cast);
2419 n = cast;
2420 }
2421 cast = new CheckCastPPNode(r, n, phi_type, ConstraintCastNode::DependencyType::NonFloatingNarrowing, extra_types);
2422 }
2423 if (cast == nullptr) {
2424 cast = new CastPPNode(r, uin, phi_type, ConstraintCastNode::DependencyType::NonFloatingNarrowing, extra_types);
2425 }
2426 }
2427 } else {
2428 cast = ConstraintCastNode::make_cast_for_type(r, uin, phi_type, ConstraintCastNode::DependencyType::NonFloatingNarrowing, extra_types);
2429 }
2430 assert(cast != nullptr, "cast should be set");
2431 cast = phase->transform(cast);
2432 // set all inputs to the new cast(s) so the Phi is removed by Identity
2433 PhaseIterGVN* igvn = phase->is_IterGVN();
2434 for (uint i = 1; i < req(); i++) {
2435 set_req_X(i, cast, igvn);
2436 }
2437 uin = cast;
2438 }
2439
2440 // One unique input.
2441 DEBUG_ONLY(Node* ident = Identity(phase));
2442 // The unique input must eventually be detected by the Identity call.
2443 #ifdef ASSERT
2444 if (ident != uin && !ident->is_top() && !must_wait_for_region_in_irreducible_loop(phase)) {
2445 // print this output before failing assert
2446 r->dump(3);
2447 this->dump(3);
2448 ident->dump();
2449 uin->dump();
2450 }
2451 #endif
2452 // Identity may not return the expected uin, if it has to wait for the region, in irreducible case
2453 assert(ident == uin || ident->is_top() || must_wait_for_region_in_irreducible_loop(phase), "Identity must clean this up");
2454 return nullptr;
2455 }
2456
2457 Node* opt = nullptr;
2458 int true_path = is_diamond_phi();
2459 if (true_path != 0 &&
2460 // If one of the diamond's branches is in the process of dying, then the Phi's input for that branch might transform
2461 // to top. If that happens, replacing the Phi with an operation that consumes the Phi's inputs will cause the Phi
2462 // to be replaced by top. To prevent that, delay the transformation until the branch has a chance to be removed.
2463 !(can_reshape && wait_for_region_igvn(phase))) {
2464 // Check for CMove'ing identity. If it would be unsafe,
2465 // handle it here. In the safe case, let Identity handle it.
2466 Node* unsafe_id = is_cmove_id(phase, true_path);
2467 if( unsafe_id != nullptr && is_unsafe_data_reference(unsafe_id) )
2468 opt = unsafe_id;
2469
2470 // Check for simple convert-to-boolean pattern
2471 if( opt == nullptr )
2472 opt = is_x2logic(phase, this, true_path);
2473
2474 // Check for absolute value
2475 if( opt == nullptr )
2476 opt = is_absolute(phase, this, true_path);
2477
2478 // Check for conditional add
2479 if( opt == nullptr && can_reshape )
2480 opt = is_cond_add(phase, this, true_path);
2481
2482 // These 4 optimizations could subsume the phi:
2483 // have to check for a dead data loop creation.
2484 if( opt != nullptr ) {
2485 if( opt == unsafe_id || is_unsafe_data_reference(opt) ) {
2486 // Found dead loop.
2487 if( can_reshape )
2488 return top;
2489 // We can't return top if we are in Parse phase - cut inputs only
2490 // to stop further optimizations for this phi. Identity will return TOP.
2491 assert(req() == 3, "only diamond merge phi here");
2492 set_req(1, top);
2493 set_req(2, top);
2494 return nullptr;
2495 } else {
2496 return opt;
2497 }
2498 }
2499 }
2500
2501 // Check for merging identical values and split flow paths
2502 if (can_reshape) {
2503 opt = split_flow_path(phase, this);
2504 // This optimization only modifies phi - don't need to check for dead loop.
2505 assert(opt == nullptr || opt == this, "do not elide phi");
2506 if (opt != nullptr) return opt;
2507 }
2508
2509 if (in(1) != nullptr && in(1)->Opcode() == Op_AddP && can_reshape) {
2510 // Try to undo Phi of AddP:
2511 // (Phi (AddP base address offset) (AddP base2 address2 offset2))
2512 // becomes:
2513 // newbase := (Phi base base2)
2514 // newaddress := (Phi address address2)
2515 // newoffset := (Phi offset offset2)
2516 // (AddP newbase newaddress newoffset)
2517 //
2518 // This occurs as a result of unsuccessful split_thru_phi and
2519 // interferes with taking advantage of addressing modes. See the
2520 // clone_shift_expressions code in matcher.cpp
2521 Node* addp = in(1);
2522 Node* base = addp->in(AddPNode::Base);
2523 Node* address = addp->in(AddPNode::Address);
2524 Node* offset = addp->in(AddPNode::Offset);
2525 if (base != nullptr && address != nullptr && offset != nullptr &&
2526 !base->is_top() && !address->is_top() && !offset->is_top()) {
2527 const Type* base_type = base->bottom_type();
2528 const Type* address_type = address->bottom_type();
2529 // make sure that all the inputs are AddPs with non-top base, address and offset,
2530 // and track whether they all share the same base, address and offset as the first AddP
2531 bool doit = true;
2532 for (uint i = 2; i < req(); i++) {
2533 if (in(i) == nullptr ||
2534 in(i)->Opcode() != Op_AddP ||
2535 in(i)->in(AddPNode::Base) == nullptr ||
2536 in(i)->in(AddPNode::Address) == nullptr ||
2537 in(i)->in(AddPNode::Offset) == nullptr ||
2538 in(i)->in(AddPNode::Base)->is_top() ||
2539 in(i)->in(AddPNode::Address)->is_top() ||
2540 in(i)->in(AddPNode::Offset)->is_top()) {
2541 doit = false;
2542 break;
2543 }
2544 if (in(i)->in(AddPNode::Base) != base) {
2545 base = nullptr;
2546 }
2547 if (in(i)->in(AddPNode::Offset) != offset) {
2548 offset = nullptr;
2549 }
2550 if (in(i)->in(AddPNode::Address) != address) {
2551 address = nullptr;
2552 }
2553 // Accumulate type for resulting Phi
2554 base_type = base_type->meet_speculative(in(i)->in(AddPNode::Base)->bottom_type());
2555 address_type = address_type->meet_speculative(in(i)->in(AddPNode::Address)->bottom_type());
2556 }
2557 if (doit && base == nullptr) {
2558 // Check for neighboring AddP nodes in a tree.
2559 // If they have a base, use it.
2560 for (DUIterator_Fast kmax, k = this->fast_outs(kmax); k < kmax; k++) {
2561 Node* u = this->fast_out(k);
2562 if (u->is_AddP()) {
2563 Node* base2 = u->in(AddPNode::Base);
2564 if (base2 != nullptr && !base2->is_top()) {
2565 if (base == nullptr)
2566 base = base2;
2567 else if (base != base2)
2568 { doit = false; break; }
2569 }
2570 }
2571 }
2572 }
2573 if (doit) {
2574 if (base == nullptr) {
2575 base = new PhiNode(in(0), base_type, nullptr);
2576 for (uint i = 1; i < req(); i++) {
2577 base->init_req(i, in(i)->in(AddPNode::Base));
2578 }
2579 phase->is_IterGVN()->register_new_node_with_optimizer(base);
2580 }
2581 if (address == nullptr) {
2582 address = new PhiNode(in(0), address_type, nullptr);
2583 for (uint i = 1; i < req(); i++) {
2584 address->init_req(i, in(i)->in(AddPNode::Address));
2585 }
2586 phase->is_IterGVN()->register_new_node_with_optimizer(address);
2587 }
2588 if (offset == nullptr) {
2589 offset = new PhiNode(in(0), TypeX_X, nullptr);
2590 for (uint i = 1; i < req(); i++) {
2591 offset->init_req(i, in(i)->in(AddPNode::Offset));
2592 }
2593 phase->is_IterGVN()->register_new_node_with_optimizer(offset);
2594 }
2595 return new AddPNode(base, address, offset);
2596 }
2597 }
2598 }
2599
2600 // Split phis through memory merges, so that the memory merges will go away.
2601 // Piggy-back this transformation on the search for a unique input....
2602 // It will be as if the merged memory is the unique value of the phi.
2603 // (Do not attempt this optimization unless parsing is complete.
2604 // It would make the parser's memory-merge logic sick.)
2605 // (MergeMemNode is not dead_loop_safe - need to check for dead loop.)
2606 if (progress == nullptr && can_reshape && type() == Type::MEMORY) {
2607
2608 // See if this Phi should be sliced. Determine the merge width of input
2609 // MergeMems and check if there is a direct loop to self, as illustrated
2610 // below.
2611 //
2612 // +-------------+
2613 // | |
2614 // (base_memory) v |
2615 // MergeMem |
2616 // | |
2617 // v |
2618 // Phi (this) |
2619 // | |
2620 // +-----------+
2621 //
2622 // Generally, there are issues with non-termination with such circularity
2623 // (see comment further below). However, if there is a direct loop to self,
2624 // splitting the Phi through the MergeMem will result in the below.
2625 //
2626 // +---+
2627 // | |
2628 // v |
2629 // Phi |
2630 // |\ |
2631 // | +-+
2632 // (base_memory) v
2633 // MergeMem
2634 //
2635 // This split breaks the circularity and consequently does not lead to
2636 // non-termination.
2637 uint merge_width = 0;
2638 // TODO revisit this with JDK-8247216
2639 bool mergemem_only = true;
2640 bool split_always_terminates = false; // Is splitting guaranteed to terminate?
2641 for( uint i=1; i<req(); ++i ) {// For all paths in
2642 Node *ii = in(i);
2643 // TOP inputs should not be counted as safe inputs because if the
2644 // Phi references itself through all other inputs then splitting the
2645 // Phi through memory merges would create a dead loop at a later stage.
2646 if (ii == top) {
2647 return nullptr; // Delay optimization until graph is cleaned.
2648 }
2649 if (ii->is_MergeMem()) {
2650 MergeMemNode* n = ii->as_MergeMem();
2651 merge_width = MAX2(merge_width, n->req());
2652 if (n->base_memory() == this) {
2653 split_always_terminates = true;
2654 }
2655 } else {
2656 mergemem_only = false;
2657 }
2658 }
2659
2660 // There are cases with circular dependencies between bottom Phis
2661 // and MergeMems. Below is a minimal example.
2662 //
2663 // +------------+
2664 // | |
2665 // (base_memory) v |
2666 // MergeMem |
2667 // | |
2668 // v |
2669 // Phi (this) |
2670 // | |
2671 // v |
2672 // Phi |
2673 // | |
2674 // +----------+
2675 //
2676 // Here, we cannot break the circularity through a self-loop as there
2677 // are two Phis involved. Repeatedly splitting the Phis through the
2678 // MergeMem leads to non-termination. We check for non-termination below.
2679 // Only check for non-termination if necessary.
2680 if (!mergemem_only && !split_always_terminates && adr_type() == TypePtr::BOTTOM &&
2681 merge_width > Compile::AliasIdxRaw) {
2682 split_always_terminates = is_split_through_mergemem_terminating();
2683 }
2684
2685 if (merge_width > Compile::AliasIdxRaw) {
2686 // found at least one non-empty MergeMem
2687 const TypePtr* at = adr_type();
2688 if (at != TypePtr::BOTTOM) {
2689 // Patch the existing phi to select an input from the merge:
2690 // Phi:AT1(...MergeMem(m0, m1, m2)...) into
2691 // Phi:AT1(...m1...)
2692 int alias_idx = phase->C->get_alias_index(at);
2693 for (uint i=1; i<req(); ++i) {
2694 Node *ii = in(i);
2695 if (ii->is_MergeMem()) {
2696 MergeMemNode* n = ii->as_MergeMem();
2697 // compress paths and change unreachable cycles to TOP
2698 // Otherwise, we could keep updating the input infinitely along a MergeMem cycle
2699 // Equivalent code is in MemNode::Ideal_common
2700 Node *m = phase->transform(n);
2701 if (outcnt() == 0) { // Above transform() may kill us!
2702 return top;
2703 }
2704 // If transformed to a MergeMem, get the desired slice
2705 // Otherwise the returned node represents memory for every slice
2706 Node *new_mem = (m->is_MergeMem()) ?
2707 m->as_MergeMem()->memory_at(alias_idx) : m;
2708 // Update input if it is progress over what we have now
2709 if (new_mem != ii) {
2710 set_req_X(i, new_mem, phase->is_IterGVN());
2711 progress = this;
2712 }
2713 }
2714 }
2715 } else if (mergemem_only || split_always_terminates) {
2716 // If all inputs reference this phi (directly or through data nodes) -
2717 // it is a dead loop.
2718 bool saw_safe_input = false;
2719 for (uint j = 1; j < req(); ++j) {
2720 Node* n = in(j);
2721 if (n->is_MergeMem()) {
2722 MergeMemNode* mm = n->as_MergeMem();
2723 if (mm->base_memory() == this || mm->base_memory() == mm->empty_memory()) {
2724 // Skip this input if it references back to this phi or if the memory path is dead
2725 continue;
2726 }
2727 }
2728 if (!is_unsafe_data_reference(n)) {
2729 saw_safe_input = true; // found safe input
2730 break;
2731 }
2732 }
2733 if (!saw_safe_input) {
2734 // There is a dead loop: All inputs are either dead or reference back to this phi
2735 return top;
2736 }
2737
2738 // Phi(...MergeMem(m0, m1:AT1, m2:AT2)...) into
2739 // MergeMem(Phi(...m0...), Phi:AT1(...m1...), Phi:AT2(...m2...))
2740 PhaseIterGVN* igvn = phase->is_IterGVN();
2741 assert(igvn != nullptr, "sanity check");
2742 PhiNode* new_base = (PhiNode*) clone();
2743 // Must eagerly register phis, since they participate in loops.
2744 igvn->register_new_node_with_optimizer(new_base);
2745
2746 MergeMemNode* result = MergeMemNode::make(new_base);
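// 'result' starts with new_base as its base memory; a per-slice phi is created lazily
// below for each alias slice seen in any input MergeMem.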
2747 for (uint i = 1; i < req(); ++i) {
2748 Node *ii = in(i);
2749 if (ii->is_MergeMem()) {
2750 MergeMemNode* n = ii->as_MergeMem();
2751 if (igvn) {
2752 // TODO revisit this with JDK-8247216
2753 // Put 'n' on the worklist because it might be modified by MergeMemStream::iteration_setup
2754 igvn->_worklist.push(n);
2755 }
2756 for (MergeMemStream mms(result, n); mms.next_non_empty2(); ) {
2757 // If we have not seen this slice yet, make a phi for it.
2758 bool made_new_phi = false;
2759 if (mms.is_empty()) {
2760 Node* new_phi = new_base->slice_memory(mms.adr_type(phase->C));
2761 made_new_phi = true;
2762 igvn->register_new_node_with_optimizer(new_phi);
2763 mms.set_memory(new_phi);
2764 }
2765 Node* phi = mms.memory();
2766 assert(made_new_phi || phi->in(i) == n, "replace the i-th merge by a slice");
2767 phi->set_req(i, mms.memory2());
2768 }
2769 }
2770 }
2771 // Distribute all self-loops.
2772 { // (Extra braces to hide mms.)
2773 for (MergeMemStream mms(result); mms.next_non_empty(); ) {
2774 Node* phi = mms.memory();
2775 for (uint i = 1; i < req(); ++i) {
2776 if (phi->in(i) == this) phi->set_req(i, phi);
2777 }
2778 }
2779 }
2780
2781 // We could immediately transform the new Phi nodes here, but that can
2782 // result in creating an excessive number of new nodes within a single
2783 // IGVN iteration. We have put the Phi nodes on the IGVN worklist, so
2784 // they are transformed later on in any case.
2785
2786 // Replace self with the result.
2787 return result;
2788 }
2789 }
2790 //
2791 // Other optimizations on the memory chain
2792 //
2793 const TypePtr* at = adr_type();
2794 for( uint i=1; i<req(); ++i ) {// For all paths in
2795 Node *ii = in(i);
2796 Node *new_in = MemNode::optimize_memory_chain(ii, at, nullptr, phase);
2797 if (ii != new_in ) {
2798 set_req_X(i, new_in, phase->is_IterGVN());
2799 progress = this;
2800 }
2801 }
2802 }
2803
2804 #ifdef _LP64
2805 // Push DecodeN/DecodeNKlass down through phi.
2806 // The rest of the phi graph will be transformed by splitting EncodeP nodes up through the phis.
2807 if ((UseCompressedOops || UseCompressedClassPointers) && can_reshape && progress == nullptr) {
2808 bool may_push = true;
2809 bool has_decodeN = false;
2810 bool is_decodeN = false;
2811 for (uint i=1; i<req(); ++i) {// For all paths in
2812 Node *ii = in(i);
2813 if (ii->is_DecodeNarrowPtr() && ii->bottom_type() == bottom_type()) {
2814 // Do the optimization if a non-dead path exists.
2815 if (ii->in(1)->bottom_type() != Type::TOP) {
2816 has_decodeN = true;
2817 is_decodeN = ii->is_DecodeN();
2818 }
2819 } else if (!ii->is_Phi()) {
2820 may_push = false;
2821 }
2822 }
2823
2824 if (has_decodeN && may_push) {
2825 PhaseIterGVN *igvn = phase->is_IterGVN();
2826 // Make narrow type for new phi.
2827 const Type* narrow_t;
2828 if (is_decodeN) {
2829 narrow_t = TypeNarrowOop::make(this->bottom_type()->is_ptr());
2830 } else {
2831 narrow_t = TypeNarrowKlass::make(this->bottom_type()->is_ptr());
2832 }
2833 PhiNode* new_phi = new PhiNode(r, narrow_t);
2834 uint orig_cnt = req();
2835 for (uint i=1; i<req(); ++i) {// For all paths in
2836 Node *ii = in(i);
2837 Node* new_ii = nullptr;
2838 if (ii->is_DecodeNarrowPtr()) {
2839 assert(ii->bottom_type() == bottom_type(), "sanity");
2840 new_ii = ii->in(1);
2841 } else {
2842 assert(ii->is_Phi(), "sanity");
2843 if (ii->as_Phi() == this) {
2844 new_ii = new_phi;
2845 } else {
2846 if (is_decodeN) {
2847 new_ii = new EncodePNode(ii, narrow_t);
2848 } else {
2849 new_ii = new EncodePKlassNode(ii, narrow_t);
2850 }
2851 igvn->register_new_node_with_optimizer(new_ii);
2852 }
2853 }
2854 new_phi->set_req(i, new_ii);
2855 }
2856 igvn->register_new_node_with_optimizer(new_phi, this);
2857 if (is_decodeN) {
2858 progress = new DecodeNNode(new_phi, bottom_type());
2859 } else {
2860 progress = new DecodeNKlassNode(new_phi, bottom_type());
2861 }
2862 }
2863 }
2864 #endif
2865
2866 Node* inline_type = try_push_inline_types_down(phase, can_reshape);
2867 if (inline_type != this) {
2868 return inline_type;
2869 }
2870
2871 // Try to convert a Phi whose inputs are all the same kind of convert node into a phi of the pre-conversion type with
2872 // a single convert node following the phi, to de-duplicate the convert node and compact the IR.
2873 if (can_reshape && progress == nullptr) {
2874 ConvertNode* convert = in(1)->isa_Convert();
2875 if (convert != nullptr) {
2876 int conv_op = convert->Opcode();
2877 bool ok = true;
2878
2879 // Check the rest of the inputs
2880 for (uint i = 2; i < req(); i++) {
2881 // Make sure that all inputs are of the same type of convert node
2882 if (in(i)->Opcode() != conv_op) {
2883 ok = false;
2884 break;
2885 }
2886 }
2887
2888 if (ok) {
2889 // Find the local bottom type to set as the type of the phi
2890 const Type* source_type = Type::get_const_basic_type(convert->in_type()->basic_type());
2891 const Type* dest_type = convert->bottom_type();
2892
2893 PhiNode* newphi = new PhiNode(in(0), source_type, nullptr);
2894 // Set the inputs of the new phi to be the inputs of the converts
2895 for (uint i = 1; i < req(); i++) {
2896 newphi->init_req(i, in(i)->in(1));
2897 }
2898
2899 phase->is_IterGVN()->register_new_node_with_optimizer(newphi, this);
2900
2901 return ConvertNode::create_convert(get_convert_type(convert, source_type), get_convert_type(convert, dest_type), newphi);
2902 }
2903 }
2904 }
2905
2906 // Phi (VB ... VB) => VB (Phi ...) (Phi ...)
2907 if (EnableVectorReboxing && can_reshape && progress == nullptr && type()->isa_oopptr()) {
2908 progress = merge_through_phi(this, phase->is_IterGVN());
2909 }
2910
2911 return progress; // Return any progress
2912 }
2913
2914 // Check recursively if inputs are either an inline type, constant null
2915 // or another Phi (including self references through data loops). If so,
2916 // push the inline types down through the phis to enable folding of loads.
2917 Node* PhiNode::try_push_inline_types_down(PhaseGVN* phase, const bool can_reshape) {
2918 if (!can_be_inline_type()) {
2919 return this;
2920 }
2921
2922 ciInlineKlass* inline_klass;
2923 if (can_push_inline_types_down(phase, can_reshape, inline_klass)) {
2924 assert(inline_klass != nullptr, "must be");
2925 return push_inline_types_down(phase, can_reshape, inline_klass);
2926 }
2927 return this;
2928 }
2929
2930 bool PhiNode::can_push_inline_types_down(PhaseGVN* phase, const bool can_reshape, ciInlineKlass*& inline_klass) {
2931 if (req() <= 2) {
2932 // Dead phi.
2933 return false;
2934 }
2935 inline_klass = nullptr;
2936
2937 // TODO 8302217 We need to prevent endless pushing through
2938 bool only_phi = (outcnt() != 0);
2939 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
2940 Node* n = fast_out(i);
2941 if (n->is_InlineType() && n->in(1) == this) {
2942 return false;
2943 }
2944 if (!n->is_Phi()) {
2945 only_phi = false;
2946 }
2947 }
2948 if (only_phi) {
2949 return false;
2950 }
2951
2952 ResourceMark rm;
2953 Unique_Node_List worklist;
2954 worklist.push(this);
2955 Node_List casts;
2956
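// Walk this phi and any transitive phi inputs: every non-phi input (looking through
// ConstraintCasts) must be an InlineType of a single common klass or a null constant,
// otherwise pushing inline types down is not possible.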
2957 for (uint next = 0; next < worklist.size(); next++) {
2958 Node* phi = worklist.at(next);
2959 for (uint i = 1; i < phi->req(); i++) {
2960 Node* n = phi->in(i);
2961 if (n == nullptr) {
2962 return false;
2963 }
2964 while (n->is_ConstraintCast()) {
2965 if (n->in(0) != nullptr && n->in(0)->is_top()) {
2966 // Will die, don't optimize
2967 return false;
2968 }
2969 casts.push(n);
2970 n = n->in(1);
2971 }
2972 const Type* type = phase->type(n);
2973 if (n->is_InlineType() && (inline_klass == nullptr || inline_klass == type->inline_klass())) {
2974 inline_klass = type->inline_klass();
2975 } else if (n->is_Phi() && can_reshape && n->bottom_type()->isa_ptr()) {
2976 worklist.push(n);
2977 } else if (!type->is_zero_type()) {
2978 return false;
2979 }
2980 }
2981 }
2982 if (inline_klass == nullptr) {
2983 return false;
2984 }
2985
2986 // Check if cast nodes can be pushed through
2987 const Type* t = Type::get_const_type(inline_klass);
2988 while (casts.size() != 0 && t != nullptr) {
2989 Node* cast = casts.pop();
2990 if (t->filter(cast->bottom_type()) == Type::TOP) {
2991 return false;
2992 }
2993 }
2994
2995 return true;
2996 }
2997
2998 #ifdef ASSERT
2999 bool PhiNode::can_push_inline_types_down(PhaseGVN* phase) {
3000 if (!can_be_inline_type()) {
3001 return false;
3002 }
3003
3004 ciInlineKlass* inline_klass;
3005 return can_push_inline_types_down(phase, true, inline_klass);
3006 }
3007 #endif // ASSERT
3008
3009 static int compare_types(const Type* const& e1, const Type* const& e2) {
3010 return (e1 == e2) ? 0 : (((intptr_t)e1 < (intptr_t)e2) ? -1 : 1); // order by address; avoid truncating the pointer difference to int
3011 }
3012
3013 // Collect the types at casts that are going to be eliminated at this Phi and store them in a TypeTuple.
3014 // Sort the types using an arbitrary but stable order so that a given set of types always hashes to the same TypeTuple
3015 // (and TypeTuple pointer comparison is enough to tell whether two lists of types are the same or not).
3016 const TypeTuple* PhiNode::collect_types(PhaseGVN* phase) const {
3017 const Node* region = in(0);
3018 const Type* phi_type = bottom_type();
3019 ResourceMark rm;
3020 GrowableArray<const Type*> types;
3021 for (uint i = 1; i < req(); i++) {
3022 if (region->in(i) == nullptr || phase->type(region->in(i)) == Type::TOP) {
3023 continue;
3024 }
3025 Node* in = Node::in(i);
3026 if (in == nullptr || in == this || phase->type(in) == Type::TOP) { // rule out dead or self inputs before querying their type
3027 continue;
3028 }
3029 const Type* t = phase->type(in);
3030 if (t != phi_type && t->higher_equal_speculative(phi_type)) {
3031 types.insert_sorted<compare_types>(t);
3032 }
3033 while (in != nullptr && in->is_ConstraintCast()) {
3034 Node* next = in->in(1);
3035 if (phase->type(next)->isa_rawptr() && phase->type(in)->isa_oopptr()) {
3036 break;
3037 }
3038 ConstraintCastNode* cast = in->as_ConstraintCast();
3039 for (int j = 0; j < cast->extra_types_count(); ++j) {
3040 const Type* extra_t = cast->extra_type_at(j);
3041 if (extra_t != phi_type && extra_t->higher_equal_speculative(phi_type)) {
3042 types.insert_sorted<compare_types>(extra_t);
3043 }
3044 }
3045 in = next;
3046 }
3047 }
3048 const Type **flds = (const Type **)(phase->C->type_arena()->AmallocWords(types.length()*sizeof(Type*)));
3049 for (int i = 0; i < types.length(); ++i) {
3050 flds[i] = types.at(i);
3051 }
3052 return TypeTuple::make(types.length(), flds);
3053 }
3054
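// Clone the tree of Phis rooted at root_phi, giving the clones type t and
// replacing each VectorBox leaf by its input at index c (VectorBoxNode::Box or
// VectorBoxNode::Value). Used by merge_through_phi below.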
3055 Node* PhiNode::clone_through_phi(Node* root_phi, const Type* t, uint c, PhaseIterGVN* igvn) {
3056 Node_Stack stack(1);
3057 VectorSet visited;
3058 Node_List node_map;
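// node_map maps the _idx of each original Phi to its freshly created clone of type t.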
3059
3060 stack.push(root_phi, 1); // ignore control
3061 visited.set(root_phi->_idx);
3062
3063 Node* new_phi = new PhiNode(root_phi->in(0), t);
3064 node_map.map(root_phi->_idx, new_phi);
3065
3066 while (stack.is_nonempty()) {
3067 Node* n = stack.node();
3068 uint idx = stack.index();
3069 assert(n->is_Phi(), "not a phi");
3070 if (idx < n->req()) {
3071 stack.set_index(idx + 1);
3072 Node* def = n->in(idx);
3073 if (def == nullptr) {
3074 continue; // ignore dead path
3075 } else if (def->is_Phi()) { // inner node
3076 Node* new_phi = node_map[n->_idx];
3077 if (!visited.test_set(def->_idx)) { // not visited yet
3078 node_map.map(def->_idx, new PhiNode(def->in(0), t));
3079 stack.push(def, 1); // ignore control
3080 }
3081 Node* new_in = node_map[def->_idx];
3082 new_phi->set_req(idx, new_in);
3083 } else if (def->Opcode() == Op_VectorBox) { // leaf
3084 assert(n->is_Phi(), "not a phi");
3085 Node* new_phi = node_map[n->_idx];
3086 new_phi->set_req(idx, def->in(c));
3087 } else {
3088 assert(false, "not optimizable");
3089 return nullptr;
3090 }
3091 } else {
3092 Node* new_phi = node_map[n->_idx];
3093 igvn->register_new_node_with_optimizer(new_phi, n);
3094 stack.pop();
3095 }
3096 }
3097 return new_phi;
3098 }
3099
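// Walk the tree of Phis rooted at root_phi. If every leaf is a VectorBox with
// the same box type and vector type, rebuild the merge as a single VectorBox
// over two cloned Phi trees. Illustrative shape of the rewrite:
//   Phi (VectorBox box1 vec1) (VectorBox box2 vec2)
//     =>  VectorBox (Phi box1 box2) (Phi vec1 vec2)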
3100 Node* PhiNode::merge_through_phi(Node* root_phi, PhaseIterGVN* igvn) {
3101 Node_Stack stack(1);
3102 VectorSet visited;
3103
3104 stack.push(root_phi, 1); // ignore control
3105 visited.set(root_phi->_idx);
3106
3107 VectorBoxNode* cached_vbox = nullptr;
3108 while (stack.is_nonempty()) {
3109 Node* n = stack.node();
3110 uint idx = stack.index();
3111 if (idx < n->req()) {
3112 stack.set_index(idx + 1);
3113 Node* in = n->in(idx);
3114 if (in == nullptr) {
3115 continue; // ignore dead path
3116 } else if (in->isa_Phi()) {
3117 if (!visited.test_set(in->_idx)) {
3118 stack.push(in, 1); // ignore control
3119 }
3120 } else if (in->Opcode() == Op_VectorBox) {
3121 VectorBoxNode* vbox = static_cast<VectorBoxNode*>(in);
3122 if (cached_vbox == nullptr) {
3123 cached_vbox = vbox;
3124 } else if (vbox->vec_type() != cached_vbox->vec_type()) {
3125 // TODO: vector type mismatch can be handled with additional reinterpret casts
3126 assert(!Type::equals(vbox->vec_type(), cached_vbox->vec_type()), "inconsistent");
3127 return nullptr; // not optimizable: vector type mismatch
3128 } else if (vbox->box_type() != cached_vbox->box_type()) {
3129 assert(!Type::equals(vbox->box_type(), cached_vbox->box_type()), "inconsistent");
3130 return nullptr; // not optimizable: box type mismatch
3131 }
3132 } else {
3133 return nullptr; // not optimizable: neither Phi nor VectorBox
3134 }
3135 } else {
3136 stack.pop();
3137 }
3138 }
3139 if (cached_vbox == nullptr) {
3140 // We have a Phi dead-loop (no data-input). Phi nodes are considered safe,
3141 // so just avoid this optimization.
3142 return nullptr;
3143 }
3144 const TypeInstPtr* btype = cached_vbox->box_type();
3145 const TypeVect* vtype = cached_vbox->vec_type();
3146 Node* new_vbox_phi = clone_through_phi(root_phi, btype, VectorBoxNode::Box, igvn);
3147 Node* new_vect_phi = clone_through_phi(root_phi, vtype, VectorBoxNode::Value, igvn);
3148 return new VectorBoxNode(igvn->C, new_vbox_phi, new_vect_phi, btype, vtype);
3149 }
3150
3151 bool PhiNode::is_data_loop(RegionNode* r, Node* uin, const PhaseGVN* phase) {
3152 // First, take the shortcut when we know it is a loop and the EntryControl data path is dead.
3153 // The loop node may only have one input because the entry path was removed in PhaseIdealLoop::Dominators().
3154 // Then, check if there is a data loop when the phi references itself directly or through other data nodes.
3155 assert(!r->is_Loop() || r->req() <= 3, "Loop node should have 3 or fewer inputs");
3156 const bool is_loop = (r->is_Loop() && r->req() == 3);
3157 const Node* top = phase->C->top();
3158 if (is_loop) {
3159 return !uin->eqv_uncast(in(LoopNode::EntryControl));
3160 } else {
3161 // We have a data loop either with an unsafe data reference or if a region is unreachable.
3162 return is_unsafe_data_reference(uin)
3163 || (r->req() == 3 && (r->in(1) != top && r->in(2) == top && r->is_unreachable_region(phase)));
3164 }
3165 }
3166
3167 //------------------------------is_tripcount-----------------------------------
3168 bool PhiNode::is_tripcount(BasicType bt) const {
3169 return (in(0) != nullptr && in(0)->is_BaseCountedLoop() &&
3170 in(0)->as_BaseCountedLoop()->bt() == bt &&
3171 in(0)->as_BaseCountedLoop()->phi() == this);
3172 }
3173
3174 //------------------------------out_RegMask------------------------------------
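// A Phi's data inputs must be register-compatible with its result, so each
// data input reuses the output mask; input 0 is the control (Region) input and
// gets the empty mask.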
3175 const RegMask &PhiNode::in_RegMask(uint i) const {
3176 return i ? out_RegMask() : RegMask::EMPTY;
3177 }
3178
3179 const RegMask &PhiNode::out_RegMask() const {
3180 uint ideal_reg = _type->ideal_reg();
3181 assert( ideal_reg != Node::NotAMachineReg, "invalid type at Phi" );
3182 if (ideal_reg == 0) {
3183 return RegMask::EMPTY;
3184 }
3185 assert(ideal_reg != Op_RegFlags, "flags register is not spillable");
3186 return *(Compile::current()->matcher()->idealreg2spillmask[ideal_reg]);
3187 }
3188
3189 #ifndef PRODUCT
3190 void PhiNode::dump_spec(outputStream *st) const {
3191 TypeNode::dump_spec(st);
3192 if (is_tripcount(T_INT) || is_tripcount(T_LONG)) {
3193 st->print(" #tripcount");
3194 }
3195 }
3196 #endif
3197
3198
3199 //=============================================================================
3200 const Type* GotoNode::Value(PhaseGVN* phase) const {
3201 // If the input is reachable, then we are executed.
3202 // If the input is not reachable, then we are not executed.
3203 return phase->type(in(0));
3204 }
3205
3206 Node* GotoNode::Identity(PhaseGVN* phase) {
3207 return in(0); // Simple copy of incoming control
3208 }
3209
3210 const RegMask &GotoNode::out_RegMask() const {
3211 return RegMask::EMPTY;
3212 }
3213
3214 //=============================================================================
3215 const RegMask &JumpNode::out_RegMask() const {
3216 return RegMask::EMPTY;
3217 }
3218
3219 //=============================================================================
3220 const RegMask &JProjNode::out_RegMask() const {
3221 return RegMask::EMPTY;
3222 }
3223
3224 //=============================================================================
3225 const RegMask &CProjNode::out_RegMask() const {
3226 return RegMask::EMPTY;
3227 }
3228
3229
3230
3231 //=============================================================================
3232
3233 uint PCTableNode::hash() const { return Node::hash() + _size; }
3234 bool PCTableNode::cmp( const Node &n ) const
3235 { return _size == ((PCTableNode&)n)._size; }
3236
3237 const Type *PCTableNode::bottom_type() const {
3238 const Type** f = TypeTuple::fields(_size);
3239 for( uint i = 0; i < _size; i++ ) f[i] = Type::CONTROL;
3240 return TypeTuple::make(_size, f);
3241 }
3242
3243 //------------------------------Value------------------------------------------
3244 // Compute the type of the PCTableNode. If reachable, it is a tuple of
3245 // Control; otherwise the table targets are not reachable.
3246 const Type* PCTableNode::Value(PhaseGVN* phase) const {
3247 if( phase->type(in(0)) == Type::CONTROL )
3248 return bottom_type();
3249 return Type::TOP; // All paths dead? Then so are we
3250 }
3251
3252 //------------------------------Ideal------------------------------------------
3253 // Return a node which is more "ideal" than the current node. Strip out
3254 // control copies
3255 Node *PCTableNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3256 return remove_dead_region(phase, can_reshape) ? this : nullptr;
3257 }
3258
3259 //=============================================================================
3260 uint JumpProjNode::hash() const {
3261 return Node::hash() + _dest_bci;
3262 }
3263
3264 bool JumpProjNode::cmp( const Node &n ) const {
3265 return ProjNode::cmp(n) &&
3266 _dest_bci == ((JumpProjNode&)n)._dest_bci;
3267 }
3268
3269 #ifndef PRODUCT
3270 void JumpProjNode::dump_spec(outputStream *st) const {
3271 ProjNode::dump_spec(st);
3272 st->print("@bci %d ",_dest_bci);
3273 }
3274
3275 void JumpProjNode::dump_compact_spec(outputStream *st) const {
3276 ProjNode::dump_compact_spec(st);
3277 st->print("(%d)%d@%d", _switch_val, _proj_no, _dest_bci);
3278 }
3279 #endif
3280
3281 //=============================================================================
3282 //------------------------------Value------------------------------------------
3283 // Check for being unreachable, or for coming from a Rethrow. Rethrows cannot
3284 // have the default "fall_through_index" path.
3285 const Type* CatchNode::Value(PhaseGVN* phase) const {
3286 // Unreachable? Then so are all paths from here.
3287 if( phase->type(in(0)) == Type::TOP ) return Type::TOP;
3288 // First assume all paths are reachable
3289 const Type** f = TypeTuple::fields(_size);
3290 for( uint i = 0; i < _size; i++ ) f[i] = Type::CONTROL;
3291 // Identify cases that will always throw an exception
3292 // () rethrow call
3293 // () virtual or interface call with null receiver
3294 // () call is a check cast with incompatible arguments
3295 if( in(1)->is_Proj() ) {
3296 Node *i10 = in(1)->in(0);
3297 if( i10->is_Call() ) {
3298 CallNode *call = i10->as_Call();
3299 // Rethrows always throw exceptions, never return
3300 if (call->entry_point() == OptoRuntime::rethrow_stub()) {
3301 f[CatchProjNode::fall_through_index] = Type::TOP;
3302 } else if (call->is_AllocateArray()) {
3303 Node* klass_node = call->in(AllocateNode::KlassNode);
3304 Node* length = call->in(AllocateNode::ALength);
3305 const Type* length_type = phase->type(length);
3306 const Type* klass_type = phase->type(klass_node);
3307 Node* valid_length_test = call->in(AllocateNode::ValidLengthTest);
3308 const Type* valid_length_test_t = phase->type(valid_length_test);
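// If any of these inputs is dead, or the length is already known to be invalid
// (the ValidLengthTest input is the constant 0), the allocation always throws
// and the fall-through path is dead.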
3309 if (length_type == Type::TOP || klass_type == Type::TOP || valid_length_test_t == Type::TOP ||
3310 valid_length_test_t->is_int()->is_con(0)) {
3311 f[CatchProjNode::fall_through_index] = Type::TOP;
3312 }
3313 } else if( call->req() > TypeFunc::Parms ) {
3314 const Type *arg0 = phase->type( call->in(TypeFunc::Parms) );
3315 // Check for null receiver to virtual or interface calls
3316 if( call->is_CallDynamicJava() &&
3317 arg0->higher_equal(TypePtr::NULL_PTR) ) {
3318 f[CatchProjNode::fall_through_index] = Type::TOP;
3319 }
3320 } // End of if not a runtime stub
3321 } // End of if have call above me
3322 } // End of if slot 1 is a projection
3323 return TypeTuple::make(_size, f);
3324 }
3325
3326 //=============================================================================
3327 uint CatchProjNode::hash() const {
3328 return Node::hash() + _handler_bci;
3329 }
3330
3331
3332 bool CatchProjNode::cmp( const Node &n ) const {
3333 return ProjNode::cmp(n) &&
3334 _handler_bci == ((CatchProjNode&)n)._handler_bci;
3335 }
3336
3337
3338 //------------------------------Identity---------------------------------------
3339 // If only 1 target is possible, choose it if it is the main control
3340 Node* CatchProjNode::Identity(PhaseGVN* phase) {
3341 // If my value is control and no other value is, then treat as ID
3342 const TypeTuple *t = phase->type(in(0))->is_tuple();
3343 if (t->field_at(_con) != Type::CONTROL) return this;
3344 // If we remove the last CatchProj and elide the Catch/CatchProj, then we
3345 // also remove any exception table entry. Thus we must know the call
3346 // feeding the Catch will not really throw an exception. This is ok for
3347 // the main fall-thru control (happens when we know a call can never throw
3348 // an exception) or for "rethrow", because a further optimization will
3349 // yank the rethrow (happens when we inline a function that can throw an
3350 // exception and the caller has no handler). Not legal, e.g., for passing
3351 // a null receiver to a v-call, or passing bad types to a slow-check-cast.
3352 // These cases MUST throw an exception via the runtime system, so the VM
3353 // will be looking for a table entry.
3354 Node *proj = in(0)->in(1); // Expect a proj feeding CatchNode
3355 CallNode *call;
3356 if (_con != TypeFunc::Control && // Bail out if not the main control.
3357 !(proj->is_Proj() && // AND NOT a rethrow
3358 proj->in(0)->is_Call() &&
3359 (call = proj->in(0)->as_Call()) &&
3360 call->entry_point() == OptoRuntime::rethrow_stub()))
3361 return this;
3362
3363 // Search for any other path being control
3364 for (uint i = 0; i < t->cnt(); i++) {
3365 if (i != _con && t->field_at(i) == Type::CONTROL)
3366 return this;
3367 }
3368 // Only my path is possible; I am identity on control to the jump
3369 return in(0)->in(0);
3370 }
3371
3372
3373 #ifndef PRODUCT
3374 void CatchProjNode::dump_spec(outputStream *st) const {
3375 ProjNode::dump_spec(st);
3376 st->print("@bci %d ",_handler_bci);
3377 }
3378 #endif
3379
3380 //=============================================================================
3381 //------------------------------Identity---------------------------------------
3382 // Check for CreateEx being Identity.
3383 Node* CreateExNode::Identity(PhaseGVN* phase) {
3384 if( phase->type(in(1)) == Type::TOP ) return in(1);
3385 if( phase->type(in(0)) == Type::TOP ) return in(0);
3386 if (phase->type(in(0)->in(0)) == Type::TOP) {
3387 assert(in(0)->is_CatchProj(), "control is CatchProj");
3388 return phase->C->top(); // dead code
3389 }
3390 // We only come from CatchProj, unless the CatchProj goes away.
3391 // If the CatchProj is optimized away, then we just carry the
3392 // exception oop through.
3393
3394 // CheckCastPPNode::Ideal() for inline types reuses the exception
3395 // paths of a call to perform an allocation: we can see a Phi here.
3396 if (in(1)->is_Phi()) {
3397 return this;
3398 }
3399 CallNode *call = in(1)->in(0)->as_Call();
3400
3401 return (in(0)->is_CatchProj() && in(0)->in(0)->is_Catch() &&
3402 in(0)->in(0)->in(1) == in(1)) ? this : call->in(TypeFunc::Parms);
3403 }
3404
3405 //=============================================================================
3406 //------------------------------Value------------------------------------------
3407 // Check for being unreachable.
3408 const Type* NeverBranchNode::Value(PhaseGVN* phase) const {
3409 if (!in(0) || in(0)->is_top()) return Type::TOP;
3410 return bottom_type();
3411 }
3412
3413 //------------------------------Ideal------------------------------------------
3414 // Check for no longer being part of a loop
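// A NeverBranch only exists to keep an otherwise infinite loop connected to the
// rest of the graph. Once its control input is no longer a Region (the loop has
// been removed), wire the fall-through projection directly to the input control
// and let this node go dead.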
3415 Node *NeverBranchNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3416 if (can_reshape && !in(0)->is_Region()) {
3417 // Dead code elimination can sometimes delete this projection so
3418 // if it's not there, there's nothing to do.
3419 Node* fallthru = proj_out_or_null(0);
3420 if (fallthru != nullptr) {
3421 phase->is_IterGVN()->replace_node(fallthru, in(0));
3422 }
3423 return phase->C->top();
3424 }
3425 return nullptr;
3426 }
3427
3428 #ifndef PRODUCT
3429 void NeverBranchNode::format( PhaseRegAlloc *ra_, outputStream *st) const {
3430 st->print("%s", Name());
3431 }
3432 #endif
3433
3434 Node* BlackholeNode::Ideal(PhaseGVN* phase, bool can_reshape) {
3435 return remove_dead_region(phase, can_reshape) ? this : nullptr;
3436 }
3437
3438 #ifndef PRODUCT
3439 void BlackholeNode::format(PhaseRegAlloc* ra, outputStream* st) const {
3440 st->print("blackhole ");
3441 bool first = true;
3442 for (uint i = 0; i < req(); i++) {
3443 Node* n = in(i);
3444 if (n != nullptr && OptoReg::is_valid(ra->get_reg_first(n))) {
3445 if (first) {
3446 first = false;
3447 } else {
3448 st->print(", ");
3449 }
3450 char buf[128];
3451 ra->dump_register(n, buf, sizeof(buf));
3452 st->print("%s", buf);
3453 }
3454 }
3455 st->cr();
3456 }
3457 #endif
3458