1 /*
2 * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "ci/ciTypeFlow.hpp"
26 #include "memory/allocation.inline.hpp"
27 #include "memory/resourceArea.hpp"
28 #include "opto/addnode.hpp"
29 #include "opto/castnode.hpp"
30 #include "opto/cfgnode.hpp"
31 #include "opto/connode.hpp"
32 #include "opto/loopnode.hpp"
33 #include "opto/phaseX.hpp"
34 #include "opto/predicates_enums.hpp"
35 #include "opto/rootnode.hpp"
36 #include "opto/runtime.hpp"
37 #include "opto/subnode.hpp"
38 #include "opto/subtypenode.hpp"
39
40 // Portions of code courtesy of Clifford Click
41
42 // Optimization - Graph Style
43
44
45 #ifndef PRODUCT
46 extern uint explicit_null_checks_elided;
47 #endif
48
49 IfNode::IfNode(Node* control, Node* bol, float p, float fcnt)
50 : MultiBranchNode(2),
51 _prob(p),
52 _fcnt(fcnt),
53 _assertion_predicate_type(AssertionPredicateType::None) {
54 init_node(control, bol);
55 }
56
57 IfNode::IfNode(Node* control, Node* bol, float p, float fcnt, AssertionPredicateType assertion_predicate_type)
58 : MultiBranchNode(2),
59 _prob(p),
60 _fcnt(fcnt),
61 _assertion_predicate_type(assertion_predicate_type) {
62 init_node(control, bol);
63 }
64
65 //=============================================================================
66 //------------------------------Value------------------------------------------
67 // Return a tuple for whichever arm of the IF is reachable
68 const Type* IfNode::Value(PhaseGVN* phase) const {
69 if( !in(0) ) return Type::TOP;
70 if( phase->type(in(0)) == Type::TOP )
71 return Type::TOP;
72 const Type *t = phase->type(in(1));
73 if( t == Type::TOP ) // data is undefined
74 return TypeTuple::IFNEITHER; // unreachable altogether
75 if( t == TypeInt::ZERO ) // zero, or false
76 return TypeTuple::IFFALSE; // only false branch is reachable
77 if( t == TypeInt::ONE ) // 1, or true
78 return TypeTuple::IFTRUE; // only true branch is reachable
79 assert( t == TypeInt::BOOL, "expected boolean type" );
80
81 return TypeTuple::IFBOTH; // No progress
82 }
83
84 const RegMask &IfNode::out_RegMask() const {
85 return RegMask::EMPTY;
86 }
87
//------------------------------split_if---------------------------------------
// Look for places where we merge constants, then test on the merged value.
// If the IF test will be constant folded on the path with the constant, we
// win by splitting the IF to before the merge point.
// On success returns a (dead) replacement ConI node so the caller reports
// progress; returns nullptr if the pattern does not apply.
static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
  // I could be a lot more general here, but I'm trying to squeeze this
  // in before the Christmas '98 break so I'm gonna be kinda restrictive
  // on the patterns I accept. CNC

  // Look for a compare of a constant and a merged value: the shape must be
  // If -> Bool -> Cmp(Phi, Con).
  Node *i1 = iff->in(1);
  if( !i1->is_Bool() ) return nullptr;
  BoolNode *b = i1->as_Bool();
  Node *cmp = b->in(1);
  if( !cmp->is_Cmp() ) return nullptr;
  i1 = cmp->in(1);
  if( i1 == nullptr || !i1->is_Phi() ) return nullptr;
  PhiNode *phi = i1->as_Phi();
  Node *con2 = cmp->in(2);
  if( !con2->is_Con() ) return nullptr;
  // See that the merge point contains some constants
  Node *con1=nullptr;
  uint i4;
  RegionNode* phi_region = phi->region();
  for (i4 = 1; i4 < phi->req(); i4++ ) {
    con1 = phi->in(i4);
    // Do not optimize partially collapsed merges
    if (con1 == nullptr || phi_region->in(i4) == nullptr || igvn->type(phi_region->in(i4)) == Type::TOP) {
      // Re-queue the If so we retry once the region has collapsed.
      igvn->_worklist.push(iff);
      return nullptr;
    }
    if( con1->is_Con() ) break; // Found a constant
    // Also allow null-vs-not-null checks
    const TypePtr *tp = igvn->type(con1)->isa_ptr();
    if( tp && tp->_ptr == TypePtr::NotNull )
      break;
  }
  if( i4 >= phi->req() ) return nullptr; // Found no constants

  igvn->C->set_has_split_ifs(true); // Has chance for split-if

  // Make sure that the compare can be constant folded away: clone the Cmp
  // with the constant input, evaluate it, then discard the clone.
  Node *cmp2 = cmp->clone();
  cmp2->set_req(1,con1);
  cmp2->set_req(2,con2);
  const Type *t = cmp2->Value(igvn);
  // This compare is dead, so whack it!
  igvn->remove_dead_node(cmp2, PhaseIterGVN::NodeOrigin::Speculative);
  if( !t->singleton() ) return nullptr;

  // No intervening control, like a simple Call
  Node* r = iff->in(0);
  if (!r->is_Region() || r->is_Loop() || phi_region != r || r->as_Region()->is_copy()) {
    return nullptr;
  }

  // No other users of the cmp/bool
  if (b->outcnt() != 1 || cmp->outcnt() != 1) {
    //tty->print_cr("many users of cmp/bool");
    return nullptr;
  }

  // Make sure we can determine where all the uses of merged values go:
  // only the If itself, the single Phi being compared, and (through the Phi)
  // the Cmp or If-controlled ConstraintCasts are accepted.
  for (DUIterator_Fast jmax, j = r->fast_outs(jmax); j < jmax; j++) {
    Node* u = r->fast_out(j);
    if( u == r ) continue;
    if( u == iff ) continue;
    if( u->outcnt() == 0 ) continue; // use is dead & ignorable
    if( !u->is_Phi() ) {
      /*
      if( u->is_Start() ) {
        tty->print_cr("Region has inlined start use");
      } else {
        tty->print_cr("Region has odd use");
        u->dump(2);
      }*/
      return nullptr;
    }
    if( u != phi ) {
      // CNC - do not allow any other merged value
      //tty->print_cr("Merging another value");
      //u->dump(2);
      return nullptr;
    }
    // Make sure we can account for all Phi uses
    for (DUIterator_Fast kmax, k = u->fast_outs(kmax); k < kmax; k++) {
      Node* v = u->fast_out(k); // User of the phi
      // CNC - Allow only really simple patterns.
      // In particular I disallow AddP of the Phi, a fairly common pattern
      if (v == cmp) continue;  // The compare is OK
      if (v->is_ConstraintCast()) {
        // If the cast is derived from data flow edges, it may not have a control edge.
        // If so, it should be safe to split. But follow-up code can not deal with
        // this (l. 359). So skip.
        if (v->in(0) == nullptr) {
          return nullptr;
        }
        if (v->in(0)->in(0) == iff) {
          continue; // CastPP/II of the IfNode is OK
        }
      }
      // Disabled following code because I cannot tell if exactly one
      // path dominates without a real dominator check. CNC 9/9/1999
      //uint vop = v->Opcode();
      //if( vop == Op_Phi ) {        // Phi from another merge point might be OK
      //  Node *r = v->in(0);        // Get controlling point
      //  if( !r ) return nullptr;   // Degraded to a copy
      //  // Find exactly one path in (either True or False doms, but not IFF)
      //  int cnt = 0;
      //  for( uint i = 1; i < r->req(); i++ )
      //    if( r->in(i) && r->in(i)->in(0) == iff )
      //      cnt++;
      //  if( cnt == 1 ) continue; // Exactly one of True or False guards Phi
      //}
      if( !v->is_Call() ) {
        /*
        if( v->Opcode() == Op_AddP ) {
          tty->print_cr("Phi has AddP use");
        } else if( v->Opcode() == Op_CastPP ) {
          tty->print_cr("Phi has CastPP use");
        } else if( v->Opcode() == Op_CastII ) {
          tty->print_cr("Phi has CastII use");
        } else {
          tty->print_cr("Phi has use I can't be bothered with");
        }
        */
      }
      return nullptr;

      /* CNC - Cut out all the fancy acceptance tests
      // Can we clone this use when doing the transformation?
      // If all uses are from Phis at this merge or constants, then YES.
      if( !v->in(0) && v != cmp ) {
        tty->print_cr("Phi has free-floating use");
        v->dump(2);
        return nullptr;
      }
      for( uint l = 1; l < v->req(); l++ ) {
        if( (!v->in(l)->is_Phi() || v->in(l)->in(0) != r) &&
            !v->in(l)->is_Con() ) {
          tty->print_cr("Phi has use");
          v->dump(2);
          return nullptr;
        } // End of if Phi-use input is neither Phi nor Constant
      } // End of for all inputs to Phi-use
      */
    } // End of for all uses of Phi
  } // End of for all uses of Region

  // Only do this if the IF node is in a sane state
  if (iff->outcnt() != 2)
    return nullptr;

  // Got a hit!  Do the Mondo Hack!
  //
  //ABC  a1c   def   ghi            B     1     e     h   A C   a c   d f   g i
  // R - Phi - Phi - Phi            Rc - Phi - Phi - Phi   Rx - Phi - Phi - Phi
  //     cmp - 2                         cmp - 2                 cmp - 2
  //       bool                            bool_c                  bool_x
  //       if                               if_c                    if_x
  //      T  F                              T  F                    T  F
  // ..s..    ..t ..                   ..s..    ..t..          ..s..    ..t..
  //
  // Split the paths coming into the merge point into 2 separate groups of
  // merges.  On the left will be all the paths feeding constants into the
  // Cmp's Phi.  On the right will be the remaining paths.  The Cmp's Phi
  // will fold up into a constant; this will let the Cmp fold up as well as
  // all the control flow.  Below the original IF we have 2 control
  // dependent regions, 's' and 't'.  Now we will merge the two paths
  // just prior to 's' and 't' from the two IFs.  At least 1 path (and quite
  // likely 2 or more) will promptly constant fold away.
  PhaseGVN *phase = igvn;

  // Make a region merging constants and a region merging the rest
  uint req_c = 0;
  for (uint ii = 1; ii < r->req(); ii++) {
    if (phi->in(ii) == con1) {
      req_c++;
    }
    if (Node::may_be_loop_entry(r->in(ii))) {
      // Bail out if splitting through a region with a Parse Predicate input (could
      // also be a loop header before loop opts creates a LoopNode for it).
      return nullptr;
    }
  }

  // If all the defs of the phi are the same constant, we already have the desired end state.
  // Skip the split that would create empty phi and region nodes.
  if ((r->req() - req_c) == 1) {
    return nullptr;
  }

  // At this point we know that we can apply the split if optimization. If the region is still on the worklist,
  // we should wait until it is processed. The region might be removed which makes this optimization redundant.
  // This also avoids the creation of dead data loops when rewiring data nodes below when a region is dying.
  if (igvn->_worklist.member(r)) {
    igvn->_worklist.push(iff); // retry split if later again
    return nullptr;
  }

  // Build the two new merge points: region_c/phi_c take the constant inputs,
  // region_x/phi_x take everything else.
  Node *region_c = new RegionNode(req_c + 1);
  Node *phi_c    = con1;
  uint  len      = r->req();
  Node *region_x = new RegionNode(len - req_c);
  Node *phi_x    = PhiNode::make_blank(region_x, phi);
  for (uint i = 1, i_c = 1, i_x = 1; i < len; i++) {
    if (phi->in(i) == con1) {
      region_c->init_req( i_c++, r  ->in(i) );
    } else {
      region_x->init_req( i_x,   r  ->in(i) );
      phi_x   ->init_req( i_x++, phi->in(i) );
    }
  }

  // Register the new RegionNodes but do not transform them.  Cannot
  // transform until the entire Region/Phi conglomerate has been hacked
  // as a single huge transform.
  igvn->register_new_node_with_optimizer( region_c );
  igvn->register_new_node_with_optimizer( region_x );
  // Prevent the untimely death of phi_x.  Currently he has no uses.  He is
  // about to get one.  If this only use goes away, then phi_x will look dead.
  // However, he will be picking up some more uses down below.
  Node *hook = new Node(4);
  hook->init_req(0, phi_x);
  hook->init_req(1, phi_c);
  phi_x = phase->transform( phi_x );

  // Make the compare: the constant side folds to a constant ('t'), the other
  // side keeps a cloned Cmp against phi_x.
  Node *cmp_c = phase->makecon(t);
  Node *cmp_x = cmp->clone();
  cmp_x->set_req(1,phi_x);
  cmp_x->set_req(2,con2);
  cmp_x = phase->transform(cmp_x);
  // Make the bool
  Node *b_c = phase->transform(new BoolNode(cmp_c,b->_test._test));
  Node *b_x = phase->transform(new BoolNode(cmp_x,b->_test._test));
  // Make the IfNode
  IfNode* iff_c = iff->clone()->as_If();
  iff_c->set_req(0, region_c);
  iff_c->set_req(1, b_c);
  igvn->set_type_bottom(iff_c);
  igvn->_worklist.push(iff_c);
  hook->init_req(2, iff_c);

  IfNode* iff_x = iff->clone()->as_If();
  iff_x->set_req(0, region_x);
  iff_x->set_req(1, b_x);
  igvn->set_type_bottom(iff_x);
  igvn->_worklist.push(iff_x);
  hook->init_req(3, iff_x);

  // Make the true/false arms
  Node *iff_c_t = phase->transform(new IfTrueNode (iff_c));
  Node *iff_c_f = phase->transform(new IfFalseNode(iff_c));
  Node *iff_x_t = phase->transform(new IfTrueNode (iff_x));
  Node *iff_x_f = phase->transform(new IfFalseNode(iff_x));

  // Merge the TRUE paths
  Node *region_s = new RegionNode(3);
  igvn->_worklist.push(region_s);
  region_s->init_req(1, iff_c_t);
  region_s->init_req(2, iff_x_t);
  igvn->register_new_node_with_optimizer( region_s );

  // Merge the FALSE paths
  Node *region_f = new RegionNode(3);
  igvn->_worklist.push(region_f);
  region_f->init_req(1, iff_c_f);
  region_f->init_req(2, iff_x_f);
  igvn->register_new_node_with_optimizer( region_f );

  igvn->hash_delete(cmp);// Remove soon-to-be-dead node from hash table.
  cmp->set_req(1,nullptr);  // Whack the inputs to cmp because it will be dead
  cmp->set_req(2,nullptr);
  // Check for all uses of the Phi and give them a new home.
  // The 'cmp' got cloned, but CastPP/IIs need to be moved.
  Node *phi_s = nullptr;     // do not construct unless needed
  Node *phi_f = nullptr;     // do not construct unless needed
  for (DUIterator_Last i2min, i2 = phi->last_outs(i2min); i2 >= i2min; --i2) {
    Node* v = phi->last_out(i2);// User of the phi
    igvn->rehash_node_delayed(v); // Have to fixup other Phi users
    uint vop = v->Opcode();
    // Find the projection of the original If that this use is control
    // dependent on (directly for casts, via its region for remote Phis).
    Node *proj = nullptr;
    if( vop == Op_Phi ) {       // Remote merge point
      Node *r = v->in(0);
      for (uint i3 = 1; i3 < r->req(); i3++)
        if (r->in(i3) && r->in(i3)->in(0) == iff) {
          proj = r->in(i3);
          break;
        }
    } else if( v->is_ConstraintCast() ) {
      proj = v->in(0);          // Controlling projection
    } else {
      assert( 0, "do not know how to handle this guy" );
    }
    guarantee(proj != nullptr, "sanity");

    // Pick the new merged data/control home depending on which arm the
    // old projection belonged to; lazily build phi_s/phi_f.
    Node *proj_path_data, *proj_path_ctrl;
    if( proj->Opcode() == Op_IfTrue ) {
      if( phi_s == nullptr ) {
        // Only construct phi_s if needed, otherwise provides
        // interfering use.
        phi_s = PhiNode::make_blank(region_s,phi);
        phi_s->init_req( 1, phi_c );
        phi_s->init_req( 2, phi_x );
        hook->add_req(phi_s);
        phi_s = phase->transform(phi_s);
      }
      proj_path_data = phi_s;
      proj_path_ctrl = region_s;
    } else {
      if( phi_f == nullptr ) {
        // Only construct phi_f if needed, otherwise provides
        // interfering use.
        phi_f = PhiNode::make_blank(region_f,phi);
        phi_f->init_req( 1, phi_c );
        phi_f->init_req( 2, phi_x );
        hook->add_req(phi_f);
        phi_f = phase->transform(phi_f);
      }
      proj_path_data = phi_f;
      proj_path_ctrl = region_f;
    }

    // Fixup 'v' for for the split
    if( vop == Op_Phi ) {       // Remote merge point
      uint i;
      for( i = 1; i < v->req(); i++ )
        if( v->in(i) == phi )
          break;
      v->set_req(i, proj_path_data );
    } else if( v->is_ConstraintCast() ) {
      v->set_req(0, proj_path_ctrl );
      v->set_req(1, proj_path_data );
    } else
      ShouldNotReachHere();
  }

  // Now replace the original iff's True/False with region_s/region_t.
  // This makes the original iff go dead.
  for (DUIterator_Last i3min, i3 = iff->last_outs(i3min); i3 >= i3min; --i3) {
    Node* p = iff->last_out(i3);
    assert( p->Opcode() == Op_IfTrue || p->Opcode() == Op_IfFalse, "" );
    Node *u = (p->Opcode() == Op_IfTrue) ? region_s : region_f;
    // Replace p with u
    igvn->add_users_to_worklist(p);
    for (DUIterator_Last lmin, l = p->last_outs(lmin); l >= lmin;) {
      Node* x = p->last_out(l);
      igvn->hash_delete(x);
      uint uses_found = 0;
      for( uint j = 0; j < x->req(); j++ ) {
        if( x->in(j) == p ) {
          x->set_req(j, u);
          uses_found++;
        }
      }
      l -= uses_found;    // we deleted 1 or more copies of this edge
    }
    igvn->remove_dead_node(p, PhaseIterGVN::NodeOrigin::Graph);
  }

  // Force the original merge dead
  igvn->hash_delete(r);
  // First, remove region's dead users.
  for (DUIterator_Last lmin, l = r->last_outs(lmin); l >= lmin;) {
    Node* u = r->last_out(l);
    if( u == r ) {
      r->set_req(0, nullptr);
    } else {
      assert(u->outcnt() == 0, "only dead users");
      igvn->remove_dead_node(u, PhaseIterGVN::NodeOrigin::Graph);
    }
    l -= 1;
  }
  igvn->remove_dead_node(r, PhaseIterGVN::NodeOrigin::Graph);

  // Now remove the bogus extra edges used to keep things alive
  igvn->remove_dead_node(hook, PhaseIterGVN::NodeOrigin::Speculative);

  // Must return either the original node (now dead) or a new node
  // (Do not return a top here, since that would break the uniqueness of top.)
  return new ConINode(TypeInt::ZERO);
}
471
472 IfNode* IfNode::make_with_same_profile(IfNode* if_node_profile, Node* ctrl, Node* bol) {
473 // Assert here that we only try to create a clone from an If node with the same profiling if that actually makes sense.
474 // Some If node subtypes should not be cloned in this way. In theory, we should not clone BaseCountedLoopEndNodes.
475 // But they can end up being used as normal If nodes when peeling a loop - they serve as zero-trip guard.
476 // Allow them as well.
477 assert(if_node_profile->Opcode() == Op_If || if_node_profile->is_RangeCheck()
478 || if_node_profile->is_BaseCountedLoopEnd(), "should not clone other nodes");
479 if (if_node_profile->is_RangeCheck()) {
480 // RangeCheck nodes could be further optimized.
481 return new RangeCheckNode(ctrl, bol, if_node_profile->_prob, if_node_profile->_fcnt);
482 } else {
483 // Not a RangeCheckNode? Fall back to IfNode.
484 return new IfNode(ctrl, bol, if_node_profile->_prob, if_node_profile->_fcnt);
485 }
486 }
487
488 // if this IfNode follows a range check pattern return the projection
489 // for the failed path
490 IfProjNode* IfNode::range_check_trap_proj(int& flip_test, Node*& l, Node*& r) const {
491 if (outcnt() != 2) {
492 return nullptr;
493 }
494 Node* b = in(1);
495 if (b == nullptr || !b->is_Bool()) return nullptr;
496 BoolNode* bn = b->as_Bool();
497 Node* cmp = bn->in(1);
498 if (cmp == nullptr) return nullptr;
499 if (cmp->Opcode() != Op_CmpU) return nullptr;
500
501 l = cmp->in(1);
502 r = cmp->in(2);
503 flip_test = 1;
504 if (bn->_test._test == BoolTest::le) {
505 l = cmp->in(2);
506 r = cmp->in(1);
507 flip_test = 2;
508 } else if (bn->_test._test != BoolTest::lt) {
509 return nullptr;
510 }
511 if (l->is_top()) return nullptr; // Top input means dead test
512 if (r->Opcode() != Op_LoadRange && !is_RangeCheck()) return nullptr;
513
514 // We have recognized one of these forms:
515 // Flip 1: If (Bool[<] CmpU(l, LoadRange)) ...
516 // Flip 2: If (Bool[<=] CmpU(LoadRange, l)) ...
517
518 if (flip_test == 2) {
519 return true_proj_or_null();
520 }
521 return false_proj_or_null();
522 }
523
524
525 //------------------------------is_range_check---------------------------------
526 // Return 0 if not a range check. Return 1 if a range check and set index and
527 // offset. Return 2 if we had to negate the test. Index is null if the check
528 // is versus a constant.
529 int RangeCheckNode::is_range_check(Node* &range, Node* &index, jint &offset) {
530 int flip_test = 0;
531 Node* l = nullptr;
532 Node* r = nullptr;
533 IfProjNode* iftrap = range_check_trap_proj(flip_test, l, r);
534
535 if (iftrap == nullptr) {
536 return 0;
537 }
538
539 // Make sure it's a real range check by requiring an uncommon trap
540 // along the OOB path. Otherwise, it's possible that the user wrote
541 // something which optimized to look like a range check but behaves
542 // in some other way.
543 if (iftrap->is_uncommon_trap_proj(Deoptimization::Reason_range_check) == nullptr) {
544 return 0;
545 }
546
547 // Look for index+offset form
548 Node* ind = l;
549 jint off = 0;
550 if (l->is_top()) {
551 return 0;
552 } else if (l->Opcode() == Op_AddI) {
553 if ((off = l->in(1)->find_int_con(0)) != 0) {
554 ind = l->in(2)->uncast();
555 } else if ((off = l->in(2)->find_int_con(0)) != 0) {
556 ind = l->in(1)->uncast();
557 }
558 } else if ((off = l->find_int_con(-1)) >= 0) {
559 // constant offset with no variable index
560 ind = nullptr;
561 } else {
562 // variable index with no constant offset (or dead negative index)
563 off = 0;
564 }
565
566 // Return all the values:
567 index = ind;
568 offset = off;
569 range = r;
570 return flip_test;
571 }
572
//------------------------------adjust_check-----------------------------------
// Adjust (widen) a prior range check so it also covers an access at constant
// offset 'off_lo'. 'proj' is the projection below the dominating check,
// 'range'/'index' describe the access, and 'flip' selects the operand order
// of the unsigned compare (1: index vs range, otherwise: range vs index).
static void adjust_check(IfProjNode* proj, Node* range, Node* index,
                         int flip, jint off_lo, PhaseIterGVN* igvn) {
  PhaseGVN *gvn = igvn;
  // Break apart the old check
  Node *iff = proj->in(0);
  Node *bol = iff->in(1);
  if( bol->is_top() ) return;   // In case a partially dead range check appears
  // bail (or bomb[ASSERT/DEBUG]) if NOT projection-->IfNode-->BoolNode
  DEBUG_ONLY( if (!bol->is_Bool()) { proj->dump(3); fatal("Expect projection-->IfNode-->BoolNode"); } )
  if (!bol->is_Bool()) return;

  Node *cmp = bol->in(1);
  // Compute a new check: compare (index + off_lo) against the range.
  Node *new_add = gvn->intcon(off_lo);
  if (index) {
    // Skip the AddI entirely when the constant offset is zero.
    new_add = off_lo ? gvn->transform(new AddINode(index, new_add)) : index;
  }
  Node *new_cmp = (flip == 1)
    ? new CmpUNode(new_add, range)
    : new CmpUNode(range, new_add);
  new_cmp = gvn->transform(new_cmp);
  // See if no need to adjust the existing check
  if (new_cmp == cmp) return;
  // Else, adjust existing check
  Node* new_bol = gvn->transform(new BoolNode(new_cmp, bol->as_Bool()->_test._test));
  igvn->rehash_node_delayed(iff);
  iff->set_req_X(1, new_bol, igvn);
  // As part of range check smearing, this range check is widened. Loads and range check Cast nodes that are control
  // dependent on this range check now depend on multiple dominating range checks. These control dependent nodes end up
  // at the lowest/nearest dominating check in the graph. To ensure that these Loads/Casts do not float above any of the
  // dominating checks (even when the lowest dominating check is later replaced by yet another dominating check), we
  // need to pin them at the lowest dominating check.
  proj->pin_dependent_nodes(igvn);
}
609
610 //------------------------------up_one_dom-------------------------------------
611 // Walk up the dominator tree one step. Return null at root or true
612 // complex merges. Skips through small diamonds.
613 Node* IfNode::up_one_dom(Node *curr, bool linear_only) {
614 Node *dom = curr->in(0);
615 if( !dom ) // Found a Region degraded to a copy?
616 return curr->nonnull_req(); // Skip thru it
617
618 if( curr != dom ) // Normal walk up one step?
619 return dom;
620
621 // Use linear_only if we are still parsing, since we cannot
622 // trust the regions to be fully filled in.
623 if (linear_only)
624 return nullptr;
625
626 if( dom->is_Root() )
627 return nullptr;
628
629 // Else hit a Region. Check for a loop header
630 if( dom->is_Loop() )
631 return dom->in(1); // Skip up thru loops
632
633 // Check for small diamonds
634 Node *din1, *din2, *din3, *din4;
635 if( dom->req() == 3 && // 2-path merge point
636 (din1 = dom ->in(1)) && // Left path exists
637 (din2 = dom ->in(2)) && // Right path exists
638 (din3 = din1->in(0)) && // Left path up one
639 (din4 = din2->in(0)) ) { // Right path up one
640 if( din3->is_Call() && // Handle a slow-path call on either arm
641 (din3 = din3->in(0)) )
642 din3 = din3->in(0);
643 if( din4->is_Call() && // Handle a slow-path call on either arm
644 (din4 = din4->in(0)) )
645 din4 = din4->in(0);
646 if (din3 != nullptr && din3 == din4 && din3->is_If()) // Regions not degraded to a copy
647 return din3; // Skip around diamonds
648 }
649
650 // Give up the search at true merges
651 return nullptr; // Dead loop? Or hit root?
652 }
653
654
655 //------------------------------filtered_int_type--------------------------------
656 // Return a possibly more restrictive type for val based on condition control flow for an if
657 const TypeInt* IfNode::filtered_int_type(PhaseGVN* gvn, Node* val, Node* if_proj) {
658 assert(if_proj &&
659 (if_proj->Opcode() == Op_IfTrue || if_proj->Opcode() == Op_IfFalse), "expecting an if projection");
660 if (if_proj->in(0) && if_proj->in(0)->is_If()) {
661 IfNode* iff = if_proj->in(0)->as_If();
662 if (iff->in(1) && iff->in(1)->is_Bool()) {
663 BoolNode* bol = iff->in(1)->as_Bool();
664 if (bol->in(1) && bol->in(1)->is_Cmp()) {
665 const CmpNode* cmp = bol->in(1)->as_Cmp();
666 if (cmp->in(1) == val) {
667 const TypeInt* cmp2_t = gvn->type(cmp->in(2))->isa_int();
668 if (cmp2_t != nullptr) {
669 jint lo = cmp2_t->_lo;
670 jint hi = cmp2_t->_hi;
671 BoolTest::mask msk = if_proj->Opcode() == Op_IfTrue ? bol->_test._test : bol->_test.negate();
672 switch (msk) {
673 case BoolTest::ne: {
674 // If val is compared to its lower or upper bound, we can narrow the type
675 const TypeInt* val_t = gvn->type(val)->isa_int();
676 if (val_t != nullptr && !val_t->singleton() && cmp2_t->is_con()) {
677 if (val_t->_lo == lo) {
678 return TypeInt::make(val_t->_lo + 1, val_t->_hi, val_t->_widen);
679 } else if (val_t->_hi == hi) {
680 return TypeInt::make(val_t->_lo, val_t->_hi - 1, val_t->_widen);
681 }
682 }
683 // Can't refine type
684 return nullptr;
685 }
686 case BoolTest::eq:
687 return cmp2_t;
688 case BoolTest::lt:
689 lo = TypeInt::INT->_lo;
690 if (hi != min_jint) {
691 hi = hi - 1;
692 }
693 break;
694 case BoolTest::le:
695 lo = TypeInt::INT->_lo;
696 break;
697 case BoolTest::gt:
698 if (lo != max_jint) {
699 lo = lo + 1;
700 }
701 hi = TypeInt::INT->_hi;
702 break;
703 case BoolTest::ge:
704 // lo unchanged
705 hi = TypeInt::INT->_hi;
706 break;
707 default:
708 break;
709 }
710 const TypeInt* rtn_t = TypeInt::make(lo, hi, cmp2_t->_widen);
711 return rtn_t;
712 }
713 }
714 }
715 }
716 }
717 return nullptr;
718 }
719
720 //------------------------------fold_compares----------------------------
721 // See if a pair of CmpIs can be converted into a CmpU. In some cases
722 // the direction of this if is determined by the preceding if so it
// can be eliminated entirely.
724 //
725 // Given an if testing (CmpI n v) check for an immediately control
726 // dependent if that is testing (CmpI n v2) and has one projection
727 // leading to this if and the other projection leading to a region
728 // that merges one of this ifs control projections.
729 //
730 // If
731 // / |
732 // / |
733 // / |
734 // If |
735 // /\ |
736 // / \ |
737 // / \ |
738 // / Region
739 //
740 // Or given an if testing (CmpI n v) check for a dominating if that is
741 // testing (CmpI n v2), both having one projection leading to an
// uncommon trap. Allow another independent guard in between to cover
743 // an explicit range check:
744 // if (index < 0 || index >= array.length) {
745 // which may need a null check to guard the LoadRange
746 //
747 // If
748 // / \
749 // / \
750 // / \
751 // If unc
752 // /\
753 // / \
754 // / \
755 // / unc
756 //
757
758 // Is the comparison for this If suitable for folding?
759 bool IfNode::cmpi_folds(PhaseIterGVN* igvn, bool fold_ne) {
760 return in(1) != nullptr &&
761 in(1)->is_Bool() &&
762 in(1)->in(1) != nullptr &&
763 in(1)->in(1)->Opcode() == Op_CmpI &&
764 in(1)->in(1)->in(2) != nullptr &&
765 in(1)->in(1)->in(2) != igvn->C->top() &&
766 (in(1)->as_Bool()->_test.is_less() ||
767 in(1)->as_Bool()->_test.is_greater() ||
768 (fold_ne && in(1)->as_Bool()->_test._test == BoolTest::ne));
769 }
770
771 // Is a dominating control suitable for folding with this if?
772 bool IfNode::is_ctrl_folds(Node* ctrl, PhaseIterGVN* igvn) {
773 return ctrl != nullptr &&
774 ctrl->is_IfProj() &&
775 ctrl->outcnt() == 1 && // No side-effects
776 ctrl->in(0) != nullptr &&
777 ctrl->in(0)->Opcode() == Op_If &&
778 ctrl->in(0)->outcnt() == 2 &&
779 ctrl->in(0)->as_If()->cmpi_folds(igvn, true) &&
780 // Must compare same value
781 ctrl->in(0)->in(1)->in(1)->in(1) != nullptr &&
782 ctrl->in(0)->in(1)->in(1)->in(1) != igvn->C->top() &&
783 ctrl->in(0)->in(1)->in(1)->in(1) == in(1)->in(1)->in(1);
784 }
785
786 // Do this If and the dominating If share a region?
787 bool IfNode::has_shared_region(IfProjNode* proj, IfProjNode*& success, IfProjNode*& fail) const {
788 IfProjNode* otherproj = proj->other_if_proj();
789 Node* otherproj_ctrl_use = otherproj->unique_ctrl_out_or_null();
790 RegionNode* region = (otherproj_ctrl_use != nullptr && otherproj_ctrl_use->is_Region()) ? otherproj_ctrl_use->as_Region() : nullptr;
791 success = nullptr;
792 fail = nullptr;
793
794 if (otherproj->outcnt() == 1 && region != nullptr && !region->has_phi()) {
795 for (int i = 0; i < 2; i++) {
796 IfProjNode* next_proj = proj_out(i)->as_IfProj();
797 if (success == nullptr && next_proj->outcnt() == 1 && next_proj->unique_out() == region) {
798 success = next_proj;
799 } else if (fail == nullptr) {
800 fail = next_proj;
801 } else {
802 success = nullptr;
803 fail = nullptr;
804 }
805 }
806 }
807 return success != nullptr && fail != nullptr;
808 }
809
810 bool IfNode::is_dominator_unc(CallStaticJavaNode* dom_unc, CallStaticJavaNode* unc) {
811 // Different methods and methods containing jsrs are not supported.
812 ciMethod* method = unc->jvms()->method();
813 ciMethod* dom_method = dom_unc->jvms()->method();
814 if (method != dom_method || method->has_jsrs()) {
815 return false;
816 }
817 // Check that both traps are in the same activation of the method (instead
818 // of two activations being inlined through different call sites) by verifying
819 // that the call stacks are equal for both JVMStates.
820 JVMState* dom_caller = dom_unc->jvms()->caller();
821 JVMState* caller = unc->jvms()->caller();
822 if ((dom_caller == nullptr) != (caller == nullptr)) {
823 // The current method must either be inlined into both dom_caller and
824 // caller or must not be inlined at all (top method). Bail out otherwise.
825 return false;
826 } else if (dom_caller != nullptr && !dom_caller->same_calls_as(caller)) {
827 return false;
828 }
829 // Check that the bci of the dominating uncommon trap dominates the bci
830 // of the dominated uncommon trap. Otherwise we may not re-execute
831 // the dominated check after deoptimization from the merged uncommon trap.
832 ciTypeFlow* flow = dom_method->get_flow_analysis();
833 int bci = unc->jvms()->bci();
834 int dom_bci = dom_unc->jvms()->bci();
835 if (!flow->is_dominated_by(bci, dom_bci)) {
836 return false;
837 }
838
839 return true;
840 }
841
842 // Return projection that leads to an uncommon trap if any
843 ProjNode* IfNode::uncommon_trap_proj(CallStaticJavaNode*& call, Deoptimization::DeoptReason reason) const {
844 for (int i = 0; i < 2; i++) {
845 call = proj_out(i)->is_uncommon_trap_proj(reason);
846 if (call != nullptr) {
847 return proj_out(i);
848 }
849 }
850 return nullptr;
851 }
852
// Do this If and the dominating If both branch out to an uncommon trap?
// On success, sets 'success' to the projection leading to this If's trap and
// 'fail' to its other projection.
bool IfNode::has_only_uncommon_traps(IfProjNode* proj, IfProjNode*& success, IfProjNode*& fail, PhaseIterGVN* igvn) const {
  IfProjNode* otherproj = proj->other_if_proj();
  CallStaticJavaNode* dom_unc = otherproj->is_uncommon_trap_proj();

  if (otherproj->outcnt() == 1 && dom_unc != nullptr) {
    // We need to re-execute the folded Ifs after deoptimization from the merged traps
    if (!dom_unc->jvms()->should_reexecute()) {
      return false;
    }

    // This If must also branch to an uncommon trap (any reason).
    CallStaticJavaNode* unc = nullptr;
    ProjNode* unc_proj = uncommon_trap_proj(unc);
    if (unc_proj != nullptr && unc_proj->outcnt() == 1) {
      if (dom_unc == unc) {
        // Allow the uncommon trap to be shared through a region
        RegionNode* r = unc->in(0)->as_Region();
        if (r->outcnt() != 2 || r->req() != 3 || r->find_edge(otherproj) == -1 || r->find_edge(unc_proj) == -1) {
          return false;
        }
        assert(r->has_phi() == nullptr, "simple region shouldn't have a phi");
      } else if (dom_unc->in(0) != otherproj || unc->in(0) != unc_proj) {
        // Otherwise each trap must hang directly off its own projection.
        return false;
      }

      if (!is_dominator_unc(dom_unc, unc)) {
        return false;
      }

      // See merge_uncommon_traps: the reason of the uncommon trap
      // will be changed and the state of the dominating If will be
      // used. Checked that we didn't apply this transformation in a
      // previous compilation and it didn't cause too many traps
      ciMethod* dom_method = dom_unc->jvms()->method();
      int dom_bci = dom_unc->jvms()->bci();
      if (!igvn->C->too_many_traps(dom_method, dom_bci, Deoptimization::Reason_unstable_fused_if) &&
          !igvn->C->too_many_traps(dom_method, dom_bci, Deoptimization::Reason_range_check) &&
          // Return true if c2 manages to reconcile with UnstableIf optimization. See the comments for it.
          igvn->C->remove_unstable_if_trap(dom_unc, true/*yield*/)) {
        success = unc_proj->as_IfProj();
        fail = unc_proj->as_IfProj()->other_if_proj();
        return true;
      }
    }
  }
  return false;
}
900
// Check that the 2 CmpI can be folded into a single CmpU and proceed with the folding.
// 'this' is an If of a CmpI testing value n against bound 'hi'; 'proj' leads here from
// a dominating If of a CmpI testing the same n against bound 'lo'. When the two signed
// tests form a range check, this If's test is replaced by one unsigned test
// (n - lo) {<u,>=u} adjusted_lim and the dominating If is constant-folded.
// Returns true if the graph was changed.
bool IfNode::fold_compares_helper(IfProjNode* proj, IfProjNode* success, IfProjNode* fail, PhaseIterGVN* igvn) {
  Node* this_cmp = in(1)->in(1);
  BoolNode* this_bool = in(1)->as_Bool();
  IfNode* dom_iff = proj->in(0)->as_If();
  BoolNode* dom_bool = dom_iff->in(1)->as_Bool();
  Node* lo = dom_iff->in(1)->in(1)->in(2);  // bound tested by the dominating If
  Node* orig_lo = lo;  // remembered so a speculatively created AddI can be cleaned up below
  Node* hi = this_cmp->in(2);               // bound tested by this If
  Node* n = this_cmp->in(1);                // the value both Ifs test
  IfProjNode* otherproj = proj->other_if_proj();

  // Value ranges of n as known on each relevant path (null if nothing useful is known).
  const TypeInt* lo_type = IfNode::filtered_int_type(igvn, n, otherproj);
  const TypeInt* hi_type = IfNode::filtered_int_type(igvn, n, success);

  BoolTest::mask lo_test = dom_bool->_test._test;
  BoolTest::mask hi_test = this_bool->_test._test;
  BoolTest::mask cond = hi_test;

  // NOTE(review): nodes created below are speculative until progress_guard.commit() is
  // called on the success paths — presumably the guard suppresses/undoes IGVN progress
  // otherwise; confirm semantics in phaseX.hpp.
  PhaseTransform::SpeculativeProgressGuard progress_guard(igvn);
  // convert:
  //
  //          dom_bool = x {<,<=,>,>=} a
  //                         /    \
  //   proj = {True,False}  /      \  otherproj = {False,True}
  //                       /
  //        this_bool = x {<,<=} b
  //                       /    \
  //   fail = {True,False}/      \  success = {False,True}
  //                     /
  //
  // (Second test guaranteed canonicalized, first one may not have
  // been canonicalized yet)
  //
  // into:
  //
  //   cond = (x - lo) {<u,<=u,>u,>=u} adjusted_lim
  //                       /    \
  //                 fail /      \ success
  //                     /
  //

  // Figure out which of the two tests sets the upper bound and which
  // sets the lower bound if any.
  Node* adjusted_lim = nullptr;
  if (lo_type != nullptr && hi_type != nullptr && hi_type->_lo > lo_type->_hi &&
      hi_type->_hi == max_jint && lo_type->_lo == min_jint && lo_test != BoolTest::ne) {
    // Case 1: the dominating test sets the lower bound, this test the upper bound.
    assert((dom_bool->_test.is_less() && !proj->_con) ||
           (dom_bool->_test.is_greater() && proj->_con), "incorrect test");

    // this_bool = <
    //   dom_bool = >= (proj = True) or dom_bool = < (proj = False)
    //     x in [a, b[ on the fail (= True) projection, b > a-1 (because of hi_type->_lo > lo_type->_hi test above):
    //     lo = a, hi = b, adjusted_lim = b-a, cond = <u
    //   dom_bool = > (proj = True) or dom_bool = <= (proj = False)
    //     x in ]a, b[ on the fail (= True) projection, b > a:
    //     lo = a+1, hi = b, adjusted_lim = b-a-1, cond = <u
    // this_bool = <=
    //   dom_bool = >= (proj = True) or dom_bool = < (proj = False)
    //     x in [a, b] on the fail (= True) projection, b+1 > a-1:
    //     lo = a, hi = b, adjusted_lim = b-a+1, cond = <u
    //     lo = a, hi = b, adjusted_lim = b-a, cond = <=u doesn't work because b = a - 1 is possible, then b-a = -1
    //   dom_bool = > (proj = True) or dom_bool = <= (proj = False)
    //     x in ]a, b] on the fail (= True) projection b+1 > a:
    //     lo = a+1, hi = b, adjusted_lim = b-a, cond = <u
    //     lo = a+1, hi = b, adjusted_lim = b-a-1, cond = <=u doesn't work because a = b is possible, then b-a-1 = -1

    if (hi_test == BoolTest::lt) {
      if (lo_test == BoolTest::gt || lo_test == BoolTest::le) {
        // strict lower bound: shift lo up by one to make the interval half-open
        lo = igvn->transform(new AddINode(lo, igvn->intcon(1)));
      }
    } else if (hi_test == BoolTest::le) {
      if (lo_test == BoolTest::ge || lo_test == BoolTest::lt) {
        adjusted_lim = igvn->transform(new SubINode(hi, lo));
        adjusted_lim = igvn->transform(new AddINode(adjusted_lim, igvn->intcon(1)));
        cond = BoolTest::lt;
      } else if (lo_test == BoolTest::gt || lo_test == BoolTest::le) {
        adjusted_lim = igvn->transform(new SubINode(hi, lo));
        lo = igvn->transform(new AddINode(lo, igvn->intcon(1)));
        cond = BoolTest::lt;
      } else {
        assert(false, "unhandled lo_test: %d", lo_test);
        return false;
      }
    } else {
      // Only reachable when this If's Bool is about to be re-canonicalized by IGVN.
      assert(igvn->_worklist.member(in(1)) && in(1)->Value(igvn) != igvn->type(in(1)), "unhandled hi_test: %d", hi_test);
      return false;
    }
    // this test was canonicalized
    assert(this_bool->_test.is_less() && fail->_con, "incorrect test");
  } else if (lo_type != nullptr && hi_type != nullptr && lo_type->_lo > hi_type->_hi &&
             lo_type->_hi == max_jint && hi_type->_lo == min_jint && lo_test != BoolTest::ne) {
    // Case 2 (mirror of case 1): the dominating test sets the upper bound.

    // this_bool = <
    //   dom_bool = < (proj = True) or dom_bool = >= (proj = False)
    //     x in [b, a[ on the fail (= False) projection, a > b-1 (because of lo_type->_lo > hi_type->_hi above):
    //     lo = b, hi = a, adjusted_lim = a-b, cond = >=u
    //   dom_bool = <= (proj = True) or dom_bool = > (proj = False)
    //     x in [b, a] on the fail (= False) projection, a+1 > b-1:
    //     lo = b, hi = a, adjusted_lim = a-b+1, cond = >=u
    //     lo = b, hi = a, adjusted_lim = a-b, cond = >u doesn't work because a = b - 1 is possible, then b-a = -1
    // this_bool = <=
    //   dom_bool = < (proj = True) or dom_bool = >= (proj = False)
    //     x in ]b, a[ on the fail (= False) projection, a > b:
    //     lo = b+1, hi = a, adjusted_lim = a-b-1, cond = >=u
    //   dom_bool = <= (proj = True) or dom_bool = > (proj = False)
    //     x in ]b, a] on the fail (= False) projection, a+1 > b:
    //     lo = b+1, hi = a, adjusted_lim = a-b, cond = >=u
    //     lo = b+1, hi = a, adjusted_lim = a-b-1, cond = >u doesn't work because a = b is possible, then b-a-1 = -1

    // Normalize to the case-1 naming: after the swaps, 'lo'/'lo_test' again
    // describe the lower bound and 'hi'/'hi_test' the upper bound.
    swap(lo, hi);
    swap(lo_type, hi_type);
    swap(lo_test, hi_test);

    assert((dom_bool->_test.is_less() && proj->_con) ||
           (dom_bool->_test.is_greater() && !proj->_con), "incorrect test");

    cond = (hi_test == BoolTest::le || hi_test == BoolTest::gt) ? BoolTest::gt : BoolTest::ge;

    if (lo_test == BoolTest::lt) {
      if (hi_test == BoolTest::lt || hi_test == BoolTest::ge) {
        cond = BoolTest::ge;
      } else if (hi_test == BoolTest::le || hi_test == BoolTest::gt) {
        adjusted_lim = igvn->transform(new SubINode(hi, lo));
        adjusted_lim = igvn->transform(new AddINode(adjusted_lim, igvn->intcon(1)));
        cond = BoolTest::ge;
      } else {
        assert(false, "unhandled hi_test: %d", hi_test);
        return false;
      }
    } else if (lo_test == BoolTest::le) {
      if (hi_test == BoolTest::lt || hi_test == BoolTest::ge) {
        lo = igvn->transform(new AddINode(lo, igvn->intcon(1)));
        cond = BoolTest::ge;
      } else if (hi_test == BoolTest::le || hi_test == BoolTest::gt) {
        adjusted_lim = igvn->transform(new SubINode(hi, lo));
        lo = igvn->transform(new AddINode(lo, igvn->intcon(1)));
        cond = BoolTest::ge;
      } else {
        assert(false, "unhandled hi_test: %d", hi_test);
        return false;
      }
    } else {
      // Only reachable when this If's Bool is about to be re-canonicalized by IGVN.
      assert(igvn->_worklist.member(in(1)) && in(1)->Value(igvn) != igvn->type(in(1)), "unhandled lo_test: %d", lo_test);
      return false;
    }
    // this test was canonicalized
    assert(this_bool->_test.is_less() && !fail->_con, "incorrect test");
  } else {
    // Not a recognizable range-check pair, but the dominating test may still
    // fully determine this one: then this If's condition becomes a constant.
    const TypeInt* failtype = filtered_int_type(igvn, n, proj);
    if (failtype != nullptr) {
      const TypeInt* type2 = filtered_int_type(igvn, n, fail);
      if (type2 != nullptr) {
        if (failtype->filter(type2) == Type::TOP) {
          // previous if determines the result of this if so
          // replace Bool with constant
          igvn->replace_input_of(this, 1, igvn->intcon(success->_con));
          progress_guard.commit();
          return true;
        }
      }
    }
    return false;
  }

  assert(lo != nullptr && hi != nullptr, "sanity");
  Node* hook = new Node(lo); // Add a use to lo to prevent him from dying
  // Merge the two compares into a single unsigned compare by building (CmpU (n - lo) (hi - lo))
  Node* adjusted_val = igvn->transform(new SubINode(n, lo));
  if (adjusted_lim == nullptr) {
    adjusted_lim = igvn->transform(new SubINode(hi, lo));
  }
  hook->destruct(igvn);

  if (adjusted_val->is_top() || adjusted_lim->is_top()) {
    return false;
  }

  if (igvn->type(adjusted_lim)->is_int()->_lo < 0 &&
      !igvn->C->post_loop_opts_phase()) {
    // If range check elimination applies to this comparison, it includes code to protect from overflows that may
    // cause the main loop to be skipped entirely. Delay this transformation.
    // Example:
    //   for (int i = 0; i < limit; i++) {
    //     if (i < max_jint && i > min_jint) {...
    //   }
    // Comparisons folded as:
    //   i - min_jint - 1 <u -2
    // when RC applies, main loop limit becomes:
    //   min(limit, max(-2 + min_jint + 1, min_jint))
    //   = min(limit, min_jint)
    //   = min_jint
    // Discard the speculatively created nodes that ended up unused.
    if (lo != orig_lo && lo->outcnt() == 0) {
      igvn->remove_dead_node(lo, PhaseIterGVN::NodeOrigin::Speculative);
    }
    if (adjusted_val->outcnt() == 0) {
      igvn->remove_dead_node(adjusted_val, PhaseIterGVN::NodeOrigin::Speculative);
    }
    if (adjusted_lim->outcnt() == 0) {
      igvn->remove_dead_node(adjusted_lim, PhaseIterGVN::NodeOrigin::Speculative);
    }
    igvn->C->record_for_post_loop_opts_igvn(this);
    return false;
  }

  // Install the unsigned range test on this If; force the dominating If to its
  // 'proj' side so it constant-folds away.
  Node* newcmp = igvn->transform(new CmpUNode(adjusted_val, adjusted_lim));
  Node* newbool = igvn->transform(new BoolNode(newcmp, cond));

  igvn->replace_input_of(dom_iff, 1, igvn->intcon(proj->_con));
  igvn->replace_input_of(this, 1, newbool);

  progress_guard.commit();
  return true;
}
1115
// Merge the branches that trap for this If and the dominating If into
// a single region that branches to the uncommon trap for the
// dominating If.
// Called after fold_compares_helper succeeded: 'success'/'fail' are this If's
// projections, 'proj' leads here from the dominating If. Returns either 'this'
// or a replacement RangeCheckNode.
Node* IfNode::merge_uncommon_traps(IfProjNode* proj, IfProjNode* success, IfProjNode* fail, PhaseIterGVN* igvn) {
  Node* res = this;
  assert(success->in(0) == this, "bad projection");

  IfProjNode* otherproj = proj->other_if_proj();

  CallStaticJavaNode* unc = success->is_uncommon_trap_proj();
  CallStaticJavaNode* dom_unc = otherproj->is_uncommon_trap_proj();

  if (unc != dom_unc) {
    // Distinct traps: merge both trapping projections into one region that
    // feeds the dominating trap; this If's own trap becomes unreachable.
    Node* r = new RegionNode(3);

    r->set_req(1, otherproj);
    r->set_req(2, success);
    r = igvn->transform(r);
    assert(r->is_Region(), "can't go away");

    // Make both If trap at the state of the first If: once the CmpI
    // nodes are merged, if we trap we don't know which of the CmpI
    // nodes would have caused the trap so we have to restart
    // execution at the first one
    igvn->replace_input_of(dom_unc, 0, r);
    igvn->replace_input_of(unc, 0, igvn->C->top());
  }
  int trap_request = dom_unc->uncommon_trap_request();
  // NOTE(review): 'reason' is decoded but not used below — only 'action' is; confirm.
  Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);
  Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);

  int flip_test = 0;
  Node* l = nullptr;
  Node* r = nullptr;

  if (success->in(0)->as_If()->range_check_trap_proj(flip_test, l, r) != nullptr) {
    // If this looks like a range check, change the trap to
    // Reason_range_check so the compiler recognizes it as a range
    // check and applies the corresponding optimizations
    trap_request = Deoptimization::make_trap_request(Deoptimization::Reason_range_check, action);

    improve_address_types(l, r, fail, igvn);

    res = igvn->transform(new RangeCheckNode(in(0), in(1), _prob, _fcnt));
  } else if (unc != dom_unc) {
    // If we trap we won't know what CmpI would have caused the trap
    // so use a special trap reason to mark this pair of CmpI nodes as
    // bad candidate for folding. On recompilation we won't fold them
    // and we may trap again but this time we'll know what branch
    // traps
    trap_request = Deoptimization::make_trap_request(Deoptimization::Reason_unstable_fused_if, action);
  }
  // Install the (possibly rewritten) trap request on the surviving trap call.
  igvn->replace_input_of(dom_unc, TypeFunc::Parms, igvn->intcon(trap_request));
  return res;
}
1171
// If we are turning 2 CmpI nodes into a CmpU that follows the pattern
// of a rangecheck on index i, on 64 bit the compares may be followed
// by memory accesses using i as index. In that case, the CmpU tells
// us something about the values taken by i that can help the compiler
// (see Compile::conv_I2X_index()).
// 'l' is the index, 'r' the LoadRange of the array, 'fail' the trapping
// projection: memory accesses dominated by 'fail' can use a narrowed ConvI2L.
void IfNode::improve_address_types(Node* l, Node* r, ProjNode* fail, PhaseIterGVN* igvn) {
#ifdef _LP64
  ResourceMark rm;
  // DFS over uses of l: level 0 is l itself, level 1 a widened ConvI2L,
  // deeper levels are the address computation down to a memory access.
  Node_Stack stack(2);

  assert(r->Opcode() == Op_LoadRange, "unexpected range check");
  const TypeInt* array_size = igvn->type(r)->is_int();

  stack.push(l, 0);

  while(stack.size() > 0) {
    Node* n = stack.node();
    uint start = stack.index();  // resume iteration over n's uses where we left off

    uint i = start;
    for (; i < n->outcnt(); i++) {
      Node* use = n->raw_out(i);
      if (stack.size() == 1) {
        // At the index itself: only descend into ConvI2L uses whose type is
        // strictly wider than the array size (i.e. candidates for narrowing).
        if (use->Opcode() == Op_ConvI2L) {
          const TypeLong* bounds = use->as_Type()->type()->is_long();
          if (bounds->_lo <= array_size->_lo && bounds->_hi >= array_size->_hi &&
              (bounds->_lo != array_size->_lo || bounds->_hi != array_size->_hi)) {
            stack.set_index(i+1);
            stack.push(use, 0);
            break;
          }
        }
      } else if (use->is_Mem()) {
        // Reached a memory access: check (bounded walk up to 10 dominators)
        // that it is dominated by the range check's projection.
        Node* ctrl = use->in(0);
        // NOTE: this inner 'int i' deliberately shadows the outer loop counter.
        for (int i = 0; i < 10 && ctrl != nullptr && ctrl != fail; i++) {
          ctrl = up_one_dom(ctrl);
        }
        if (ctrl == fail) {
          Node* init_n = stack.node_at(1);
          assert(init_n->Opcode() == Op_ConvI2L, "unexpected first node");
          // Create a new narrow ConvI2L node that is dependent on the range check
          Node* new_n = igvn->C->conv_I2X_index(igvn, l, array_size, fail);

          // The type of the ConvI2L may be widen and so the new
          // ConvI2L may not be better than an existing ConvI2L
          if (new_n != init_n) {
            // Re-clone the whole address chain from the ConvI2L down to (but
            // excluding) the memory access, substituting the narrowed node.
            for (uint j = 2; j < stack.size(); j++) {
              Node* n = stack.node_at(j);
              Node* clone = n->clone();
              int rep = clone->replace_edge(init_n, new_n, igvn);
              assert(rep > 0, "can't find expected node?");
              clone = igvn->transform(clone);
              init_n = n;
              new_n = clone;
            }
            igvn->hash_delete(use);
            int rep = use->replace_edge(init_n, new_n, igvn);
            assert(rep > 0, "can't find expected node?");
            igvn->transform(use);
            if (init_n->outcnt() == 0) {
              igvn->_worklist.push(init_n);
            }
          }
        }
      } else if (use->in(0) == nullptr && (igvn->type(use)->isa_long() ||
                                           igvn->type(use)->isa_ptr())) {
        // Floating long/pointer arithmetic: part of an address computation,
        // keep descending.
        stack.set_index(i+1);
        stack.push(use, 0);
        break;
      }
    }
    if (i == n->outcnt()) {
      // All uses of n visited; backtrack.
      stack.pop();
    }
  }
#endif
}
1249
1250 bool IfNode::is_cmp_with_loadrange(IfProjNode* proj) const {
1251 if (in(1) != nullptr &&
1252 in(1)->in(1) != nullptr &&
1253 in(1)->in(1)->in(2) != nullptr) {
1254 Node* other = in(1)->in(1)->in(2);
1255 if (other->Opcode() == Op_LoadRange &&
1256 ((other->in(0) != nullptr && other->in(0) == proj) ||
1257 (other->in(0) == nullptr &&
1258 other->in(2) != nullptr &&
1259 other->in(2)->is_AddP() &&
1260 other->in(2)->in(1) != nullptr &&
1261 other->in(2)->in(1)->Opcode() == Op_CastPP &&
1262 other->in(2)->in(1)->in(0) == proj))) {
1263 return true;
1264 }
1265 }
1266 return false;
1267 }
1268
1269 bool IfNode::is_null_check(IfProjNode* proj, PhaseIterGVN* igvn) const {
1270 Node* other = in(1)->in(1)->in(2);
1271 if (other->in(MemNode::Address) != nullptr &&
1272 proj->in(0)->in(1) != nullptr &&
1273 proj->in(0)->in(1)->is_Bool() &&
1274 proj->in(0)->in(1)->in(1) != nullptr &&
1275 proj->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
1276 proj->in(0)->in(1)->in(1)->in(2) != nullptr &&
1277 proj->in(0)->in(1)->in(1)->in(1) == other->in(MemNode::Address)->in(AddPNode::Address)->uncast() &&
1278 igvn->type(proj->in(0)->in(1)->in(1)->in(2)) == TypePtr::NULL_PTR) {
1279 return true;
1280 }
1281 return false;
1282 }
1283
1284 // Returns true if this IfNode belongs to a flat array check
1285 // and returns the corresponding array in the 'array' parameter.
1286 bool IfNode::is_flat_array_check(PhaseTransform* phase, Node** array) {
1287 Node* bol = in(1);
1288 if (!bol->is_Bool()) {
1289 return false;
1290 }
1291 Node* cmp = bol->in(1);
1292 if (cmp->isa_FlatArrayCheck()) {
1293 if (array != nullptr) {
1294 *array = cmp->in(FlatArrayCheckNode::ArrayOrKlass);
1295 }
1296 return true;
1297 }
1298 return false;
1299 }
1300
1301 // Check that the If that is in between the 2 integer comparisons has
1302 // no side effect
1303 bool IfNode::is_side_effect_free_test(IfProjNode* proj, PhaseIterGVN* igvn) const {
1304 if (proj == nullptr) {
1305 return false;
1306 }
1307 CallStaticJavaNode* unc = proj->is_uncommon_trap_if_pattern();
1308 if (unc != nullptr && proj->outcnt() <= 2) {
1309 if (proj->outcnt() == 1 ||
1310 // Allow simple null check from LoadRange
1311 (is_cmp_with_loadrange(proj) && is_null_check(proj, igvn))) {
1312 CallStaticJavaNode* unc = proj->is_uncommon_trap_if_pattern();
1313 CallStaticJavaNode* dom_unc = proj->in(0)->in(0)->as_Proj()->is_uncommon_trap_if_pattern();
1314 assert(dom_unc != nullptr, "is_uncommon_trap_if_pattern returned null");
1315
1316 // reroute_side_effect_free_unc changes the state of this
1317 // uncommon trap to restart execution at the previous
1318 // CmpI. Check that this change in a previous compilation didn't
1319 // cause too many traps.
1320 int trap_request = unc->uncommon_trap_request();
1321 Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);
1322
1323 if (igvn->C->too_many_traps(dom_unc->jvms()->method(), dom_unc->jvms()->bci(), reason)) {
1324 return false;
1325 }
1326
1327 if (!is_dominator_unc(dom_unc, unc)) {
1328 return false;
1329 }
1330
1331 return true;
1332 }
1333 }
1334 return false;
1335 }
1336
// Make the If between the 2 integer comparisons trap at the state of
// the first If: the last CmpI is the one replaced by a CmpU and the
// first CmpI is eliminated, so the test between the 2 CmpI nodes
// won't be guarded by the first CmpI anymore. It can trap in cases
// where the first CmpI would have prevented it from executing: on a
// trap, we need to restart execution at the state of the first CmpI
void IfNode::reroute_side_effect_free_unc(IfProjNode* proj, IfProjNode* dom_proj, PhaseIterGVN* igvn) {
  CallStaticJavaNode* dom_unc = dom_proj->is_uncommon_trap_if_pattern();
  IfProjNode* otherproj = proj->other_if_proj();
  CallStaticJavaNode* unc = proj->is_uncommon_trap_if_pattern();
  // The trap call's control successor and the node following it ('halt').
  Node* call_proj = dom_unc->unique_ctrl_out();
  Node* halt = call_proj->unique_ctrl_out();

  // Clone the dominating trap path (call, projection, halt) and hang it off a
  // clone of 'otherproj', so the in-between If traps with the dominating If's state.
  Node* new_unc = dom_unc->clone();
  call_proj = call_proj->clone();
  halt = halt->clone();
  Node* c = otherproj->clone();

  c = igvn->transform(c);
  // Keep the in-between test's trap request (TypeFunc::Parms holds it) on the clone.
  new_unc->set_req(TypeFunc::Parms, unc->in(TypeFunc::Parms));
  new_unc->set_req(0, c);
  new_unc = igvn->transform(new_unc);
  call_proj->set_req(0, new_unc);
  call_proj = igvn->transform(call_proj);
  halt->set_req(0, call_proj);
  halt = igvn->transform(halt);

  // Kill the original projection and anchor the cloned path under Root.
  igvn->replace_node(otherproj, igvn->C->top());
  igvn->C->root()->add_req(halt);
}
1367
// Driver for the fold-compares optimization: try to merge this If's CmpI with
// a dominating CmpI on the same value into a single unsigned range check.
// Returns the replacement node on success (this, or a merged trap shape), or
// null if nothing was done. Only plain Ifs (not RangeChecks etc.) qualify.
Node* IfNode::fold_compares(PhaseIterGVN* igvn) {
  if (Opcode() != Op_If) return nullptr;

  if (cmpi_folds(igvn)) {
    Node* ctrl = in(0);
    if (is_ctrl_folds(ctrl, igvn)) {
      // A integer comparison immediately dominated by another integer
      // comparison
      IfProjNode* success = nullptr;
      IfProjNode* fail = nullptr;
      IfProjNode* dom_cmp = ctrl->as_IfProj();
      if (has_shared_region(dom_cmp, success, fail) &&
          // Next call modifies graph so must be last
          fold_compares_helper(dom_cmp, success, fail, igvn)) {
        return this;
      }
      if (has_only_uncommon_traps(dom_cmp, success, fail, igvn) &&
          // Next call modifies graph so must be last
          fold_compares_helper(dom_cmp, success, fail, igvn)) {
        return merge_uncommon_traps(dom_cmp, success, fail, igvn);
      }
      return nullptr;
    } else if (ctrl->in(0) != nullptr &&
               ctrl->in(0)->in(0) != nullptr) {
      IfProjNode* success = nullptr;
      IfProjNode* fail = nullptr;
      Node* dom = ctrl->in(0)->in(0);
      IfProjNode* dom_cmp = dom->isa_IfProj();
      IfProjNode* other_cmp = ctrl->isa_IfProj();

      // Check if it's an integer comparison dominated by another
      // integer comparison with another test in between
      if (is_ctrl_folds(dom, igvn) &&
          has_only_uncommon_traps(dom_cmp, success, fail, igvn) &&
          is_side_effect_free_test(other_cmp, igvn) &&
          // Next call modifies graph so must be last
          fold_compares_helper(dom_cmp, success, fail, igvn)) {
        // The in-between test must now trap with the dominating If's state.
        reroute_side_effect_free_unc(other_cmp, dom_cmp, igvn);
        return merge_uncommon_traps(dom_cmp, success, fail, igvn);
      }
    }
  }
  return nullptr;
}
1412
1413 //------------------------------remove_useless_bool----------------------------
1414 // Check for people making a useless boolean: things like
1415 // if( (x < y ? true : false) ) { ... }
1416 // Replace with if( x < y ) { ... }
1417 static Node *remove_useless_bool(IfNode *iff, PhaseGVN *phase) {
1418 Node *i1 = iff->in(1);
1419 if( !i1->is_Bool() ) return nullptr;
1420 BoolNode *bol = i1->as_Bool();
1421
1422 Node *cmp = bol->in(1);
1423 if( cmp->Opcode() != Op_CmpI ) return nullptr;
1424
1425 // Must be comparing against a bool
1426 const Type *cmp2_t = phase->type( cmp->in(2) );
1427 if( cmp2_t != TypeInt::ZERO &&
1428 cmp2_t != TypeInt::ONE )
1429 return nullptr;
1430
1431 // Find a prior merge point merging the boolean
1432 i1 = cmp->in(1);
1433 if( !i1->is_Phi() ) return nullptr;
1434 PhiNode *phi = i1->as_Phi();
1435 if( phase->type( phi ) != TypeInt::BOOL )
1436 return nullptr;
1437
1438 // Check for diamond pattern
1439 int true_path = phi->is_diamond_phi();
1440 if( true_path == 0 ) return nullptr;
1441
1442 // Make sure that iff and the control of the phi are different. This
1443 // should really only happen for dead control flow since it requires
1444 // an illegal cycle.
1445 if (phi->in(0)->in(1)->in(0) == iff) return nullptr;
1446
1447 // phi->region->if_proj->ifnode->bool->cmp
1448 BoolNode *bol2 = phi->in(0)->in(1)->in(0)->in(1)->as_Bool();
1449
1450 // Now get the 'sense' of the test correct so we can plug in
1451 // either iff2->in(1) or its complement.
1452 int flip = 0;
1453 if( bol->_test._test == BoolTest::ne ) flip = 1-flip;
1454 else if( bol->_test._test != BoolTest::eq ) return nullptr;
1455 if( cmp2_t == TypeInt::ZERO ) flip = 1-flip;
1456
1457 const Type *phi1_t = phase->type( phi->in(1) );
1458 const Type *phi2_t = phase->type( phi->in(2) );
1459 // Check for Phi(0,1) and flip
1460 if( phi1_t == TypeInt::ZERO ) {
1461 if( phi2_t != TypeInt::ONE ) return nullptr;
1462 flip = 1-flip;
1463 } else {
1464 // Check for Phi(1,0)
1465 if( phi1_t != TypeInt::ONE ) return nullptr;
1466 if( phi2_t != TypeInt::ZERO ) return nullptr;
1467 }
1468 if( true_path == 2 ) {
1469 flip = 1-flip;
1470 }
1471
1472 Node* new_bol = (flip ? phase->transform( bol2->negate(phase) ) : bol2);
1473 assert(new_bol != iff->in(1), "must make progress");
1474 iff->set_req_X(1, new_bol, phase);
1475 // Intervening diamond probably goes dead
1476 phase->C->set_major_progress();
1477 return iff;
1478 }
1479
// Forward declaration: canonicalizes an If's test (defined later in this file).
static IfNode* idealize_test(PhaseGVN* phase, IfNode* iff);

// One range check gathered during range-check analysis.
struct RangeCheck {
  IfProjNode* ctl;  // projection guarding the access
  jint off;         // constant offset of the checked index; presumably relative
                    // to a common base index — confirm against the users below
};
1486
1487 Node* IfNode::Ideal_common(PhaseGVN *phase, bool can_reshape) {
1488 if (remove_dead_region(phase, can_reshape)) return this;
1489 // No Def-Use info?
1490 if (!can_reshape) return nullptr;
1491
1492 // Don't bother trying to transform a dead if
1493 if (in(0)->is_top()) return nullptr;
1494 // Don't bother trying to transform an if with a dead test
1495 if (in(1)->is_top()) return nullptr;
1496 // Another variation of a dead test
1497 if (in(1)->is_Con()) return nullptr;
1498 // Another variation of a dead if
1499 if (outcnt() < 2) return nullptr;
1500
1501 // Canonicalize the test.
1502 Node* idt_if = idealize_test(phase, this);
1503 if (idt_if != nullptr) return idt_if;
1504
1505 // Try to split the IF
1506 PhaseIterGVN *igvn = phase->is_IterGVN();
1507 Node *s = split_if(this, igvn);
1508 if (s != nullptr) return s;
1509
1510 return NodeSentinel;
1511 }
1512
//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node. Strip out
// control copies.
// Transformations are attempted in order: common If idealization, useless-bool
// removal, fold-compares, elimination by an identical dominating test, and
// finally subsumption by a dominating test on the same condition.
Node* IfNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* res = Ideal_common(phase, can_reshape);
  if (res != NodeSentinel) {
    // Ideal_common already decided (replacement node or null).
    return res;
  }

  // Check for people making a useless boolean: things like
  // if( (x < y ? true : false) ) { ... }
  // Replace with if( x < y ) { ... }
  Node* bol2 = remove_useless_bool(this, phase);
  if (bol2) return bol2;

  if (in(0) == nullptr) return nullptr; // Dead loop?

  PhaseIterGVN* igvn = phase->is_IterGVN();
  Node* result = fold_compares(igvn);
  if (result != nullptr) {
    return result;
  }

  // Scan for an equivalent test
  int dist = 4; // Cutoff limit for search
  if (is_If() && in(1)->is_Bool()) {
    Node* cmp = in(1)->in(1);
    if (cmp->Opcode() == Op_CmpP &&
        cmp->in(2) != nullptr && // make sure cmp is not already dead
        cmp->in(2)->bottom_type() == TypePtr::NULL_PTR) {
      // Null checks are worth a longer search.
      dist = 64; // Limit for null-pointer scans
    }
  }

  Node* prev_dom = search_identical(dist, igvn);

  if (prev_dom != nullptr) {
    // Dominating CountedLoopEnd (left over from some now dead loop) will become the new loop exit. Outer strip mined
    // loop will go away. Mark this loop as no longer strip mined.
    if (is_CountedLoopEnd()) {
      CountedLoopNode* counted_loop_node = as_CountedLoopEnd()->loopnode();
      if (counted_loop_node != nullptr) {
        counted_loop_node->clear_strip_mined();
      }
    }
    // Replace dominated IfNode
    return dominated_by(prev_dom, igvn, false);
  }

  // No identical test found: try subsumption by a dominating test on the
  // same condition (see simple_subsuming).
  return simple_subsuming(igvn);
}
1564
//------------------------------dominated_by-----------------------------------
// This If is implied by the dominating test 'prev_dom': retire it. All users
// of its projections are rerouted — those on the same side as prev_dom's
// outcome to prev_dom (data) or this If's old control (CFG), those on the
// dead side to top. Returns a throw-away node so Ideal reports progress.
Node* IfNode::dominated_by(Node* prev_dom, PhaseIterGVN* igvn, bool prev_dom_not_imply_this) {
#ifndef PRODUCT
  if (TraceIterativeGVN) {
    tty->print(" Removing IfNode: "); this->dump();
  }
#endif

  igvn->hash_delete(this); // Remove self to prevent spurious V-N
  Node *idom = in(0);
  // Need opcode to decide which way 'this' test goes
  int prev_op = prev_dom->Opcode();
  Node *top = igvn->C->top(); // Shortcut to top

  // Now walk the current IfNode's projections.
  // Loop ends when 'this' has no more uses.
  for (DUIterator_Last imin, i = last_outs(imin); i >= imin; --i) {
    Node *ifp = last_out(i); // Get IfTrue/IfFalse
    igvn->add_users_to_worklist(ifp);
    // Check which projection it is and set target.
    // Data-target is either the dominating projection of the same type
    // or TOP if the dominating projection is of opposite type.
    // Data-target will be used as the new control edge for the non-CFG
    // nodes like Casts and Loads.
    Node *data_target = (ifp->Opcode() == prev_op) ? prev_dom : top;
    // Control-target is just the If's immediate dominator or TOP.
    Node *ctrl_target = (ifp->Opcode() == prev_op) ? idom : top;

    // For each child of an IfTrue/IfFalse projection, reroute.
    // Loop ends when projection has no more uses.
    for (DUIterator_Last jmin, j = ifp->last_outs(jmin); j >= jmin; --j) {
      Node* s = ifp->last_out(j); // Get child of IfTrue/IfFalse
      if (s->depends_only_on_test()) {
        // For control producers
        igvn->replace_input_of(s, 0, data_target); // Move child to data-target
        if (prev_dom_not_imply_this && data_target != top) {
          // If prev_dom_not_imply_this, s now depends on multiple tests with prev_dom being the
          // lowest dominating one. As a result, it must be pinned there. Otherwise, it can be
          // incorrectly moved to a dominating test equivalent to the lowest one here.
          Node* clone = s->pin_node_under_control();
          if (clone != nullptr) {
            igvn->register_new_node_with_optimizer(clone, s);
            igvn->replace_node(s, clone);
          }
        }
      } else {
        // Find the control input matching this def-use edge.
        // For Regions it may not be in slot 0.
        uint l;
        for (l = 0; s->in(l) != ifp; l++) { }
        igvn->replace_input_of(s, l, ctrl_target);
      }
    } // End for each child of a projection

    igvn->remove_dead_node(ifp, PhaseIterGVN::NodeOrigin::Graph);
  } // End for each IfTrue/IfFalse child of If

  // Kill the IfNode
  igvn->remove_dead_node(this, PhaseIterGVN::NodeOrigin::Graph);

  // Must return either the original node (now dead) or a new node
  // (Do not return a top here, since that would break the uniqueness of top.)
  return new ConINode(TypeInt::ZERO);
}
1629
1630 Node* IfNode::search_identical(int dist, PhaseIterGVN* igvn) {
1631 // Setup to scan up the CFG looking for a dominating test
1632 Node* dom = in(0);
1633 Node* prev_dom = this;
1634 int op = Opcode();
1635 // Search up the dominator tree for an If with an identical test
1636 while (dom->Opcode() != op || // Not same opcode?
1637 !same_condition(dom, igvn) || // Not same input 1?
1638 prev_dom->in(0) != dom) { // One path of test does not dominate?
1639 if (dist < 0) return nullptr;
1640
1641 dist--;
1642 prev_dom = dom;
1643 dom = up_one_dom(dom);
1644 if (!dom) return nullptr;
1645 }
1646
1647 // Check that we did not follow a loop back to ourselves
1648 if (this == dom) {
1649 return nullptr;
1650 }
1651
1652 #ifndef PRODUCT
1653 if (dist > 2) { // Add to count of null checks elided
1654 explicit_null_checks_elided++;
1655 }
1656 #endif
1657
1658 return prev_dom;
1659 }
1660
1661 bool IfNode::same_condition(const Node* dom, PhaseIterGVN* igvn) const {
1662 Node* dom_bool = dom->in(1);
1663 Node* this_bool = in(1);
1664 if (dom_bool == this_bool) {
1665 return true;
1666 }
1667
1668 if (dom_bool == nullptr || !dom_bool->is_Bool() ||
1669 this_bool == nullptr || !this_bool->is_Bool()) {
1670 return false;
1671 }
1672 Node* dom_cmp = dom_bool->in(1);
1673 Node* this_cmp = this_bool->in(1);
1674
1675 // If the comparison is a subtype check, then SubTypeCheck nodes may have profile data attached to them and may be
1676 // different nodes even-though they perform the same subtype check
1677 if (dom_cmp == nullptr || !dom_cmp->is_SubTypeCheck() ||
1678 this_cmp == nullptr || !this_cmp->is_SubTypeCheck()) {
1679 return false;
1680 }
1681
1682 if (dom_cmp->in(1) != this_cmp->in(1) ||
1683 dom_cmp->in(2) != this_cmp->in(2) ||
1684 dom_bool->as_Bool()->_test._test != this_bool->as_Bool()->_test._test) {
1685 return false;
1686 }
1687
1688 return true;
1689 }
1690
1691
1692 static int subsuming_bool_test_encode(Node*);
1693
1694 // Check if dominating test is subsuming 'this' one.
1695 //
1696 // cmp
1697 // / \
1698 // (r1) bool \
1699 // / bool (r2)
1700 // (dom) if \
1701 // \ )
1702 // (pre) if[TF] /
1703 // \ /
1704 // if (this)
1705 // \r1
1706 // r2\ eqT eqF neT neF ltT ltF leT leF gtT gtF geT geF
1707 // eq t f f t f - - f f - - f
1708 // ne f t t f t - - t t - - t
1709 // lt f - - f t f - f f - f t
1710 // le t - - t t - t f f t - t
1711 // gt f - - f f - f t t f - f
1712 // ge t - - t f t - t t - t f
1713 //
Node* IfNode::simple_subsuming(PhaseIterGVN* igvn) {
  // Table encoding: N/A (na), True-branch (tb), False-branch (fb).
  // Rows are indexed by this If's relation (trel); columns by the dominating
  // If's relation combined with which of its projections we hang off
  // (2*drel+bout, bout = 1 on the false projection).
  static enum { na, tb, fb } s_short_circuit_map[6][12] = {
  /*rel: eq+T eq+F ne+T ne+F lt+T lt+F le+T le+F gt+T gt+F ge+T ge+F*/
  /*eq*/{ tb, fb, fb, tb, fb, na, na, fb, fb, na, na, fb },
  /*ne*/{ fb, tb, tb, fb, tb, na, na, tb, tb, na, na, tb },
  /*lt*/{ fb, na, na, fb, tb, fb, na, fb, fb, na, fb, tb },
  /*le*/{ tb, na, na, tb, tb, na, tb, fb, fb, tb, na, tb },
  /*gt*/{ fb, na, na, fb, fb, na, fb, tb, tb, fb, na, fb },
  /*ge*/{ tb, na, na, tb, fb, tb, na, tb, tb, na, tb, fb }};

  // This If must hang directly off a projection of a dominating If that
  // tests the very same Cmp node (see diagram above the function).
  Node* pre = in(0);
  if (!pre->is_IfTrue() && !pre->is_IfFalse()) {
    return nullptr;
  }
  Node* dom = pre->in(0);
  if (!dom->is_If()) {
    return nullptr;
  }
  Node* bol = in(1);
  if (!bol->is_Bool()) {
    return nullptr;
  }
  Node* cmp = in(1)->in(1);
  if (!cmp->is_Cmp()) {
    return nullptr;
  }

  if (!dom->in(1)->is_Bool()) {
    return nullptr;
  }
  if (dom->in(1)->in(1) != cmp) { // Not same cond?
    return nullptr;
  }

  // Encode both relations as table indices; bout selects the T/F column pair.
  int drel = subsuming_bool_test_encode(dom->in(1));
  int trel = subsuming_bool_test_encode(bol);
  int bout = pre->is_IfFalse() ? 1 : 0;

  if (drel < 0 || trel < 0) {
    // Relation not modeled in the table (overflow/no_overflow/never/illegal).
    return nullptr;
  }
  int br = s_short_circuit_map[trel][2*drel+bout];
  if (br == na) {
    return nullptr;
  }
#ifndef PRODUCT
  if (TraceIterativeGVN) {
    tty->print(" Subsumed IfNode: "); dump();
  }
#endif
  // Replace condition with constant True(1)/False(0).
  bool is_always_true = br == tb;
  set_req(1, igvn->intcon(is_always_true ? 1 : 0));

  // Update any data dependencies to the directly dominating test. This subsumed test is not immediately removed by igvn
  // and therefore subsequent optimizations might miss these data dependencies otherwise. There might be a dead loop
  // ('always_taken_proj' == 'pre') that is cleaned up later. Skip this case to make the iterator work properly.
  Node* always_taken_proj = proj_out(is_always_true);
  if (always_taken_proj != pre) {
    for (DUIterator_Fast imax, i = always_taken_proj->fast_outs(imax); i < imax; i++) {
      Node* u = always_taken_proj->fast_out(i);
      if (!u->is_CFG()) {
        // Rewiring removes 'u' from our out array; compensate the iterator.
        igvn->replace_input_of(u, 0, pre);
        --i;
        --imax;
      }
    }
  }

  if (bol->outcnt() == 0) {
    igvn->remove_dead_node(bol, PhaseIterGVN::NodeOrigin::Graph); // Kill the BoolNode.
  }
  return this;
}
1789
1790 // Map BoolTest to local table encoding. The BoolTest (e)numerals
1791 // { eq = 0, ne = 4, le = 5, ge = 7, lt = 3, gt = 1 }
1792 // are mapped to table indices, while the remaining (e)numerals in BoolTest
1793 // { overflow = 2, no_overflow = 6, never = 8, illegal = 9 }
1794 // are ignored (these are not modeled in the table).
1795 //
1796 static int subsuming_bool_test_encode(Node* node) {
1797 precond(node->is_Bool());
1798 BoolTest::mask x = node->as_Bool()->_test._test;
1799 switch (x) {
1800 case BoolTest::eq: return 0;
1801 case BoolTest::ne: return 1;
1802 case BoolTest::lt: return 2;
1803 case BoolTest::le: return 3;
1804 case BoolTest::gt: return 4;
1805 case BoolTest::ge: return 5;
1806 case BoolTest::overflow:
1807 case BoolTest::no_overflow:
1808 case BoolTest::never:
1809 case BoolTest::illegal:
1810 default:
1811 return -1;
1812 }
1813 }
1814
1815 //------------------------------Identity---------------------------------------
1816 // If the test is constant & we match, then we are the input Control
// If the If's test is constant and this projection is the branch that is
// always taken, this projection is identical to the If's own control input.
Node* IfProjNode::Identity(PhaseGVN* phase) {
  // Can only optimize if cannot go the other way
  const TypeTuple *t = phase->type(in(0))->is_tuple();
  if (t == TypeTuple::IFNEITHER || (always_taken(t) &&
      // During parsing (GVN) we don't remove dead code aggressively.
      // Cut off dead branch and let PhaseRemoveUseless take care of it.
      (!phase->is_IterGVN() ||
       // During IGVN, first wait for the dead branch to be killed.
       // Otherwise, the IfNode's control will have two control uses (the IfNode
       // that doesn't go away because it still has uses and this branch of the
       // If) which breaks other optimizations. Node::has_special_unique_user()
       // will cause this node to be reprocessed once the dead branch is killed.
       in(0)->outcnt() == 1))) {
    // IfNode control
    if (in(0)->is_BaseCountedLoopEnd()) {
      // CountedLoopEndNode may be eliminated by if subsuming, replace CountedLoopNode with LoopNode to
      // avoid mismatching between CountedLoopNode and CountedLoopEndNode in the following optimization.
      Node* head = unique_ctrl_out_or_null();
      if (head != nullptr && head->is_BaseCountedLoop() && head->in(LoopNode::LoopBackControl) == this) {
        // Downgrade the loop head to a plain LoopNode with the same entry and
        // back control, then replace the counted loop head everywhere.
        Node* new_head = new LoopNode(head->in(LoopNode::EntryControl), this);
        phase->is_IterGVN()->register_new_node_with_optimizer(new_head);
        phase->is_IterGVN()->replace_node(head, new_head);
      }
    }
    // Collapse to the control feeding the (now decided) If.
    return in(0)->in(0);
  }
  // no progress
  return this;
}
1846
1847 bool IfNode::is_zero_trip_guard() const {
1848 if (in(1)->is_Bool() && in(1)->in(1)->is_Cmp()) {
1849 return in(1)->in(1)->in(1)->Opcode() == Op_OpaqueZeroTripGuard;
1850 }
1851 return false;
1852 }
1853
// Replace every data node hanging off this projection that merely
// 'depends_only_on_test()' with a pinned clone, so that it can no longer
// float above the test once the test is transformed.
void IfProjNode::pin_dependent_nodes(PhaseIterGVN* igvn) {
  for (DUIterator i = outs(); has_out(i); i++) {
    Node* u = out(i);
    if (!u->depends_only_on_test()) {
      continue;
    }
    // NOTE(review): presumably pin_node_under_control() returns nullptr when
    // 'u' cannot (or need not) be pinned — confirm against its definition.
    Node* clone = u->pin_node_under_control();
    if (clone != nullptr) {
      igvn->register_new_node_with_optimizer(clone, u);
      // replace_node() removes 'u' from our out array; step the iterator back
      // so the element shifted into slot i is not skipped.
      igvn->replace_node(u, clone);
      --i;
    }
  }
}
1868
1869 #ifndef PRODUCT
1870 void IfNode::dump_spec(outputStream* st) const {
1871 switch (_assertion_predicate_type) {
1872 case AssertionPredicateType::InitValue:
1873 st->print("#Init Value Assertion Predicate ");
1874 break;
1875 case AssertionPredicateType::LastValue:
1876 st->print("#Last Value Assertion Predicate ");
1877 break;
1878 case AssertionPredicateType::FinalIv:
1879 st->print("#Final IV Assertion Predicate ");
1880 break;
1881 case AssertionPredicateType::None:
1882 // No Assertion Predicate
1883 break;
1884 default:
1885 fatal("Unknown Assertion Predicate type");
1886 }
1887 st->print("P=%f, C=%f", _prob, _fcnt);
1888 }
1889 #endif // NOT PRODUCT
1890
1891 //------------------------------idealize_test----------------------------------
1892 // Try to canonicalize tests better. Peek at the Cmp/Bool/If sequence and
1893 // come up with a canonical sequence. Bools getting 'eq', 'gt' and 'ge' forms
1894 // converted to 'ne', 'le' and 'lt' forms. IfTrue/IfFalse get swapped as
1895 // needed.
static IfNode* idealize_test(PhaseGVN* phase, IfNode* iff) {
  assert(iff->in(0) != nullptr, "If must be live");

  if (iff->outcnt() != 2) return nullptr; // Malformed projections.
  IfFalseNode* old_if_f = iff->false_proj();
  IfTrueNode* old_if_t = iff->true_proj();

  // CountedLoopEnds want the back-control test to be TRUE, regardless of
  // whether they are testing a 'gt' or 'lt' condition. The 'gt' condition
  // happens in count-down loops
  if (iff->is_BaseCountedLoopEnd()) return nullptr;
  if (!iff->in(1)->is_Bool()) return nullptr; // Happens for partially optimized IF tests
  BoolNode *b = iff->in(1)->as_Bool();
  BoolTest bt = b->_test;
  // Test already in good order?
  if( bt.is_canonical() )
    return nullptr;

  // Flip test to be canonical. Requires flipping the IfFalse/IfTrue and
  // cloning the IfNode.
  Node* new_b = phase->transform( new BoolNode(b->in(1), bt.negate()) );
  if( !new_b->is_Bool() ) return nullptr;
  b = new_b->as_Bool();

  PhaseIterGVN *igvn = phase->is_IterGVN();
  assert( igvn, "Test is not canonical in parser?" );

  // The IF node never really changes, but it needs to be cloned
  iff = iff->clone()->as_If();
  iff->set_req(1, b);
  // Flipping the condition flips the branch probability as well.
  iff->_prob = 1.0-iff->_prob;

  // Value-number the clone: if an identical If already exists, reuse it and
  // discard the fresh clone; otherwise enter the clone into the IGVN tables.
  Node *prior = igvn->hash_find_insert(iff);
  if( prior ) {
    igvn->remove_dead_node(iff, PhaseIterGVN::NodeOrigin::Graph);
    iff = (IfNode*)prior;
  } else {
    // Cannot call transform on it just yet
    igvn->set_type_bottom(iff);
  }
  igvn->_worklist.push(iff);

  // Now handle projections. Cloning not required.
  Node* new_if_f = (Node*)(new IfFalseNode( iff ));
  Node* new_if_t = (Node*)(new IfTrueNode ( iff ));

  igvn->register_new_node_with_optimizer(new_if_f);
  igvn->register_new_node_with_optimizer(new_if_t);
  // Flip test, so flip trailing control
  igvn->replace_node(old_if_f, new_if_t);
  igvn->replace_node(old_if_t, new_if_f);

  // Progress
  return iff;
}
1951
Node* RangeCheckNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  // NodeSentinel from Ideal_common means "no generic If transformation
  // applied; proceed with range-check-specific optimizations".
  Node* res = Ideal_common(phase, can_reshape);
  if (res != NodeSentinel) {
    return res;
  }

  PhaseIterGVN *igvn = phase->is_IterGVN();
  // Setup to scan up the CFG looking for a dominating test
  Node* prev_dom = this;

  // Check for range-check vs other kinds of tests
  Node* index1;
  Node* range1;
  jint offset1;
  // Non-zero iff this If matches the range-check pattern; decomposes the
  // test into range (array length), index and constant offset.
  int flip1 = is_range_check(range1, index1, offset1);
  if (flip1) {
    Node* dom = in(0);
    // Try to remove extra range checks. All 'up_one_dom' gives up at merges
    // so all checks we inspect post-dominate the top-most check we find.
    // If we are going to fail the current check and we reach the top check
    // then we are guaranteed to fail, so just start interpreting there.
    // We 'expand' the top 3 range checks to include all post-dominating
    // checks.
    //
    // Example:
    // a[i+x] // (1) 1 < x < 6
    // a[i+3] // (2)
    // a[i+4] // (3)
    // a[i+6] // max = max of all constants
    // a[i+2]
    // a[i+1] // min = min of all constants
    //
    // If x < 3:
    // (1) a[i+x]: Leave unchanged
    // (2) a[i+3]: Replace with a[i+max] = a[i+6]: i+x < i+3 <= i+6 -> (2) is covered
    // (3) a[i+4]: Replace with a[i+min] = a[i+1]: i+1 < i+4 <= i+6 -> (3) and all following checks are covered
    // Remove all other a[i+c] checks
    //
    // If x >= 3:
    // (1) a[i+x]: Leave unchanged
    // (2) a[i+3]: Replace with a[i+min] = a[i+1]: i+1 < i+3 <= i+x -> (2) is covered
    // (3) a[i+4]: Replace with a[i+max] = a[i+6]: i+1 < i+4 <= i+6 -> (3) and all following checks are covered
    // Remove all other a[i+c] checks
    //
    // We only need the top 2 range checks if x is the min or max of all constants.
    //
    // This, however, only works if the interval [i+min,i+max] is not larger than max_int (i.e. abs(max - min) < max_int):
    // The theoretical max size of an array is max_int with:
    // - Valid index space: [0,max_int-1]
    // - Invalid index space: [max_int,-1] // max_int, min_int, min_int - 1 ..., -1
    //
    // The size of the consecutive valid index space is smaller than the size of the consecutive invalid index space.
    // If we choose min and max in such a way that:
    // - abs(max - min) < max_int
    // - i+max and i+min are inside the valid index space
    // then all indices [i+min,i+max] must be in the valid index space. Otherwise, the invalid index space must be
    // smaller than the valid index space which is never the case for any array size.
    //
    // Choosing a smaller array size only makes the valid index space smaller and the invalid index space larger and
    // the argument above still holds.
    //
    // Note that the same optimization with the same maximal accepted interval size can also be found in C1.
    const jlong maximum_number_of_min_max_interval_indices = (jlong)max_jint;

    // The top 3 range checks seen
    const int NRC = 3;
    RangeCheck prev_checks[NRC];
    int nb_checks = 0;

    // Low and high offsets seen so far
    jint off_lo = offset1;
    jint off_hi = offset1;

    bool found_immediate_dominator = false;

    // Scan for the top checks and collect range of offsets
    for (int dist = 0; dist < 999; dist++) { // Range-Check scan limit
      if (dom->Opcode() == Op_RangeCheck && // Not same opcode?
          prev_dom->in(0) == dom) { // One path of test does dominate?
        if (dom == this) return nullptr; // dead loop
        // See if this is a range check
        Node* index2;
        Node* range2;
        jint offset2;
        int flip2 = dom->as_RangeCheck()->is_range_check(range2, index2, offset2);
        // See if this is a _matching_ range check, checking against
        // the same array bounds.
        if (flip2 == flip1 && range2 == range1 && index2 == index1 &&
            dom->outcnt() == 2) {
          if (nb_checks == 0 && dom->in(1) == in(1)) {
            // Found an immediately dominating test at the same offset.
            // This kind of back-to-back test can be eliminated locally,
            // and there is no need to search further for dominating tests.
            assert(offset2 == offset1, "Same test but different offsets");
            found_immediate_dominator = true;
            break;
          }

          // "x - y" -> must add one to the difference for number of elements in [x,y]
          const jlong diff = (jlong)MIN2(offset2, off_lo) - (jlong)MAX2(offset2, off_hi);
          // Only widen the tracked offset interval if it stays below the
          // max_int bound justified in the comment above.
          if (ABS(diff) < maximum_number_of_min_max_interval_indices) {
            // Gather expanded bounds
            off_lo = MIN2(off_lo, offset2);
            off_hi = MAX2(off_hi, offset2);
            // Record top NRC range checks
            prev_checks[nb_checks % NRC].ctl = prev_dom->as_IfProj();
            prev_checks[nb_checks % NRC].off = offset2;
            nb_checks++;
          }
        }
      }
      prev_dom = dom;
      dom = up_one_dom(dom);
      if (!dom) break;
    }

    if (!found_immediate_dominator) {
      // Attempt to widen the dominating range check to cover some later
      // ones. Since range checks "fail" by uncommon-trapping to the
      // interpreter, widening a check can make us speculatively enter
      // the interpreter. If we see range-check deopt's, do not widen!
      if (!phase->C->allow_range_check_smearing()) return nullptr;

      if (can_reshape && !phase->C->post_loop_opts_phase()) {
        // We are about to perform range check smearing (i.e. remove this RangeCheck if it is dominated by
        // a series of RangeChecks which have a range that covers this RangeCheck). This can cause array access nodes to
        // be pinned. We want to avoid that and first allow range check elimination a chance to remove the RangeChecks
        // from loops. Hence, we delay range check smearing until after loop opts.
        phase->C->record_for_post_loop_opts_igvn(this);
        return nullptr;
      }

      // Didn't find prior covering check, so cannot remove anything.
      if (nb_checks == 0) {
        return nullptr;
      }
      // Constant indices only need to check the upper bound.
      // Non-constant indices must check both low and high.
      int chk0 = (nb_checks - 1) % NRC;
      if (index1) {
        if (nb_checks == 1) {
          return nullptr;
        } else {
          // If the top range check's constant is the min or max of
          // all constants we widen the next one to cover the whole
          // range of constants.
          RangeCheck rc0 = prev_checks[chk0];
          int chk1 = (nb_checks - 2) % NRC;
          RangeCheck rc1 = prev_checks[chk1];
          if (rc0.off == off_lo) {
            adjust_check(rc1.ctl, range1, index1, flip1, off_hi, igvn);
            prev_dom = rc1.ctl;
          } else if (rc0.off == off_hi) {
            adjust_check(rc1.ctl, range1, index1, flip1, off_lo, igvn);
            prev_dom = rc1.ctl;
          } else {
            // If the top test's constant is not the min or max of all
            // constants, we need 3 range checks. We must leave the
            // top test unchanged because widening it would allow the
            // accesses it protects to successfully read/write out of
            // bounds.
            if (nb_checks == 2) {
              return nullptr;
            }
            int chk2 = (nb_checks - 3) % NRC;
            RangeCheck rc2 = prev_checks[chk2];
            // The top range check a+i covers interval: -a <= i < length-a
            // The second range check b+i covers interval: -b <= i < length-b
            if (rc1.off <= rc0.off) {
              // if b <= a, we change the second range check to:
              // -min_of_all_constants <= i < length-min_of_all_constants
              // Together top and second range checks now cover:
              // -min_of_all_constants <= i < length-a
              // which is more restrictive than -b <= i < length-b:
              // -b <= -min_of_all_constants <= i < length-a <= length-b
              // The third check is then changed to:
              // -max_of_all_constants <= i < length-max_of_all_constants
              // so 2nd and 3rd checks restrict allowed values of i to:
              // -min_of_all_constants <= i < length-max_of_all_constants
              adjust_check(rc1.ctl, range1, index1, flip1, off_lo, igvn);
              adjust_check(rc2.ctl, range1, index1, flip1, off_hi, igvn);
            } else {
              // if b > a, we change the second range check to:
              // -max_of_all_constants <= i < length-max_of_all_constants
              // Together top and second range checks now cover:
              // -a <= i < length-max_of_all_constants
              // which is more restrictive than -b <= i < length-b:
              // -b < -a <= i < length-max_of_all_constants <= length-b
              // The third check is then changed to:
              // -max_of_all_constants <= i < length-max_of_all_constants
              // so 2nd and 3rd checks restrict allowed values of i to:
              // -min_of_all_constants <= i < length-max_of_all_constants
              adjust_check(rc1.ctl, range1, index1, flip1, off_hi, igvn);
              adjust_check(rc2.ctl, range1, index1, flip1, off_lo, igvn);
            }
            prev_dom = rc2.ctl;
          }
        }
      } else {
        RangeCheck rc0 = prev_checks[chk0];
        // 'Widen' the offset of the 1st and only covering check
        adjust_check(rc0.ctl, range1, index1, flip1, off_hi, igvn);
        // Test is now covered by prior checks, dominate it out
        prev_dom = rc0.ctl;
      }
      // The last RangeCheck is found to be redundant with a sequence of n (n >= 2) preceding RangeChecks.
      // If an array load is control dependent on the eliminated range check, the array load nodes (CastII and Load)
      // become control dependent on the last range check of the sequence, but they are really dependent on the entire
      // sequence of RangeChecks. If RangeCheck#n is later replaced by a dominating identical check, the array load
      // nodes must not float above the n-1 other RangeCheck in the sequence. We pin the array load nodes here to
      // guarantee it doesn't happen.
      //
      // RangeCheck#1                 RangeCheck#1
      //    |        \                   |        \
      //    |         uncommon trap      |         uncommon trap
      //    ..                           ..
      // RangeCheck#n              ->  RangeCheck#n
      //    |        \                   |        \
      //    |         uncommon trap    CastII      uncommon trap
      // RangeCheck                    Load
      //    |        \
      //  CastII      uncommon trap
      //  Load

      return dominated_by(prev_dom, igvn, true);
    }
  } else {
    // Not a range check: fall back to the generic search for an identical
    // dominating test within a small distance.
    prev_dom = search_identical(4, igvn);

    if (prev_dom == nullptr) {
      return nullptr;
    }
  }

  // Replace dominated IfNode
  return dominated_by(prev_dom, igvn, false);
}
2189
2190 ParsePredicateNode::ParsePredicateNode(Node* control, Deoptimization::DeoptReason deopt_reason, PhaseGVN* gvn)
2191 : IfNode(control, gvn->intcon(1), PROB_MAX, COUNT_UNKNOWN),
2192 _deopt_reason(deopt_reason),
2193 _predicate_state(PredicateState::Useful) {
2194 init_class_id(Class_ParsePredicate);
2195 gvn->C->add_parse_predicate(this);
2196 gvn->C->record_for_post_loop_opts_igvn(this);
2197 #ifdef ASSERT
2198 switch (deopt_reason) {
2199 case Deoptimization::Reason_predicate:
2200 case Deoptimization::Reason_profile_predicate:
2201 case Deoptimization::Reason_auto_vectorization_check:
2202 case Deoptimization::Reason_loop_limit_check:
2203 case Deoptimization::Reason_short_running_long_loop:
2204 break;
2205 default:
2206 assert(false, "unsupported deoptimization reason for Parse Predicate");
2207 }
2208 #endif // ASSERT
2209 }
2210
// Mark this Parse Predicate as useless and enqueue it for IGVN:
// Value() folds a useless Parse Predicate to TypeTuple::IFTRUE, which
// removes it from the graph.
void ParsePredicateNode::mark_useless(PhaseIterGVN& igvn) {
  _predicate_state = PredicateState::Useless;
  igvn._worklist.push(this);
}
2215
2216 Node* ParsePredicateNode::uncommon_trap() const {
2217 ParsePredicateUncommonProj* uncommon_proj = false_proj();
2218 Node* uct_region_or_call = uncommon_proj->unique_ctrl_out();
2219 assert(uct_region_or_call->is_Region() || uct_region_or_call->is_Call(), "must be a region or call uct");
2220 return uct_region_or_call;
2221 }
2222
2223 // Fold this node away once it becomes useless or at latest in post loop opts IGVN.
2224 const Type* ParsePredicateNode::Value(PhaseGVN* phase) const {
2225 assert(_predicate_state != PredicateState::MaybeUseful, "should only be MaybeUseful when eliminating useless "
2226 "predicates during loop opts");
2227 if (phase->type(in(0)) == Type::TOP) {
2228 return Type::TOP;
2229 }
2230 if (_predicate_state == PredicateState::Useless || phase->C->post_loop_opts_phase()) {
2231 return TypeTuple::IFTRUE;
2232 }
2233 return bottom_type();
2234 }
2235
2236 #ifndef PRODUCT
2237 void ParsePredicateNode::dump_spec(outputStream* st) const {
2238 st->print(" #");
2239 switch (_deopt_reason) {
2240 case Deoptimization::DeoptReason::Reason_predicate:
2241 st->print("Loop ");
2242 break;
2243 case Deoptimization::DeoptReason::Reason_profile_predicate:
2244 st->print("Profiled_Loop ");
2245 break;
2246 case Deoptimization::DeoptReason::Reason_auto_vectorization_check:
2247 st->print("Auto_Vectorization_Check ");
2248 break;
2249 case Deoptimization::DeoptReason::Reason_loop_limit_check:
2250 st->print("Loop_Limit_Check ");
2251 break;
2252 case Deoptimization::DeoptReason::Reason_short_running_long_loop:
2253 st->print("Short_Running_Long_Loop ");
2254 break;
2255 default:
2256 fatal("unknown kind");
2257 }
2258 if (_predicate_state == PredicateState::Useless) {
2259 st->print("#useless ");
2260 }
2261 }
2262 #endif // NOT PRODUCT