1 /*
2 * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "opto/addnode.hpp"
26 #include "opto/callnode.hpp"
27 #include "opto/castnode.hpp"
28 #include "opto/cfgnode.hpp"
29 #include "opto/connode.hpp"
30 #include "opto/graphKit.hpp"
31 #include "opto/inlinetypenode.hpp"
32 #include "opto/loopnode.hpp"
33 #include "opto/matcher.hpp"
34 #include "opto/phaseX.hpp"
35 #include "opto/rootnode.hpp"
36 #include "opto/subnode.hpp"
37 #include "opto/type.hpp"
38 #include "utilities/checkedCast.hpp"
39
40 const ConstraintCastNode::DependencyType ConstraintCastNode::DependencyType::FloatingNarrowing(true, true, "floating narrowing dependency"); // not pinned, narrows type
41 const ConstraintCastNode::DependencyType ConstraintCastNode::DependencyType::FloatingNonNarrowing(true, false, "floating non-narrowing dependency"); // not pinned, doesn't narrow type
42 const ConstraintCastNode::DependencyType ConstraintCastNode::DependencyType::NonFloatingNarrowing(false, true, "non-floating narrowing dependency"); // pinned, narrows type
43 const ConstraintCastNode::DependencyType ConstraintCastNode::DependencyType::NonFloatingNonNarrowing(false, false, "non-floating non-narrowing dependency"); // pinned, doesn't narrow type
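// For cross-reference: in this file, CastIINode::pin_array_access_node() turns a floating dependency into a pinned one
// via with_pinned_dependency(), and the integer cast optimizations switch to a non-narrowing dependency via
// with_non_narrowing() once the cast's type can no longer be relied on to remove it (see optimize_integer_cast()).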
44
45 //=============================================================================
46 // If input is already higher or equal to cast type, then this is an identity.
47 Node* ConstraintCastNode::Identity(PhaseGVN* phase) {
48 if (!_dependency.narrows_type()) {
49 // If this cast doesn't carry a type dependency (i.e. not used for type narrowing), we cannot optimize it.
50 return this;
51 }
52
  // This cast node carries a type dependency. We can remove it if:
  // - The type of its input is already at least as narrow as the cast type (higher or equal)
  // - There is a dominating cast of the same input whose type is at least as narrow
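  // For instance (a sketch): given (CastII x [0, 100]) where x's type is already [0, 10], the cast is redundant and
  // Identity returns x. Likewise, a dominating (CastII x [0, 10]) on the same input x can replace this cast.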
56 Node* dom = dominating_cast(phase, phase);
57 if (dom != nullptr) {
58 return dom;
59 }
60 return higher_equal_types(phase, in(1)) ? in(1) : this;
61 }
62
63 //------------------------------Value------------------------------------------
64 // Take 'join' of input and cast-up type
65 const Type* ConstraintCastNode::Value(PhaseGVN* phase) const {
66 if (in(0) && phase->type(in(0)) == Type::TOP) return Type::TOP;
67
68 const Type* in_type = phase->type(in(1));
69 const Type* ft = in_type->filter_speculative(_type);
70
71 // Check if both _type and in_type had a speculative type, but for the just
72 // computed ft the speculative type was dropped.
73 if (ft->speculative() == nullptr &&
74 _type->speculative() != nullptr &&
75 in_type->speculative() != nullptr) {
76 // Speculative type may have disagreed between cast and input, and was
77 // dropped in filtering. Recompute so that ft can take speculative type
78 // of in_type. If we did not do it now, a subsequent ::Value call would
79 // do it, and violate idempotence of ::Value.
80 ft = in_type->filter_speculative(ft);
81 }
82
83 #ifdef ASSERT
  // Previous versions of this function had some special case logic,
  // which is no longer necessary; the asserts below check that the required effects still hold.
86 switch (Opcode()) {
87 case Op_CastII:
88 {
89 if (in_type == Type::TOP) {
90 assert(ft == Type::TOP, "special case #1");
91 }
92 const Type* rt = in_type->join_speculative(_type);
93 if (rt->empty()) {
94 assert(ft == Type::TOP, "special case #2");
95 }
96 break;
97 }
98 case Op_CastPP:
99 if (in_type == TypePtr::NULL_PTR &&
100 _type->isa_ptr() && _type->is_ptr()->_ptr == TypePtr::NotNull) {
101 assert(ft == Type::TOP, "special case #3");
102 break;
103 }
104 }
105 #endif //ASSERT
106
107 return ft;
108 }
109
110 //------------------------------Ideal------------------------------------------
111 // Return a node which is more "ideal" than the current node. Strip out
112 // control copies
113 Node *ConstraintCastNode::Ideal(PhaseGVN *phase, bool can_reshape) {
114 if (in(0) != nullptr && remove_dead_region(phase, can_reshape)) {
115 return this;
116 }
117
118 // Push cast through InlineTypeNode
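  // A sketch of the transformation (e.g. for a CheckCastPP): (CastPP (InlineType oop=o ...)) becomes
  // (InlineType oop=(CastPP o) ...), i.e. the cast is cloned onto the buffered oop and a clone of the
  // InlineTypeNode is returned on top of it.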
119 InlineTypeNode* vt = in(1)->isa_InlineType();
120 if (vt != nullptr && phase->type(vt)->filter_speculative(_type) != Type::TOP) {
121 Node* cast = clone();
122 cast->set_req(1, vt->get_oop());
123 vt = vt->clone()->as_InlineType();
124 if (!_type->maybe_null()) {
125 vt->as_InlineType()->set_null_marker(*phase);
126 }
127 vt->set_oop(*phase, phase->transform(cast));
128 return vt;
129 }
130
131 if (in(1) != nullptr && phase->type(in(1)) != Type::TOP) {
132 return TypeNode::Ideal(phase, can_reshape);
133 }
134
135 return nullptr;
136 }
137
138 uint ConstraintCastNode::hash() const {
139 return TypeNode::hash() + _dependency.hash() + (_extra_types != nullptr ? _extra_types->hash() : 0);
140 }
141
142 bool ConstraintCastNode::cmp(const Node &n) const {
143 if (!TypeNode::cmp(n)) {
144 return false;
145 }
146 ConstraintCastNode& cast = (ConstraintCastNode&) n;
147 if (!cast._dependency.cmp(_dependency)) {
148 return false;
149 }
150 if (_extra_types == nullptr || cast._extra_types == nullptr) {
151 return _extra_types == cast._extra_types;
152 }
153 return _extra_types->eq(cast._extra_types);
154 }
155
156 uint ConstraintCastNode::size_of() const {
157 return sizeof(*this);
158 }
159
160 Node* ConstraintCastNode::make_cast_for_basic_type(Node* c, Node* n, const Type* t, const DependencyType& dependency, BasicType bt) {
161 switch(bt) {
162 case T_INT:
163 return new CastIINode(c, n, t, dependency);
164 case T_LONG:
165 return new CastLLNode(c, n, t, dependency);
166 default:
167 fatal("Bad basic type %s", type2name(bt));
168 }
169 return nullptr;
170 }
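// A usage sketch (hypothetical caller, for illustration only):
//   make_cast_for_basic_type(ctrl, idx, TypeInt::POS, ConstraintCastNode::DependencyType::FloatingNarrowing, T_INT)
// yields a floating, narrowing (CastII ctrl idx [0, max_jint]).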
171
172 TypeNode* ConstraintCastNode::dominating_cast(PhaseGVN* gvn, PhaseTransform* pt) const {
173 // See discussion at definition of ConstraintCastNode::DependencyType: replacing this cast with a dominating one is
174 // not safe if _dependency.narrows_type() is not true.
175 assert(_dependency.narrows_type(), "cast can't be replaced by dominating one");
176 Node* val = in(1);
177 Node* ctl = in(0);
178 int opc = Opcode();
179 if (ctl == nullptr) {
180 return nullptr;
181 }
  // Range check CastIIs may all end up under a single range check; in
  // that case the code below would keep only the narrower CastII, which
  // would be incorrect.
185 if (is_CastII() && as_CastII()->has_range_check()) {
186 return nullptr;
187 }
188 if (type()->isa_rawptr() && (gvn->type_or_null(val) == nullptr || gvn->type(val)->isa_oopptr())) {
189 return nullptr;
190 }
191 for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
192 Node* u = val->fast_out(i);
193 if (u != this &&
194 u->outcnt() > 0 &&
195 u->Opcode() == opc &&
196 u->in(0) != nullptr &&
197 higher_equal_types(gvn, u)) {
198 if (pt->is_dominator(u->in(0), ctl)) {
199 return u->as_Type();
200 }
201 if (is_CheckCastPP() && u->in(1)->is_Proj() && u->in(1)->in(0)->is_Allocate() &&
202 u->in(0)->is_Proj() && u->in(0)->in(0)->is_Initialize() &&
203 u->in(1)->in(0)->as_Allocate()->initialization() == u->in(0)->in(0)) {
        // A CheckCastPP following an allocation always dominates all
        // uses of the allocation result.
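        // (The matched shape, roughly: u = (CheckCastPP ctrl=(Proj (Initialize alloc)) oop=(Proj alloc))
        //  where alloc->initialization() is that same Initialize.)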
206 return u->as_Type();
207 }
208 }
209 }
210 return nullptr;
211 }
212
213 bool ConstraintCastNode::higher_equal_types(PhaseGVN* phase, const Node* other) const {
214 const Type* t = phase->type(other);
215 if (!t->higher_equal_speculative(type())) {
216 return false;
217 }
218 if (_extra_types != nullptr) {
219 for (uint i = 0; i < _extra_types->cnt(); ++i) {
220 if (!t->higher_equal_speculative(_extra_types->field_at(i))) {
221 return false;
222 }
223 }
224 }
225 return true;
226 }
227
228 #ifndef PRODUCT
229 void ConstraintCastNode::dump_spec(outputStream *st) const {
230 TypeNode::dump_spec(st);
231 if (_extra_types != nullptr) {
232 st->print(" extra types: ");
233 _extra_types->dump_on(st);
234 }
235 st->print(" ");
236 _dependency.dump_on(st);
237 }
238 #endif
239
240 CastIINode* CastIINode::make_with(Node* parent, const TypeInteger* type, const DependencyType& dependency) const {
241 return new CastIINode(in(0), parent, type, dependency, _range_check_dependency, _extra_types);
242 }
243
244 CastLLNode* CastLLNode::make_with(Node* parent, const TypeInteger* type, const DependencyType& dependency) const {
245 return new CastLLNode(in(0), parent, type, dependency, _extra_types);
246 }
247
248 Node* ConstraintCastNode::find_or_make_integer_cast(PhaseIterGVN* igvn, Node* parent, const TypeInteger* type, const DependencyType& dependency) const {
249 Node* n = make_with(parent, type, dependency);
250 Node* existing = igvn->hash_find_insert(n);
251 if (existing != nullptr) {
252 n->destruct(igvn);
253 return existing;
254 }
255 return igvn->register_new_node_with_optimizer(n);
256 }
257
258 Node *CastIINode::Ideal(PhaseGVN *phase, bool can_reshape) {
259 Node* progress = ConstraintCastNode::Ideal(phase, can_reshape);
260 if (progress != nullptr) {
261 return progress;
262 }
263 if (!phase->C->post_loop_opts_phase()) {
    // Make sure widen_type() runs after loop opts, to potentially common type assertions
265 phase->C->record_for_post_loop_opts_igvn(this);
266 }
267 if (!_range_check_dependency || phase->C->post_loop_opts_phase()) {
268 return optimize_integer_cast(phase, T_INT);
269 }
270 return nullptr;
271 }
272
273 Node* CastIINode::Identity(PhaseGVN* phase) {
274 Node* progress = ConstraintCastNode::Identity(phase);
275 if (progress != this) {
276 return progress;
277 }
278 return this;
279 }
280
281 bool CastIINode::cmp(const Node &n) const {
282 return ConstraintCastNode::cmp(n) && ((CastIINode&)n)._range_check_dependency == _range_check_dependency;
283 }
284
285 uint CastIINode::size_of() const {
286 return sizeof(*this);
287 }
288
289 #ifndef PRODUCT
290 void CastIINode::dump_spec(outputStream* st) const {
291 ConstraintCastNode::dump_spec(st);
292 if (_range_check_dependency) {
293 st->print(" range check dependency");
294 }
295 }
296 #endif
297
298 CastIINode* CastIINode::pin_array_access_node() const {
299 assert(_dependency.is_floating(), "already pinned");
300 if (has_range_check()) {
301 return new CastIINode(in(0), in(1), bottom_type(), _dependency.with_pinned_dependency(), has_range_check());
302 }
303 return nullptr;
304 }
305
306 void CastIINode::remove_range_check_cast(Compile* C) {
307 if (has_range_check()) {
    // Range check CastII nodes feed into an address computation subgraph. Remove them to let that subgraph float freely.
    // For memory accesses or integer divisions that depend on the cast, record the dependency on the cast's control as
    // a precedence edge, so they can't float above it in case the cast's narrowed type helped eliminate a range check
    // or a divide-by-zero check.
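    // For instance (a sketch): with (LoadI (AddP base (ConvI2L (CastII idx #range_check)))), the CastII goes away and
    // the LoadI gets a precedence edge on the cast's control (the range check projection), so it still cannot float
    // above the check.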
312 assert(in(0) != nullptr, "All RangeCheck CastII must have a control dependency");
313 ResourceMark rm;
314 Unique_Node_List wq;
315 wq.push(this);
316 for (uint next = 0; next < wq.size(); ++next) {
317 Node* m = wq.at(next);
318 for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
319 Node* use = m->fast_out(i);
320 if (use->is_Mem() || use->is_div_or_mod(T_INT) || use->is_div_or_mod(T_LONG)) {
321 use->ensure_control_or_add_prec(in(0));
322 } else if (!use->is_CFG() && !use->is_Phi()) {
323 wq.push(use);
324 }
325 }
326 }
327 subsume_by(in(1), C);
328 if (outcnt() == 0) {
329 disconnect_inputs(C);
330 }
331 }
332 }
333
334 bool CastLLNode::is_inner_loop_backedge(IfProjNode* proj) {
335 if (proj != nullptr) {
336 Node* ctrl_use = proj->unique_ctrl_out_or_null();
337 if (ctrl_use != nullptr && ctrl_use->Opcode() == Op_Loop &&
338 ctrl_use->in(2) == proj &&
339 ctrl_use->as_Loop()->is_loop_nest_inner_loop()) {
340 return true;
341 }
342 }
343 return false;
344 }
345
346 bool CastLLNode::cmp_used_at_inner_loop_exit_test(CmpNode* cmp) {
347 for (DUIterator_Fast imax, i = cmp->fast_outs(imax); i < imax; i++) {
348 Node* bol = cmp->fast_out(i);
349 if (bol->Opcode() == Op_Bool) {
350 for (DUIterator_Fast jmax, j = bol->fast_outs(jmax); j < jmax; j++) {
351 Node* iff = bol->fast_out(j);
352 if (iff->Opcode() == Op_If) {
353 IfTrueNode* true_proj = iff->as_If()->true_proj_or_null();
354 IfFalseNode* false_proj = iff->as_If()->false_proj_or_null();
355 if (is_inner_loop_backedge(true_proj) || is_inner_loop_backedge(false_proj)) {
356 return true;
357 }
358 }
359 }
360 }
361 }
362 return false;
363 }
364
// Determine whether this is a cast node added by PhaseIdealLoop::create_loop_nest() to narrow the number of iterations
// of the inner loop.
367 bool CastLLNode::used_at_inner_loop_exit_test() const {
368 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
369 Node* convl2i = fast_out(i);
370 if (convl2i->Opcode() == Op_ConvL2I) {
371 for (DUIterator_Fast jmax, j = convl2i->fast_outs(jmax); j < jmax; j++) {
372 Node* cmp_or_sub = convl2i->fast_out(j);
373 if (cmp_or_sub->Opcode() == Op_CmpI) {
374 if (cmp_used_at_inner_loop_exit_test(cmp_or_sub->as_Cmp())) {
375 // (Loop .. .. (IfProj (If (Bool (CmpI (ConvL2I (CastLL )))))))
376 return true;
377 }
378 } else if (cmp_or_sub->Opcode() == Op_SubI && cmp_or_sub->in(1)->find_int_con(-1) == 0) {
379 for (DUIterator_Fast kmax, k = cmp_or_sub->fast_outs(kmax); k < kmax; k++) {
380 Node* cmp = cmp_or_sub->fast_out(k);
381 if (cmp->Opcode() == Op_CmpI) {
382 if (cmp_used_at_inner_loop_exit_test(cmp->as_Cmp())) {
383 // (Loop .. .. (IfProj (If (Bool (CmpI (SubI 0 (ConvL2I (CastLL ))))))))
384 return true;
385 }
386 }
387 }
388 }
389 }
390 }
391 }
392 return false;
393 }
394
395 Node* CastLLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
396 Node* progress = ConstraintCastNode::Ideal(phase, can_reshape);
397 if (progress != nullptr) {
398 return progress;
399 }
400 if (!phase->C->post_loop_opts_phase()) {
    // Make sure widen_type() runs after loop opts, to potentially common type assertions
402 phase->C->record_for_post_loop_opts_igvn(this);
403 }
404 // transform (CastLL (ConvI2L ..)) into (ConvI2L (CastII ..)) if the type of the CastLL is narrower than the type of
405 // the ConvI2L.
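  // A sketch of the shape: (CastLL (ConvI2L i) [0, 10]) becomes (ConvI2L (CastII i [0, 10])), so the narrowed range is
  // applied directly to the int value feeding the ConvI2L.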
406 Node* in1 = in(1);
407 if (in1 != nullptr && in1->Opcode() == Op_ConvI2L) {
408 const Type* t = Value(phase);
409 const Type* t_in = phase->type(in1);
410 if (t != Type::TOP && t_in != Type::TOP) {
411 const TypeLong* tl = t->is_long();
412 const TypeLong* t_in_l = t_in->is_long();
413 assert(tl->_lo >= t_in_l->_lo && tl->_hi <= t_in_l->_hi, "CastLL type should be narrower than or equal to the type of its input");
      assert((tl != t_in_l) == (tl->_lo > t_in_l->_lo || tl->_hi < t_in_l->_hi), "if type differs then this node's type must be narrower");
415 if (tl != t_in_l) {
416 const TypeInt* ti = TypeInt::make(checked_cast<jint>(tl->_lo), checked_cast<jint>(tl->_hi), tl->_widen);
417 Node* castii = phase->transform(new CastIINode(in(0), in1->in(1), ti));
418 Node* convi2l = in1->clone();
419 convi2l->set_req(1, castii);
420 return convi2l;
421 }
422 }
423 }
424 // If it's a cast created by PhaseIdealLoop::short_running_loop(), don't transform it until the counted loop is created
  // in the next loop opts pass
426 if (!can_reshape || !used_at_inner_loop_exit_test()) {
427 return optimize_integer_cast(phase, T_LONG);
428 }
429 return nullptr;
430 }
431
432 //=============================================================================
433 //------------------------------Identity---------------------------------------
434 // If input is already higher or equal to cast type, then this is an identity.
435 Node* CheckCastPPNode::Identity(PhaseGVN* phase) {
436 if (in(1)->is_InlineType() && _type->isa_instptr() && phase->type(in(1))->inline_klass()->is_subtype_of(_type->is_instptr()->instance_klass())) {
437 return in(1);
438 }
439 return ConstraintCastNode::Identity(phase);
440 }
441
442 //------------------------------Value------------------------------------------
443 // Take 'join' of input and cast-up type, unless working with an Interface
444 const Type* CheckCastPPNode::Value(PhaseGVN* phase) const {
445 if( in(0) && phase->type(in(0)) == Type::TOP ) return Type::TOP;
446
447 const Type *inn = phase->type(in(1));
448 if( inn == Type::TOP ) return Type::TOP; // No information yet
449
450 if (inn->isa_oopptr() && _type->isa_oopptr()) {
451 return ConstraintCastNode::Value(phase);
452 }
453
454 const TypePtr *in_type = inn->isa_ptr();
455 const TypePtr *my_type = _type->isa_ptr();
456 const Type *result = _type;
457 if (in_type != nullptr && my_type != nullptr) {
458 // TODO 8302672
459 if (!StressReflectiveCode && my_type->isa_aryptr() && in_type->isa_aryptr()) {
460 // Propagate array properties (not flat/null-free)
461 // Don't do this when StressReflectiveCode is enabled because it might lead to
462 // a dying data path while the corresponding flat/null-free check is not folded.
463 my_type = my_type->is_aryptr()->update_properties(in_type->is_aryptr());
464 if (my_type == nullptr) {
465 return Type::TOP; // Inconsistent properties
466 }
467 }
468 TypePtr::PTR in_ptr = in_type->ptr();
469 if (in_ptr == TypePtr::Null) {
470 result = in_type;
471 } else if (in_ptr != TypePtr::Constant) {
472 result = my_type->cast_to_ptr_type(my_type->join_ptr(in_ptr));
473 }
474 }
475
476 return result;
477 }
478
479 //=============================================================================
480 //------------------------------Value------------------------------------------
481 const Type* CastX2PNode::Value(PhaseGVN* phase) const {
482 const Type* t = phase->type(in(1));
483 if (t == Type::TOP) return Type::TOP;
484 if (t->base() == Type_X && t->singleton()) {
485 uintptr_t bits = (uintptr_t) t->is_intptr_t()->get_con();
486 if (bits == 0) return TypePtr::NULL_PTR;
487 return TypeRawPtr::make((address) bits);
488 }
489 return CastX2PNode::bottom_type();
490 }
491
492 //------------------------------Idealize---------------------------------------
493 static inline bool fits_in_int(const Type* t, bool but_not_min_int = false) {
494 if (t == Type::TOP) return false;
495 const TypeX* tl = t->is_intptr_t();
496 jint lo = min_jint;
497 jint hi = max_jint;
498 if (but_not_min_int) ++lo; // caller wants to negate the value w/o overflow
499 return (tl->_lo >= lo) && (tl->_hi <= hi);
500 }
501
502 static inline Node* addP_of_X2P(PhaseGVN *phase,
503 Node* base,
504 Node* dispX,
505 bool negate = false) {
506 if (negate) {
507 dispX = phase->transform(new SubXNode(phase->MakeConX(0), dispX));
508 }
509 return new AddPNode(phase->C->top(),
510 phase->transform(new CastX2PNode(base)),
511 dispX);
512 }
513
514 Node *CastX2PNode::Ideal(PhaseGVN *phase, bool can_reshape) {
515 // convert CastX2P(AddX(x, y)) to AddP(CastX2P(x), y) if y fits in an int
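  // A sketch of the resulting shapes (assuming y fits in an int):
  //   (CastX2P (AddX x y)) => (AddP top (CastX2P x) y)
  //   (CastX2P (SubX x y)) => (AddP top (CastX2P x) (SubX 0 y))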
516 int op = in(1)->Opcode();
517 Node* x;
518 Node* y;
519 switch (op) {
520 case Op_SubX:
521 x = in(1)->in(1);
    // Avoid an ideal transformation ping-pong between this node and AddP for raw pointers.
523 if (phase->find_intptr_t_con(x, -1) == 0)
524 break;
525 y = in(1)->in(2);
526 if (fits_in_int(phase->type(y), true)) {
527 return addP_of_X2P(phase, x, y, true);
528 }
529 break;
530 case Op_AddX:
531 x = in(1)->in(1);
532 y = in(1)->in(2);
533 if (fits_in_int(phase->type(y))) {
534 return addP_of_X2P(phase, x, y);
535 }
536 if (fits_in_int(phase->type(x))) {
537 return addP_of_X2P(phase, y, x);
538 }
539 break;
540 }
541 return nullptr;
542 }
543
544 //------------------------------Identity---------------------------------------
545 Node* CastX2PNode::Identity(PhaseGVN* phase) {
546 if (in(1)->Opcode() == Op_CastP2X) return in(1)->in(1);
547 return this;
548 }
549
550 //=============================================================================
551 //------------------------------Value------------------------------------------
552 const Type* CastP2XNode::Value(PhaseGVN* phase) const {
553 const Type* t = phase->type(in(1));
554 if (t == Type::TOP) return Type::TOP;
555 if (t->base() == Type::RawPtr && t->singleton()) {
556 uintptr_t bits = (uintptr_t) t->is_rawptr()->get_con();
557 return TypeX::make(bits);
558 }
559 return CastP2XNode::bottom_type();
560 }
561
562 Node *CastP2XNode::Ideal(PhaseGVN *phase, bool can_reshape) {
563 return (in(0) && remove_dead_region(phase, can_reshape)) ? this : nullptr;
564 }
565
566 //------------------------------Identity---------------------------------------
567 Node* CastP2XNode::Identity(PhaseGVN* phase) {
568 if (in(1)->Opcode() == Op_CastX2P) return in(1)->in(1);
569 return this;
570 }
571
572 Node* ConstraintCastNode::make_cast_for_type(Node* c, Node* in, const Type* type, const DependencyType& dependency,
573 const TypeTuple* types) {
574 if (type->isa_int()) {
575 return new CastIINode(c, in, type, dependency, false, types);
576 } else if (type->isa_long()) {
577 return new CastLLNode(c, in, type, dependency, types);
578 } else if (type->isa_half_float()) {
579 return new CastHHNode(c, in, type, dependency, types);
580 } else if (type->isa_float()) {
581 return new CastFFNode(c, in, type, dependency, types);
582 } else if (type->isa_double()) {
583 return new CastDDNode(c, in, type, dependency, types);
584 } else if (type->isa_vect()) {
585 return new CastVVNode(c, in, type, dependency, types);
586 } else if (type->isa_ptr()) {
587 return new CastPPNode(c, in, type, dependency, types);
588 }
589 fatal("unreachable. Invalid cast type.");
590 return nullptr;
591 }
592
593 Node* ConstraintCastNode::optimize_integer_cast_of_add(PhaseGVN* phase, BasicType bt) {
594 PhaseIterGVN *igvn = phase->is_IterGVN();
595 const TypeInteger* this_type = this->type()->isa_integer(bt);
596 if (this_type == nullptr) {
597 return nullptr;
598 }
599
600 Node* z = in(1);
601 const TypeInteger* rx = nullptr;
602 const TypeInteger* ry = nullptr;
603 // Similar to ConvI2LNode::Ideal() for the same reasons
604 if (Compile::push_thru_add(phase, z, this_type, rx, ry, bt, bt)) {
605 if (igvn == nullptr) {
606 // Postpone this optimization to iterative GVN, where we can handle deep
607 // AddI chains without an exponential number of recursive Ideal() calls.
608 phase->record_for_igvn(this);
609 return nullptr;
610 }
611 int op = z->Opcode();
612 Node* x = z->in(1);
613 Node* y = z->in(2);
614
615 const TypeInteger* tx = phase->type(x)->is_integer(bt);
616 const TypeInteger* ty = phase->type(y)->is_integer(bt);
617
618 // (Cast (Add x y) tz) is transformed into (Add (Cast x rx) (Cast y ry))
619 //
620 // tz = [tzlo, tzhi]
621 // rx = [rxlo, rxhi]
622 // ry = [rylo, ryhi]
623 // with type of x, tx = [txlo, txhi]
624 // with type of y, ty = [tylo, tyhi]
625 //
626 // From Compile::push_thru_add():
627 // rxlo = max(tzlo - tyhi, txlo)
628 // rxhi = min(tzhi - tylo, txhi)
629 // rylo = max(tzlo - txhi, tylo)
630 // ryhi = min(tzhi - txlo, tyhi)
631 //
632 // If x is a constant, then txlo = txhi
633 // rxlo = txlo, rxhi = txhi
    // The bounds of the type of the Add after transformation then are:
635 // rxlo + rylo >= txlo + tzlo - txhi >= tzlo
636 // rxhi + ryhi <= txhi + tzhi - txlo <= tzhi
637 // The resulting type is not wider than the type of the Cast
638 // before transformation
639 //
    // If neither x nor y is constant then the type of the resulting
    // Add can be wider than the type of the Cast before
    // transformation.
643 // For instance, tx = [0, 10], ty = [0, 10], tz = [0, 10]
644 // then rx = [0, 10], ry = [0, 10]
645 // and rx + ry = [0, 20] which is wider than tz
646 //
647 // Same reasoning applies to (Cast (Sub x y) tz)
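    // Hence, when neither x nor y is constant, the casts created below carry a non-narrowing dependency: their types
    // cannot be relied on to later remove them or replace them with a dominating cast.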
648 const DependencyType& dependency = (!tx->is_con() && !ty->is_con()) ? _dependency.with_non_narrowing() : _dependency;
649 Node* cx = find_or_make_integer_cast(igvn, x, rx, dependency);
650 Node* cy = find_or_make_integer_cast(igvn, y, ry, dependency);
651 if (op == Op_Add(bt)) {
652 return AddNode::make(cx, cy, bt);
653 } else {
654 assert(op == Op_Sub(bt), "");
655 return SubNode::make(cx, cy, bt);
656 }
657 return nullptr;
658 }
659 return nullptr;
660 }
661
662 Node* ConstraintCastNode::optimize_integer_cast(PhaseGVN* phase, BasicType bt) {
663 Node* res = optimize_integer_cast_of_add(phase, bt);
664 if (res != nullptr) {
665 return res;
666 }
667 const Type* t = Value(phase);
668 if (t != Type::TOP && phase->C->post_loop_opts_phase()) {
669 const Type* bottom_t = bottom_type();
670 const TypeInteger* wide_t = widen_type(phase, bottom_t, bt);
671 if (wide_t != bottom_t) {
      // Widening the type of the Cast (to allow some commoning) changes how the Cast can be optimized (if the type of
      // its input is narrower than the Cast's type, we can't remove it, so as not to lose the control dependency).
674 return make_with(in(1), wide_t, _dependency.with_non_narrowing());
675 }
676 }
677 return nullptr;
678 }
679
680 const TypeInteger* ConstraintCastNode::widen_type(const PhaseGVN* phase, const Type* res, BasicType bt) const {
681 const TypeInteger* this_type = res->is_integer(bt);
682 // At VerifyConstraintCasts == 1, we verify the ConstraintCastNodes that are present during code
  // emission. This allows us to detect possible mis-scheduling due to these nodes being pinned at
684 // the wrong control nodes.
685 // At VerifyConstraintCasts == 2, we do not perform widening so that we can verify the
686 // correctness of more ConstraintCastNodes. This further helps us detect possible
687 // mis-transformations that may happen due to these nodes being pinned at the wrong control
688 // nodes.
689 if (VerifyConstraintCasts > 1) {
690 return this_type;
691 }
692
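  // Keep only the "sign" of the current type so that type assertions on the same input can common up. For instance
  // (a sketch, for bt == T_INT): if this type is [3, 20] and the input type is [0, 100], lo1/hi1 below become
  // [0, max_jint] and the result is clipped to the input type, giving [0, 100].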
693 const TypeInteger* in_type = phase->type(in(1))->isa_integer(bt);
694 if (in_type != nullptr &&
695 (in_type->lo_as_long() != this_type->lo_as_long() ||
696 in_type->hi_as_long() != this_type->hi_as_long())) {
697 jlong lo1 = this_type->lo_as_long();
698 jlong hi1 = this_type->hi_as_long();
699 int w1 = this_type->_widen;
700 if (lo1 >= 0) {
701 // Keep a range assertion of >=0.
702 lo1 = 0; hi1 = max_signed_integer(bt);
703 } else if (hi1 < 0) {
704 // Keep a range assertion of <0.
705 lo1 = min_signed_integer(bt); hi1 = -1;
706 } else {
707 lo1 = min_signed_integer(bt); hi1 = max_signed_integer(bt);
708 }
709 return TypeInteger::make(MAX2(in_type->lo_as_long(), lo1),
710 MIN2(in_type->hi_as_long(), hi1),
711 MAX2((int)in_type->_widen, w1), bt);
712 }
713 return this_type;
714 }