1 /*
2 * Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "opto/addnode.hpp"
26 #include "opto/callnode.hpp"
27 #include "opto/castnode.hpp"
28 #include "opto/cfgnode.hpp"
29 #include "opto/connode.hpp"
30 #include "opto/loopnode.hpp"
31 #include "opto/matcher.hpp"
32 #include "opto/phaseX.hpp"
33 #include "opto/subnode.hpp"
34 #include "opto/type.hpp"
35 #include "utilities/checkedCast.hpp"
36
// Shared singleton instances for the four combinations of cast dependency:
// - floating vs. non-floating: whether the cast may float above its control input
// - narrowing vs. non-narrowing: whether the cast's type constrains (narrows) its input's type
const ConstraintCastNode::DependencyType ConstraintCastNode::DependencyType::FloatingNarrowing(true, true, "floating narrowing dependency"); // not pinned, narrows type
const ConstraintCastNode::DependencyType ConstraintCastNode::DependencyType::FloatingNonNarrowing(true, false, "floating non-narrowing dependency"); // not pinned, doesn't narrow type
const ConstraintCastNode::DependencyType ConstraintCastNode::DependencyType::NonFloatingNarrowing(false, true, "non-floating narrowing dependency"); // pinned, narrows type
const ConstraintCastNode::DependencyType ConstraintCastNode::DependencyType::NonFloatingNonNarrowing(false, false, "non-floating non-narrowing dependency"); // pinned, doesn't narrow type
41
42 //=============================================================================
43 // If input is already higher or equal to cast type, then this is an identity.
44 Node* ConstraintCastNode::Identity(PhaseGVN* phase) {
45 if (!_dependency.narrows_type()) {
46 // If this cast doesn't carry a type dependency (i.e. not used for type narrowing), we cannot optimize it.
47 return this;
48 }
49
50 // This cast node carries a type dependency. We can remove it if:
51 // - Its input has a narrower type
52 // - There's a dominating cast with same input but narrower type
53 Node* dom = dominating_cast(phase, phase);
54 if (dom != nullptr) {
55 return dom;
56 }
57 return higher_equal_types(phase, in(1)) ? in(1) : this;
58 }
59
//------------------------------Value------------------------------------------
// Take 'join' of input and cast-up type. The result is the input's type
// filtered (narrowed) by the cast's declared type.
const Type* ConstraintCastNode::Value(PhaseGVN* phase) const {
  // A dead control input kills the cast.
  if (in(0) && phase->type(in(0)) == Type::TOP) return Type::TOP;

  const Type* in_type = phase->type(in(1));
  const Type* ft = in_type->filter_speculative(_type);

  // Check if both _type and in_type had a speculative type, but for the just
  // computed ft the speculative type was dropped.
  if (ft->speculative() == nullptr &&
      _type->speculative() != nullptr &&
      in_type->speculative() != nullptr) {
    // Speculative type may have disagreed between cast and input, and was
    // dropped in filtering. Recompute so that ft can take speculative type
    // of in_type. If we did not do it now, a subsequent ::Value call would
    // do it, and violate idempotence of ::Value.
    ft = in_type->filter_speculative(ft);
  }

#ifdef ASSERT
  // Previous versions of this function had some special case logic,
  // which is no longer necessary. Make sure of the required effects.
  switch (Opcode()) {
  case Op_CastII:
  {
    // A dead input, or an empty join of input and cast type, must yield TOP.
    if (in_type == Type::TOP) {
      assert(ft == Type::TOP, "special case #1");
    }
    const Type* rt = in_type->join_speculative(_type);
    if (rt->empty()) {
      assert(ft == Type::TOP, "special case #2");
    }
    break;
  }
  case Op_CastPP:
    // Casting a known-null to a not-null type is an impossible value: TOP.
    if (in_type == TypePtr::NULL_PTR &&
        _type->isa_ptr() && _type->is_ptr()->_ptr == TypePtr::NotNull) {
      assert(ft == Type::TOP, "special case #3");
      break;
    }
  }
#endif //ASSERT

  return ft;
}
106
107 //------------------------------Ideal------------------------------------------
108 // Return a node which is more "ideal" than the current node. Strip out
109 // control copies
110 Node* ConstraintCastNode::Ideal(PhaseGVN* phase, bool can_reshape) {
111 if (in(0) != nullptr && remove_dead_region(phase, can_reshape)) {
112 return this;
113 }
114 if (in(1) != nullptr && phase->type(in(1)) != Type::TOP) {
115 return TypeNode::Ideal(phase, can_reshape);
116 }
117 return nullptr;
118 }
119
120 uint ConstraintCastNode::hash() const {
121 return TypeNode::hash() + _dependency.hash() + (_extra_types != nullptr ? _extra_types->hash() : 0);
122 }
123
124 bool ConstraintCastNode::cmp(const Node &n) const {
125 if (!TypeNode::cmp(n)) {
126 return false;
127 }
128 ConstraintCastNode& cast = (ConstraintCastNode&) n;
129 if (!cast._dependency.cmp(_dependency)) {
130 return false;
131 }
132 if (_extra_types == nullptr || cast._extra_types == nullptr) {
133 return _extra_types == cast._extra_types;
134 }
135 return _extra_types->eq(cast._extra_types);
136 }
137
// Report the node's concrete size so Node cloning copies all fields.
uint ConstraintCastNode::size_of() const {
  return sizeof(*this);
}
141
142 Node* ConstraintCastNode::make_cast_for_basic_type(Node* c, Node* n, const Type* t, const DependencyType& dependency, BasicType bt) {
143 switch(bt) {
144 case T_INT:
145 return new CastIINode(c, n, t, dependency);
146 case T_LONG:
147 return new CastLLNode(c, n, t, dependency);
148 default:
149 fatal("Bad basic type %s", type2name(bt));
150 }
151 return nullptr;
152 }
153
// Look for another cast of the same value, with a type at least as narrow as
// this one's, whose control dominates this cast's control. Such a cast can
// replace this one. Returns nullptr if no suitable dominating cast is found.
TypeNode* ConstraintCastNode::dominating_cast(PhaseGVN* gvn, PhaseTransform* pt) const {
  // See discussion at definition of ConstraintCastNode::DependencyType: replacing this cast with a dominating one is
  // not safe if _dependency.narrows_type() is not true.
  assert(_dependency.narrows_type(), "cast can't be replaced by dominating one");
  Node* val = in(1);
  Node* ctl = in(0);
  int opc = Opcode();
  if (ctl == nullptr) {
    return nullptr;
  }
  // Range check CastIIs may all end up under a single range check and
  // in that case only the narrower CastII would be kept by the code
  // below which would be incorrect.
  if (is_CastII() && as_CastII()->has_range_check()) {
    return nullptr;
  }
  // Don't replace a raw-pointer cast when the input is (or may still become)
  // an oop: the types are not interchangeable.
  if (type()->isa_rawptr() && (gvn->type_or_null(val) == nullptr || gvn->type(val)->isa_oopptr())) {
    return nullptr;
  }
  // Scan all other uses of the same value for a cast of the same opcode with
  // a type at least as narrow as ours.
  for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
    Node* u = val->fast_out(i);
    if (u != this &&
        u->outcnt() > 0 &&
        u->Opcode() == opc &&
        u->in(0) != nullptr &&
        higher_equal_types(gvn, u)) {
      // Candidate found: it qualifies if its control dominates ours...
      if (pt->is_dominator(u->in(0), ctl)) {
        return u->as_Type();
      }
      // ...or if it is the CheckCastPP right after an allocation, which
      // dominates every use of that allocation's result by construction.
      if (is_CheckCastPP() && u->in(1)->is_Proj() && u->in(1)->in(0)->is_Allocate() &&
          u->in(0)->is_Proj() && u->in(0)->in(0)->is_Initialize() &&
          u->in(1)->in(0)->as_Allocate()->initialization() == u->in(0)->in(0)) {
        // CheckCastPP following an allocation always dominates all
        // use of the allocation result
        return u->as_Type();
      }
    }
  }
  return nullptr;
}
194
195 bool ConstraintCastNode::higher_equal_types(PhaseGVN* phase, const Node* other) const {
196 const Type* t = phase->type(other);
197 if (!t->higher_equal_speculative(type())) {
198 return false;
199 }
200 if (_extra_types != nullptr) {
201 for (uint i = 0; i < _extra_types->cnt(); ++i) {
202 if (!t->higher_equal_speculative(_extra_types->field_at(i))) {
203 return false;
204 }
205 }
206 }
207 return true;
208 }
209
// Return a copy of this cast with a pinned (non-floating) dependency, keeping
// everything else (control, input, type, extra types) the same.
Node* ConstraintCastNode::pin_node_under_control_impl() const {
  assert(_dependency.is_floating(), "already pinned");
  return make_cast_for_type(in(0), in(1), bottom_type(), _dependency.with_pinned_dependency(), _extra_types);
}
214
215 #ifndef PRODUCT
// Debug printing: base type info, then extra types (if any), then the
// dependency kind.
void ConstraintCastNode::dump_spec(outputStream *st) const {
  TypeNode::dump_spec(st);
  if (_extra_types != nullptr) {
    st->print(" extra types: ");
    _extra_types->dump_on(st);
  }
  st->print(" ");
  _dependency.dump_on(st);
}
225 #endif
226
// Clone this CastII with a new input and type, preserving control, the
// range-check flag and the extra types.
CastIINode* CastIINode::make_with(Node* parent, const TypeInteger* type, const DependencyType& dependency) const {
  return new CastIINode(in(0), parent, type, dependency, _range_check_dependency, _extra_types);
}
230
// Clone this CastLL with a new input and type, preserving control and the
// extra types.
CastLLNode* CastLLNode::make_with(Node* parent, const TypeInteger* type, const DependencyType& dependency) const {
  return new CastLLNode(in(0), parent, type, dependency, _extra_types);
}
234
235 Node* ConstraintCastNode::find_or_make_integer_cast(PhaseIterGVN* igvn, Node* parent, const TypeInteger* type, const DependencyType& dependency) const {
236 Node* n = make_with(parent, type, dependency);
237 Node* existing = igvn->hash_find_insert(n);
238 if (existing != nullptr) {
239 n->destruct(igvn);
240 return existing;
241 }
242 return igvn->register_new_node_with_optimizer(n);
243 }
244
// CastII-specific Ideal: generic constraint-cast cleanup first, then integer
// cast optimizations (delayed for range-check casts until after loop opts).
Node *CastIINode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* progress = ConstraintCastNode::Ideal(phase, can_reshape);
  if (progress != nullptr) {
    return progress;
  }
  if (!phase->C->post_loop_opts_phase()) {
    // makes sure we run widen_type() to potentially common type assertions after loop opts
    phase->C->record_for_post_loop_opts_igvn(this);
  }
  // Range-check casts must keep their precise type until loop opts are done.
  if (!_range_check_dependency || phase->C->post_loop_opts_phase()) {
    return optimize_integer_cast(phase, T_INT);
  }
  return nullptr;
}
259
260 Node* CastIINode::Identity(PhaseGVN* phase) {
261 Node* progress = ConstraintCastNode::Identity(phase);
262 if (progress != this) {
263 return progress;
264 }
265 return this;
266 }
267
268 bool CastIINode::cmp(const Node &n) const {
269 return ConstraintCastNode::cmp(n) && ((CastIINode&)n)._range_check_dependency == _range_check_dependency;
270 }
271
// Report the node's concrete size so Node cloning copies all fields.
uint CastIINode::size_of() const {
  return sizeof(*this);
}
275
276 #ifndef PRODUCT
// Debug printing: generic cast info plus the range-check flag, if set.
void CastIINode::dump_spec(outputStream* st) const {
  ConstraintCastNode::dump_spec(st);
  if (_range_check_dependency) {
    st->print(" range check dependency");
  }
}
283 #endif
284
// Return a copy of this CastII with a pinned (non-floating) dependency,
// preserving the range-check flag and extra types.
CastIINode* CastIINode::pin_node_under_control_impl() const {
  assert(_dependency.is_floating(), "already pinned");
  return new CastIINode(in(0), in(1), bottom_type(), _dependency.with_pinned_dependency(), _range_check_dependency, _extra_types);
}
289
// Eliminate this range-check CastII before matching, transferring its control
// dependency to the memory accesses and divisions that relied on it.
void CastIINode::remove_range_check_cast(Compile* C) {
  if (has_range_check()) {
    // Range check CastII nodes feed into an address computation subgraph. Remove them to let that subgraph float freely.
    // For memory access or integer divisions nodes that depend on the cast, record the dependency on the cast's control
    // as a precedence edge, so they can't float above the cast in case that cast's narrowed type helped eliminate a
    // range check or a null divisor check.
    assert(in(0) != nullptr, "All RangeCheck CastII must have a control dependency");
    ResourceMark rm;
    Unique_Node_List wq;
    // Walk the data uses transitively (stopping at control flow and Phis) and
    // pin every memory access / division found on this cast's control.
    wq.push(this);
    for (uint next = 0; next < wq.size(); ++next) {
      Node* m = wq.at(next);
      for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
        Node* use = m->fast_out(i);
        if (use->is_Mem() || use->is_div_or_mod(T_INT) || use->is_div_or_mod(T_LONG)) {
          use->ensure_control_or_add_prec(in(0));
        } else if (!use->is_CFG() && !use->is_Phi()) {
          wq.push(use);
        }
      }
    }
    // Replace the cast by its input everywhere, then disconnect it.
    subsume_by(in(1), C);
    if (outcnt() == 0) {
      disconnect_inputs(C);
    }
  }
}
317
318 bool CastLLNode::is_inner_loop_backedge(IfProjNode* proj) {
319 if (proj != nullptr) {
320 Node* ctrl_use = proj->unique_ctrl_out_or_null();
321 if (ctrl_use != nullptr && ctrl_use->Opcode() == Op_Loop &&
322 ctrl_use->in(2) == proj &&
323 ctrl_use->as_Loop()->is_loop_nest_inner_loop()) {
324 return true;
325 }
326 }
327 return false;
328 }
329
// Is 'cmp' used (via a Bool and an If) as the exit test whose projection forms
// the backedge of an inner loop created by loop-nest construction?
bool CastLLNode::cmp_used_at_inner_loop_exit_test(CmpNode* cmp) {
  for (DUIterator_Fast imax, i = cmp->fast_outs(imax); i < imax; i++) {
    Node* bol = cmp->fast_out(i);
    if (bol->Opcode() == Op_Bool) {
      for (DUIterator_Fast jmax, j = bol->fast_outs(jmax); j < jmax; j++) {
        Node* iff = bol->fast_out(j);
        if (iff->Opcode() == Op_If) {
          // Either projection of the If may be the inner loop's backedge.
          IfTrueNode* true_proj = iff->as_If()->true_proj_or_null();
          IfFalseNode* false_proj = iff->as_If()->false_proj_or_null();
          if (is_inner_loop_backedge(true_proj) || is_inner_loop_backedge(false_proj)) {
            return true;
          }
        }
      }
    }
  }
  return false;
}
348
// Find if this is a cast node added by PhaseIdealLoop::create_loop_nest() to narrow the number of iterations of the
// inner loop. Such a cast feeds the inner loop's exit test through a ConvL2I,
// possibly negated via (SubI 0 ...).
bool CastLLNode::used_at_inner_loop_exit_test() const {
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    Node* convl2i = fast_out(i);
    if (convl2i->Opcode() == Op_ConvL2I) {
      for (DUIterator_Fast jmax, j = convl2i->fast_outs(jmax); j < jmax; j++) {
        Node* cmp_or_sub = convl2i->fast_out(j);
        if (cmp_or_sub->Opcode() == Op_CmpI) {
          if (cmp_used_at_inner_loop_exit_test(cmp_or_sub->as_Cmp())) {
            // (Loop .. .. (IfProj (If (Bool (CmpI (ConvL2I (CastLL )))))))
            return true;
          }
        } else if (cmp_or_sub->Opcode() == Op_SubI && cmp_or_sub->in(1)->find_int_con(-1) == 0) {
          // SubI with constant 0 as first input: the converted value is negated
          // before feeding the compare.
          for (DUIterator_Fast kmax, k = cmp_or_sub->fast_outs(kmax); k < kmax; k++) {
            Node* cmp = cmp_or_sub->fast_out(k);
            if (cmp->Opcode() == Op_CmpI) {
              if (cmp_used_at_inner_loop_exit_test(cmp->as_Cmp())) {
                // (Loop .. .. (IfProj (If (Bool (CmpI (SubI 0 (ConvL2I (CastLL ))))))))
                return true;
              }
            }
          }
        }
      }
    }
  }
  return false;
}
378
// CastLL-specific Ideal: generic cleanup first, then push the cast below a
// ConvI2L where profitable, then generic integer cast optimizations.
Node* CastLLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  Node* progress = ConstraintCastNode::Ideal(phase, can_reshape);
  if (progress != nullptr) {
    return progress;
  }
  if (!phase->C->post_loop_opts_phase()) {
    // makes sure we run widen_type() to potentially common type assertions after loop opts
    phase->C->record_for_post_loop_opts_igvn(this);
  }
  // transform (CastLL (ConvI2L ..)) into (ConvI2L (CastII ..)) if the type of the CastLL is narrower than the type of
  // the ConvI2L.
  Node* in1 = in(1);
  if (in1 != nullptr && in1->Opcode() == Op_ConvI2L) {
    const Type* t = Value(phase);
    const Type* t_in = phase->type(in1);
    if (t != Type::TOP && t_in != Type::TOP) {
      const TypeLong* tl = t->is_long();
      const TypeLong* t_in_l = t_in->is_long();
      assert(t_in_l->contains(tl), "CastLL type should be narrower than or equal to the type of its input");
      assert((tl != t_in_l) == t_in_l->strictly_contains(tl), "if type differs then this nodes's type must be narrower");
      if (tl != t_in_l) {
        // The narrowed long range fits in an int (it came through ConvI2L), so
        // express the constraint as a CastII below a cloned ConvI2L.
        const TypeInt* ti = TypeInt::make(checked_cast<jint>(tl->_lo), checked_cast<jint>(tl->_hi), tl->_widen);
        Node* castii = phase->transform(new CastIINode(in(0), in1->in(1), ti));
        Node* convi2l = in1->clone();
        convi2l->set_req(1, castii);
        return convi2l;
      }
    }
  }
  // If it's a cast created by PhaseIdealLoop::short_running_loop(), don't transform it until the counted loop is created
  // in next loop opts pass
  if (!can_reshape || !used_at_inner_loop_exit_test()) {
    return optimize_integer_cast(phase, T_LONG);
  }
  return nullptr;
}
415
416 // CastPPNodes are removed before matching, while alias classes are needed in global code motion.
417 // As a result, it is not valid for a CastPPNode to change the oop such that the derived pointers
418 // lie in different alias classes with and without the node. For example, a CastPPNode c may not
419 // cast an Object to a Bottom[], because later removal of c would affect the alias class of c's
420 // array length field (c + arrayOopDesc::length_offset_in_bytes()).
421 //
422 // This function verifies that a CastPPNode on an oop does not violate the aforementioned property.
423 //
424 // TODO 8382147: Currently, this verification only applies during the construction of a CastPPNode,
425 // we may want to apply the same verification during IGVN transformations, as well as final graph
426 // reshaping.
// Debug-only check of the alias-class property described above: the cast must
// not move a pointer between alias classes. No-op in product builds.
void CastPPNode::verify_type(const Type* in_type, const Type* out_type) {
#ifdef ASSERT
  // Compare against the actual resulting type of the cast (join of both).
  out_type = out_type->join(in_type);
  if (in_type->empty() || out_type->empty()) {
    return;
  }
  // Null pointers carry no alias information; nothing to check.
  if (in_type == TypePtr::NULL_PTR || out_type == TypePtr::NULL_PTR) {
    return;
  }
  // Non-oop pointers (e.g. raw pointers) are outside this check's scope.
  if (!in_type->isa_oopptr() && !out_type->isa_oopptr()) {
    return;
  }

  assert(in_type->isa_oopptr() && out_type->isa_oopptr(), "must be both oops or both non-oops");
  if (in_type->isa_aryptr() && out_type->isa_aryptr()) {
    const Type* e1 = in_type->is_aryptr()->elem();
    const Type* e2 = out_type->is_aryptr()->elem();
    assert(e1->basic_type() == e2->basic_type(), "must both be arrays of the same primitive type or both be oops arrays");
    return;
  }

  assert(in_type->isa_instptr() && out_type->isa_instptr(), "must be both array oops or both non-array oops");
  assert(in_type->is_instptr()->instance_klass() == out_type->is_instptr()->instance_klass(), "must not cast to a different type");
#endif // ASSERT
}
452
453 //------------------------------Value------------------------------------------
454 // Take 'join' of input and cast-up type, unless working with an Interface
455 const Type* CheckCastPPNode::Value(PhaseGVN* phase) const {
456 if( in(0) && phase->type(in(0)) == Type::TOP ) return Type::TOP;
457
458 const Type *inn = phase->type(in(1));
459 if( inn == Type::TOP ) return Type::TOP; // No information yet
460
461 if (inn->isa_oopptr() && _type->isa_oopptr()) {
462 return ConstraintCastNode::Value(phase);
463 }
464
465 const TypePtr *in_type = inn->isa_ptr();
466 const TypePtr *my_type = _type->isa_ptr();
467 const Type *result = _type;
468 if (in_type != nullptr && my_type != nullptr) {
469 TypePtr::PTR in_ptr = in_type->ptr();
470 if (in_ptr == TypePtr::Null) {
471 result = in_type;
472 } else if (in_ptr != TypePtr::Constant) {
473 result = my_type->cast_to_ptr_type(my_type->join_ptr(in_ptr));
474 }
475 }
476
477 return result;
478 }
479
// Return a copy of this CheckCastPP with a pinned (non-floating) dependency,
// keeping control, input, type and extra types the same.
Node* CheckCastPPNode::pin_node_under_control_impl() const {
  assert(_dependency.is_floating(), "already pinned");
  return new CheckCastPPNode(in(0), in(1), bottom_type(), _dependency.with_pinned_dependency(), _extra_types);
}
484
485 //=============================================================================
486 //------------------------------Value------------------------------------------
487 const Type* CastX2PNode::Value(PhaseGVN* phase) const {
488 const Type* t = phase->type(in(1));
489 if (t == Type::TOP) return Type::TOP;
490 if (t->base() == Type_X && t->singleton()) {
491 uintptr_t bits = (uintptr_t) t->is_intptr_t()->get_con();
492 if (bits == 0) return TypePtr::NULL_PTR;
493 return TypeRawPtr::make((address) bits);
494 }
495 return CastX2PNode::bottom_type();
496 }
497
498 //------------------------------Idealize---------------------------------------
499 static inline bool fits_in_int(const Type* t, bool but_not_min_int = false) {
500 if (t == Type::TOP) return false;
501 const TypeX* tl = t->is_intptr_t();
502 jint lo = min_jint;
503 jint hi = max_jint;
504 if (but_not_min_int) ++lo; // caller wants to negate the value w/o overflow
505 return (tl->_lo >= lo) && (tl->_hi <= hi);
506 }
507
// Helper: build AddP(CastX2P(base), dispX), optionally negating the
// displacement first (dispX := 0 - dispX).
static inline Node* addP_of_X2P(PhaseGVN *phase,
                                Node* base,
                                Node* dispX,
                                bool negate = false) {
  if (negate) {
    dispX = phase->transform(new SubXNode(phase->MakeConX(0), dispX));
  }
  return AddPNode::make_off_heap(phase->transform(new CastX2PNode(base)), dispX);
}
517
Node *CastX2PNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  // convert CastX2P(AddX(x, y)) to AddP(CastX2P(x), y) if y fits in an int
  int op = in(1)->Opcode();
  Node* x;
  Node* y;
  switch (op) {
  case Op_SubX:
    x = in(1)->in(1);
    // Avoid ideal transformations ping-pong between this and AddP for raw pointers.
    if (phase->find_intptr_t_con(x, -1) == 0)
      break;
    y = in(1)->in(2);
    // The displacement gets negated, so min_int must be excluded (see fits_in_int).
    if (fits_in_int(phase->type(y), true)) {
      return addP_of_X2P(phase, x, y, true);
    }
    break;
  case Op_AddX:
    x = in(1)->in(1);
    y = in(1)->in(2);
    // Either addend may serve as the displacement if it fits in an int.
    if (fits_in_int(phase->type(y))) {
      return addP_of_X2P(phase, x, y);
    }
    if (fits_in_int(phase->type(x))) {
      return addP_of_X2P(phase, y, x);
    }
    break;
  }
  return nullptr;
}
547
548 //------------------------------Identity---------------------------------------
549 Node* CastX2PNode::Identity(PhaseGVN* phase) {
550 if (in(1)->Opcode() == Op_CastP2X) return in(1)->in(1);
551 return this;
552 }
553
554 //=============================================================================
555 //------------------------------Value------------------------------------------
556 const Type* CastP2XNode::Value(PhaseGVN* phase) const {
557 const Type* t = phase->type(in(1));
558 if (t == Type::TOP) return Type::TOP;
559 if (t->base() == Type::RawPtr && t->singleton()) {
560 uintptr_t bits = (uintptr_t) t->is_rawptr()->get_con();
561 return TypeX::make(bits);
562 }
563 return CastP2XNode::bottom_type();
564 }
565
566 Node *CastP2XNode::Ideal(PhaseGVN *phase, bool can_reshape) {
567 return (in(0) && remove_dead_region(phase, can_reshape)) ? this : nullptr;
568 }
569
570 //------------------------------Identity---------------------------------------
571 Node* CastP2XNode::Identity(PhaseGVN* phase) {
572 if (in(1)->Opcode() == Op_CastX2P) return in(1)->in(1);
573 return this;
574 }
575
576 Node* ConstraintCastNode::make_cast_for_type(Node* c, Node* in, const Type* type, const DependencyType& dependency,
577 const TypeTuple* types) {
578 if (type->isa_int()) {
579 return new CastIINode(c, in, type, dependency, false, types);
580 } else if (type->isa_long()) {
581 return new CastLLNode(c, in, type, dependency, types);
582 } else if (type->isa_half_float()) {
583 return new CastHHNode(c, in, type, dependency, types);
584 } else if (type->isa_float()) {
585 return new CastFFNode(c, in, type, dependency, types);
586 } else if (type->isa_double()) {
587 return new CastDDNode(c, in, type, dependency, types);
588 } else if (type->isa_vect()) {
589 return new CastVVNode(c, in, type, dependency, types);
590 } else if (type->isa_ptr()) {
591 return new CastPPNode(c, in, type, dependency, types);
592 }
593 fatal("unreachable. Invalid cast type.");
594 return nullptr;
595 }
596
597 Node* ConstraintCastNode::optimize_integer_cast_of_add(PhaseGVN* phase, BasicType bt) {
598 PhaseIterGVN *igvn = phase->is_IterGVN();
599 const TypeInteger* this_type = this->type()->isa_integer(bt);
600 if (this_type == nullptr) {
601 return nullptr;
602 }
603
604 Node* z = in(1);
605 const TypeInteger* rx = nullptr;
606 const TypeInteger* ry = nullptr;
607 // Similar to ConvI2LNode::Ideal() for the same reasons
608 if (Compile::push_thru_add(phase, z, this_type, rx, ry, bt, bt)) {
609 if (igvn == nullptr) {
610 // Postpone this optimization to iterative GVN, where we can handle deep
611 // AddI chains without an exponential number of recursive Ideal() calls.
612 phase->record_for_igvn(this);
613 return nullptr;
614 }
615 int op = z->Opcode();
616 Node* x = z->in(1);
617 Node* y = z->in(2);
618
619 const TypeInteger* tx = phase->type(x)->is_integer(bt);
620 const TypeInteger* ty = phase->type(y)->is_integer(bt);
621
622 // (Cast (Add x y) tz) is transformed into (Add (Cast x rx) (Cast y ry))
623 //
624 // tz = [tzlo, tzhi]
625 // rx = [rxlo, rxhi]
626 // ry = [rylo, ryhi]
627 // with type of x, tx = [txlo, txhi]
628 // with type of y, ty = [tylo, tyhi]
629 //
630 // From Compile::push_thru_add():
631 // rxlo = max(tzlo - tyhi, txlo)
632 // rxhi = min(tzhi - tylo, txhi)
633 // rylo = max(tzlo - txhi, tylo)
634 // ryhi = min(tzhi - txlo, tyhi)
635 //
636 // If x is a constant, then txlo = txhi
637 // rxlo = txlo, rxhi = txhi
638 // The bounds of the type of the Add after transformation then is:
639 // rxlo + rylo >= txlo + tzlo - txhi >= tzlo
640 // rxhi + ryhi <= txhi + tzhi - txlo <= tzhi
641 // The resulting type is not wider than the type of the Cast
642 // before transformation
643 //
644 // If neither x nor y are constant then the type of the resulting
645 // Add can be wider than the type of the type of the Cast before
646 // transformation.
647 // For instance, tx = [0, 10], ty = [0, 10], tz = [0, 10]
648 // then rx = [0, 10], ry = [0, 10]
649 // and rx + ry = [0, 20] which is wider than tz
650 //
651 // Same reasoning applies to (Cast (Sub x y) tz)
652 const DependencyType& dependency = (!tx->is_con() && !ty->is_con()) ? _dependency.with_non_narrowing() : _dependency;
653 Node* cx = find_or_make_integer_cast(igvn, x, rx, dependency);
654 Node* cy = find_or_make_integer_cast(igvn, y, ry, dependency);
655 if (op == Op_Add(bt)) {
656 return AddNode::make(cx, cy, bt);
657 } else {
658 assert(op == Op_Sub(bt), "");
659 return SubNode::make(cx, cy, bt);
660 }
661 return nullptr;
662 }
663 return nullptr;
664 }
665
// Common entry point for integer cast optimizations: first try pushing the
// cast through an Add/Sub; after loop opts, widen the cast's type so that
// similar casts can be commoned.
Node* ConstraintCastNode::optimize_integer_cast(PhaseGVN* phase, BasicType bt) {
  Node* res = optimize_integer_cast_of_add(phase, bt);
  if (res != nullptr) {
    return res;
  }
  const Type* t = Value(phase);
  if (t != Type::TOP && phase->C->post_loop_opts_phase()) {
    const Type* bottom_t = bottom_type();
    const TypeInteger* wide_t = widen_type(phase, bottom_t, bt);
    if (wide_t != bottom_t) {
      // Widening the type of the Cast (to allow some commoning) causes the Cast to change how it can be optimized (if
      // type of its input is narrower than the Cast's type, we can't remove it to not lose the control dependency).
      return make_with(in(1), wide_t, _dependency.with_non_narrowing());
    }
  }
  return nullptr;
}
683
// Widen this cast's type to one of a few canonical ranges ([0, max], [min, -1]
// or [min, max]) intersected with the input's type, so casts that differ only
// in their precise bounds can be commoned. Returns the unchanged type when
// widening is disabled or the input carries no extra range information.
const TypeInteger* ConstraintCastNode::widen_type(const PhaseGVN* phase, const Type* res, BasicType bt) const {
  const TypeInteger* this_type = res->is_integer(bt);
  // At VerifyConstraintCasts == 1, we verify the ConstraintCastNodes that are present during code
  // emission. This allows us detecting possible mis-scheduling due to these nodes being pinned at
  // the wrong control nodes.
  // At VerifyConstraintCasts == 2, we do not perform widening so that we can verify the
  // correctness of more ConstraintCastNodes. This further helps us detect possible
  // mis-transformations that may happen due to these nodes being pinned at the wrong control
  // nodes.
  if (VerifyConstraintCasts > 1) {
    return this_type;
  }

  const TypeInteger* in_type = phase->type(in(1))->isa_integer(bt);
  if (in_type != nullptr &&
      (in_type->lo_as_long() != this_type->lo_as_long() ||
       in_type->hi_as_long() != this_type->hi_as_long())) {
    jlong lo1 = this_type->lo_as_long();
    jlong hi1 = this_type->hi_as_long();
    int w1 = this_type->_widen;
    if (lo1 >= 0) {
      // Keep a range assertion of >=0.
      lo1 = 0; hi1 = max_signed_integer(bt);
    } else if (hi1 < 0) {
      // Keep a range assertion of <0.
      lo1 = min_signed_integer(bt); hi1 = -1;
    } else {
      // Range straddles zero: no useful sign assertion, widen fully.
      lo1 = min_signed_integer(bt); hi1 = max_signed_integer(bt);
    }
    // Intersect the widened range with the input's type.
    return TypeInteger::make(MAX2(in_type->lo_as_long(), lo1),
                             MIN2(in_type->hi_as_long(), hi1),
                             MAX2((int)in_type->_widen, w1), bt);
  }
  return this_type;
}