/*
 * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/graphKit.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/loopnode.hpp"
#include "opto/matcher.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"
#include "utilities/checkedCast.hpp"

//=============================================================================
// If input is already higher or equal to cast type, then this is an identity.
Node* ConstraintCastNode::Identity(PhaseGVN* phase) {
  if (_dependency == UnconditionalDependency) {
    return this;
  }
  Node* dom = dominating_cast(phase, phase);
  if (dom != nullptr) {
    return dom;
  }
  return higher_equal_types(phase, in(1)) ? in(1) : this;
}

//------------------------------Value------------------------------------------
// Take 'join' of input and cast-up type
const Type* ConstraintCastNode::Value(PhaseGVN* phase) const {
  if (in(0) && phase->type(in(0)) == Type::TOP) return Type::TOP;

  const Type* in_type = phase->type(in(1));
  const Type* ft = in_type->filter_speculative(_type);

  // Check if both _type and in_type had a speculative type, but the speculative
  // type was dropped for the just-computed ft.
  if (ft->speculative() == nullptr &&
      _type->speculative() != nullptr &&
      in_type->speculative() != nullptr) {
    // The speculative types of the cast and its input may have disagreed and
    // been dropped during filtering. Recompute so that ft can take the
    // speculative type of in_type. If we did not do it now, a subsequent
    // ::Value call would do it, and violate the idempotence of ::Value.
    ft = in_type->filter_speculative(ft);
  }

#ifdef ASSERT
  // Previous versions of this function had some special case logic, which is
  // no longer necessary. Make sure the required effects still hold.
  switch (Opcode()) {
    case Op_CastII: {
      if (in_type == Type::TOP) {
        assert(ft == Type::TOP, "special case #1");
      }
      const Type* rt = in_type->join_speculative(_type);
      if (rt->empty()) {
        assert(ft == Type::TOP, "special case #2");
      }
      break;
    }
    case Op_CastPP:
      if (in_type == TypePtr::NULL_PTR &&
          _type->isa_ptr() && _type->is_ptr()->_ptr == TypePtr::NotNull) {
        assert(ft == Type::TOP, "special case #3");
      }
      break;
  }
#endif //ASSERT

  return ft;
}

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node. Strip out
// control copies.
Node* ConstraintCastNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  if (in(0) != nullptr && remove_dead_region(phase, can_reshape)) {
    return this;
  }

  // Push cast through InlineTypeNode
  InlineTypeNode* vt = in(1)->isa_InlineType();
  if (vt != nullptr && phase->type(vt)->filter_speculative(_type) != Type::TOP) {
    Node* cast = clone();
    cast->set_req(1, vt->get_oop());
    vt = vt->clone()->as_InlineType();
    if (!_type->maybe_null()) {
      vt->set_null_marker(*phase);
    }
    vt->set_oop(*phase, phase->transform(cast));
    return vt;
  }

  if (in(1) != nullptr && phase->type(in(1)) != Type::TOP) {
    return TypeNode::Ideal(phase, can_reshape);
  }

  return nullptr;
}

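// The dependency kind and the extra types participate in hash() and cmp() so that GVN keeps casts
// with different constraints distinct.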
uint ConstraintCastNode::hash() const {
  return TypeNode::hash() + (int)_dependency + (_extra_types != nullptr ? _extra_types->hash() : 0);
}

bool ConstraintCastNode::cmp(const Node &n) const {
  if (!TypeNode::cmp(n)) {
    return false;
  }
  ConstraintCastNode& cast = (ConstraintCastNode&) n;
  if (cast._dependency != _dependency) {
    return false;
  }
  if (_extra_types == nullptr || cast._extra_types == nullptr) {
    return _extra_types == cast._extra_types;
  }
  return _extra_types->eq(cast._extra_types);
}

uint ConstraintCastNode::size_of() const {
  return sizeof(*this);
}

Node* ConstraintCastNode::make_cast_for_basic_type(Node* c, Node* n, const Type* t, DependencyType dependency, BasicType bt) {
  switch (bt) {
    case T_INT:
      return new CastIINode(c, n, t, dependency);
    case T_LONG:
      return new CastLLNode(c, n, t, dependency);
    default:
      fatal("Bad basic type %s", type2name(bt));
  }
  return nullptr;
}

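// Look for another cast of the same kind, on the same value, whose type is at least as narrow as
// this cast's type and whose control dominates this cast's control (a CheckCastPP placed right
// after an allocation also qualifies, since it dominates all uses of the allocation result).
// Identity() uses the result to replace this cast.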
TypeNode* ConstraintCastNode::dominating_cast(PhaseGVN* gvn, PhaseTransform* pt) const {
  if (_dependency == UnconditionalDependency) {
    return nullptr;
  }
  Node* val = in(1);
  Node* ctl = in(0);
  int opc = Opcode();
  if (ctl == nullptr) {
    return nullptr;
  }
  // Range check CastIIs may all end up under a single range check and in that
  // case only the narrower CastII would be kept by the code below, which would
  // be incorrect.
  if (is_CastII() && as_CastII()->has_range_check()) {
    return nullptr;
  }
  if (type()->isa_rawptr() && (gvn->type_or_null(val) == nullptr || gvn->type(val)->isa_oopptr())) {
    return nullptr;
  }
  for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
    Node* u = val->fast_out(i);
    if (u != this &&
        u->outcnt() > 0 &&
        u->Opcode() == opc &&
        u->in(0) != nullptr &&
        higher_equal_types(gvn, u)) {
      if (pt->is_dominator(u->in(0), ctl)) {
        return u->as_Type();
      }
      if (is_CheckCastPP() && u->in(1)->is_Proj() && u->in(1)->in(0)->is_Allocate() &&
          u->in(0)->is_Proj() && u->in(0)->in(0)->is_Initialize() &&
          u->in(1)->in(0)->as_Allocate()->initialization() == u->in(0)->in(0)) {
        // A CheckCastPP following an allocation always dominates all
        // uses of the allocation result.
        return u->as_Type();
      }
    }
  }
  return nullptr;
}

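// Check whether the type of 'other' is at least as narrow as this cast's type and as every extra
// type recorded for this cast.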
bool ConstraintCastNode::higher_equal_types(PhaseGVN* phase, const Node* other) const {
  const Type* t = phase->type(other);
  if (!t->higher_equal_speculative(type())) {
    return false;
  }
  if (_extra_types != nullptr) {
    for (uint i = 0; i < _extra_types->cnt(); ++i) {
      if (!t->higher_equal_speculative(_extra_types->field_at(i))) {
        return false;
      }
    }
  }
  return true;
}

#ifndef PRODUCT
void ConstraintCastNode::dump_spec(outputStream* st) const {
  TypeNode::dump_spec(st);
  if (_extra_types != nullptr) {
    st->print(" extra types: ");
    _extra_types->dump_on(st);
  }
  if (_dependency != RegularDependency) {
    st->print(" %s dependency", _dependency == StrongDependency ? "strong" : "unconditional");
  }
}
#endif

const Type* CastIINode::Value(PhaseGVN* phase) const {
  const Type* res = ConstraintCastNode::Value(phase);
  if (res == Type::TOP) {
    return Type::TOP;
  }
  assert(res->isa_int(), "res must be int");

  // Similar to ConvI2LNode::Value() for the same reasons:
  // see if we can remove the type assertion after loop opts.
  res = widen_type(phase, res, T_INT);

  return res;
}

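// Clone this cast with 'parent' as its input and 'type' as its cast type, reusing an identical node
// from the GVN hash table if one already exists.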
Node* ConstraintCastNode::find_or_make_integer_cast(PhaseIterGVN* igvn, Node* parent, const TypeInteger* type) const {
  Node* n = clone();
  n->set_req(1, parent);
  n->as_ConstraintCast()->set_type(type);
  Node* existing = igvn->hash_find_insert(n);
  if (existing != nullptr) {
    n->destruct(igvn);
    return existing;
  }
  return igvn->register_new_node_with_optimizer(n);
}

Node* CastIINode::Ideal(PhaseGVN* phase, bool can_reshape) {
  Node* progress = ConstraintCastNode::Ideal(phase, can_reshape);
  if (progress != nullptr) {
    return progress;
  }
  if (can_reshape && !phase->C->post_loop_opts_phase()) {
    // Make sure ::Value runs again after loop opts so the type assertion can potentially be removed.
    phase->C->record_for_post_loop_opts_igvn(this);
  }
  if (!_range_check_dependency || phase->C->post_loop_opts_phase()) {
    return optimize_integer_cast(phase, T_INT);
  }
  phase->C->record_for_post_loop_opts_igvn(this);
  return nullptr;
}

Node* CastIINode::Identity(PhaseGVN* phase) {
  Node* progress = ConstraintCastNode::Identity(phase);
  if (progress != this) {
    return progress;
  }
  return this;
}

bool CastIINode::cmp(const Node &n) const {
  return ConstraintCastNode::cmp(n) && ((CastIINode&)n)._range_check_dependency == _range_check_dependency;
}

uint CastIINode::size_of() const {
  return sizeof(*this);
}

#ifndef PRODUCT
void CastIINode::dump_spec(outputStream* st) const {
  ConstraintCastNode::dump_spec(st);
  if (_range_check_dependency) {
    st->print(" range check dependency");
  }
}
#endif

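// If this is a range check CastII, return a copy with a strong dependency so it stays pinned at its
// control; otherwise return nullptr.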
CastIINode* CastIINode::pin_array_access_node() const {
  assert(_dependency == RegularDependency, "already pinned");
  if (has_range_check()) {
    return new CastIINode(in(0), in(1), bottom_type(), StrongDependency, has_range_check());
  }
  return nullptr;
}

void CastIINode::remove_range_check_cast(Compile* C) {
  if (has_range_check()) {
    // Range check CastII nodes feed into an address computation subgraph. Remove them to let that subgraph float freely.
    // For memory access or integer division nodes that depend on the cast, record the dependency on the cast's control
    // as a precedence edge, so they can't float above the cast in case that cast's narrowed type helped eliminate a
    // range check or a null divisor check.
    assert(in(0) != nullptr, "All RangeCheck CastII must have a control dependency");
    ResourceMark rm;
    Unique_Node_List wq;
    wq.push(this);
    for (uint next = 0; next < wq.size(); ++next) {
      Node* m = wq.at(next);
      for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
        Node* use = m->fast_out(i);
        if (use->is_Mem() || use->is_div_or_mod(T_INT) || use->is_div_or_mod(T_LONG)) {
          use->ensure_control_or_add_prec(in(0));
        } else if (!use->is_CFG() && !use->is_Phi()) {
          wq.push(use);
        }
      }
    }
    subsume_by(in(1), C);
    if (outcnt() == 0) {
      disconnect_inputs(C);
    }
  }
}


const Type* CastLLNode::Value(PhaseGVN* phase) const {
  const Type* res = ConstraintCastNode::Value(phase);
  if (res == Type::TOP) {
    return Type::TOP;
  }
  assert(res->isa_long(), "res must be long");

  return widen_type(phase, res, T_LONG);
}

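// Helper for used_at_inner_loop_exit_test(): is 'proj' the backedge projection of an inner loop
// created by loop nest construction?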
bool CastLLNode::is_inner_loop_backedge(ProjNode* proj) {
  if (proj != nullptr) {
    Node* ctrl_use = proj->unique_ctrl_out_or_null();
    if (ctrl_use != nullptr && ctrl_use->Opcode() == Op_Loop &&
        ctrl_use->in(2) == proj &&
        ctrl_use->as_Loop()->is_loop_nest_inner_loop()) {
      return true;
    }
  }
  return false;
}

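// Helper for used_at_inner_loop_exit_test(): does 'cmp' feed, through a Bool and an If, the
// backedge test of such an inner loop?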
bool CastLLNode::cmp_used_at_inner_loop_exit_test(CmpNode* cmp) {
  for (DUIterator_Fast imax, i = cmp->fast_outs(imax); i < imax; i++) {
    Node* bol = cmp->fast_out(i);
    if (bol->Opcode() == Op_Bool) {
      for (DUIterator_Fast jmax, j = bol->fast_outs(jmax); j < jmax; j++) {
        Node* iff = bol->fast_out(j);
        if (iff->Opcode() == Op_If) {
          ProjNode* true_proj = iff->as_If()->proj_out_or_null(true);
          ProjNode* false_proj = iff->as_If()->proj_out_or_null(false);
          if (is_inner_loop_backedge(true_proj) || is_inner_loop_backedge(false_proj)) {
            return true;
          }
        }
      }
    }
  }
  return false;
}

// Check whether this is a cast added by PhaseIdealLoop::create_loop_nest() to narrow the number of
// iterations of the inner loop.
bool CastLLNode::used_at_inner_loop_exit_test() const {
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    Node* convl2i = fast_out(i);
    if (convl2i->Opcode() == Op_ConvL2I) {
      for (DUIterator_Fast jmax, j = convl2i->fast_outs(jmax); j < jmax; j++) {
        Node* cmp_or_sub = convl2i->fast_out(j);
        if (cmp_or_sub->Opcode() == Op_CmpI) {
          if (cmp_used_at_inner_loop_exit_test(cmp_or_sub->as_Cmp())) {
            // (Loop .. .. (IfProj (If (Bool (CmpI (ConvL2I (CastLL )))))))
            return true;
          }
        } else if (cmp_or_sub->Opcode() == Op_SubI && cmp_or_sub->in(1)->find_int_con(-1) == 0) {
          for (DUIterator_Fast kmax, k = cmp_or_sub->fast_outs(kmax); k < kmax; k++) {
            Node* cmp = cmp_or_sub->fast_out(k);
            if (cmp->Opcode() == Op_CmpI) {
              if (cmp_used_at_inner_loop_exit_test(cmp->as_Cmp())) {
                // (Loop .. .. (IfProj (If (Bool (CmpI (SubI 0 (ConvL2I (CastLL ))))))))
                return true;
              }
            }
          }
        }
      }
    }
  }
  return false;
}

Node* CastLLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  Node* progress = ConstraintCastNode::Ideal(phase, can_reshape);
  if (progress != nullptr) {
    return progress;
  }
  if (!phase->C->post_loop_opts_phase()) {
    // Make sure ::Value runs again after loop opts so the type assertion can potentially be removed.
    phase->C->record_for_post_loop_opts_igvn(this);
  }
  // Transform (CastLL (ConvI2L ..)) into (ConvI2L (CastII ..)) if the type of the CastLL is narrower
  // than the type of the ConvI2L.
  Node* in1 = in(1);
  if (in1 != nullptr && in1->Opcode() == Op_ConvI2L) {
    const Type* t = Value(phase);
    const Type* t_in = phase->type(in1);
    if (t != Type::TOP && t_in != Type::TOP) {
      const TypeLong* tl = t->is_long();
      const TypeLong* t_in_l = t_in->is_long();
      assert(tl->_lo >= t_in_l->_lo && tl->_hi <= t_in_l->_hi, "CastLL type should be narrower than or equal to the type of its input");
      assert((tl != t_in_l) == (tl->_lo > t_in_l->_lo || tl->_hi < t_in_l->_hi), "if the types differ then this node's type must be narrower");
      if (tl != t_in_l) {
        const TypeInt* ti = TypeInt::make(checked_cast<jint>(tl->_lo), checked_cast<jint>(tl->_hi), tl->_widen);
        Node* castii = phase->transform(new CastIINode(in(0), in1->in(1), ti));
        Node* convi2l = in1->clone();
        convi2l->set_req(1, castii);
        return convi2l;
      }
    }
  }
  // If this is a cast created by PhaseIdealLoop::short_running_loop(), don't transform it until the
  // counted loop is created in the next loop opts pass.
  if (!can_reshape || !used_at_inner_loop_exit_test()) {
    return optimize_integer_cast(phase, T_LONG);
  }
  return nullptr;
}

//=============================================================================
//------------------------------Identity---------------------------------------
// If input is already higher or equal to cast type, then this is an identity.
Node* CheckCastPPNode::Identity(PhaseGVN* phase) {
  if (in(1)->is_InlineType() && _type->isa_instptr() && phase->type(in(1))->inline_klass()->is_subtype_of(_type->is_instptr()->instance_klass())) {
    return in(1);
  }
  return ConstraintCastNode::Identity(phase);
}

//------------------------------Value------------------------------------------
// Take 'join' of input and cast-up type, unless working with an Interface
const Type* CheckCastPPNode::Value(PhaseGVN* phase) const {
  if (in(0) && phase->type(in(0)) == Type::TOP) return Type::TOP;

  const Type* inn = phase->type(in(1));
  if (inn == Type::TOP) return Type::TOP; // No information yet

  if (inn->isa_oopptr() && _type->isa_oopptr()) {
    return ConstraintCastNode::Value(phase);
  }

  const TypePtr* in_type = inn->isa_ptr();
  const TypePtr* my_type = _type->isa_ptr();
  const Type* result = _type;
  if (in_type != nullptr && my_type != nullptr) {
    // TODO 8302672
    if (!StressReflectiveCode && my_type->isa_aryptr() && in_type->isa_aryptr()) {
      // Propagate array properties (not flat/null-free).
      // Don't do this when StressReflectiveCode is enabled because it might lead to
      // a dying data path while the corresponding flat/null-free check is not folded.
      my_type = my_type->is_aryptr()->update_properties(in_type->is_aryptr());
      if (my_type == nullptr) {
        return Type::TOP; // Inconsistent properties
      }
    }
    TypePtr::PTR in_ptr = in_type->ptr();
    if (in_ptr == TypePtr::Null) {
      result = in_type;
    } else if (in_ptr != TypePtr::Constant) {
      result = my_type->cast_to_ptr_type(my_type->join_ptr(in_ptr));
    }
  }

  return result;
}

//=============================================================================
//------------------------------Value------------------------------------------
const Type* CastX2PNode::Value(PhaseGVN* phase) const {
  const Type* t = phase->type(in(1));
  if (t == Type::TOP) return Type::TOP;
  if (t->base() == Type_X && t->singleton()) {
    uintptr_t bits = (uintptr_t) t->is_intptr_t()->get_con();
    if (bits == 0) return TypePtr::NULL_PTR;
    return TypeRawPtr::make((address) bits);
  }
  return CastX2PNode::bottom_type();
}

//------------------------------Idealize---------------------------------------
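// Does the intptr_t type 't' fit in a jint? If 'but_not_min_int' is set, min_jint is excluded so
// that the value can also be negated without overflow.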
static inline bool fits_in_int(const Type* t, bool but_not_min_int = false) {
  if (t == Type::TOP) return false;
  const TypeX* tl = t->is_intptr_t();
  jint lo = min_jint;
  jint hi = max_jint;
  if (but_not_min_int) ++lo; // caller wants to negate the value w/o overflow
  return (tl->_lo >= lo) && (tl->_hi <= hi);
}

static inline Node* addP_of_X2P(PhaseGVN* phase,
                                Node* base,
                                Node* dispX,
                                bool negate = false) {
  if (negate) {
    dispX = phase->transform(new SubXNode(phase->MakeConX(0), dispX));
  }
  return new AddPNode(phase->C->top(),
                      phase->transform(new CastX2PNode(base)),
                      dispX);
}

Node* CastX2PNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  // Convert CastX2P(AddX(x, y)) to AddP(CastX2P(x), y) if y fits in an int.
  int op = in(1)->Opcode();
  Node* x;
  Node* y;
  switch (op) {
    case Op_SubX:
      x = in(1)->in(1);
      // Avoid ideal transformations ping-ponging between this node and AddP for raw pointers.
      if (phase->find_intptr_t_con(x, -1) == 0) {
        break;
      }
      y = in(1)->in(2);
      if (fits_in_int(phase->type(y), true)) {
        return addP_of_X2P(phase, x, y, true);
      }
      break;
    case Op_AddX:
      x = in(1)->in(1);
      y = in(1)->in(2);
      if (fits_in_int(phase->type(y))) {
        return addP_of_X2P(phase, x, y);
      }
      if (fits_in_int(phase->type(x))) {
        return addP_of_X2P(phase, y, x);
      }
      break;
  }
  return nullptr;
}

//------------------------------Identity---------------------------------------
Node* CastX2PNode::Identity(PhaseGVN* phase) {
  if (in(1)->Opcode() == Op_CastP2X) return in(1)->in(1);
  return this;
}

//=============================================================================
//------------------------------Value------------------------------------------
const Type* CastP2XNode::Value(PhaseGVN* phase) const {
  const Type* t = phase->type(in(1));
  if (t == Type::TOP) return Type::TOP;
  if (t->base() == Type::RawPtr && t->singleton()) {
    uintptr_t bits = (uintptr_t) t->is_rawptr()->get_con();
    return TypeX::make(bits);
  }

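  // The pointer is known to be null or known to be never null: CmpL nodes fed by an OrL user of
  // this cast may now be able to fold, so revisit them.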
  if (t->is_zero_type() || !t->maybe_null()) {
    for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
      Node* u = fast_out(i);
      if (u->Opcode() == Op_OrL) {
        for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
          Node* cmp = u->fast_out(j);
          if (cmp->Opcode() == Op_CmpL) {
            // Give CmpL a chance to get optimized
            phase->record_for_igvn(cmp);
          }
        }
      }
    }
  }

  return CastP2XNode::bottom_type();
}

Node* CastP2XNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  return (in(0) && remove_dead_region(phase, can_reshape)) ? this : nullptr;
}

//------------------------------Identity---------------------------------------
Node* CastP2XNode::Identity(PhaseGVN* phase) {
  if (in(1)->Opcode() == Op_CastX2P) return in(1)->in(1);
  return this;
}

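// Factory: create the ConstraintCastNode subclass that matches the given type.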
Node* ConstraintCastNode::make_cast_for_type(Node* c, Node* in, const Type* type, DependencyType dependency,
                                             const TypeTuple* types) {
  if (type->isa_int()) {
    return new CastIINode(c, in, type, dependency, false, types);
  } else if (type->isa_long()) {
    return new CastLLNode(c, in, type, dependency, types);
  } else if (type->isa_half_float()) {
    return new CastHHNode(c, in, type, dependency, types);
  } else if (type->isa_float()) {
    return new CastFFNode(c, in, type, dependency, types);
  } else if (type->isa_double()) {
    return new CastDDNode(c, in, type, dependency, types);
  } else if (type->isa_vect()) {
    return new CastVVNode(c, in, type, dependency, types);
  } else if (type->isa_ptr()) {
    return new CastPPNode(c, in, type, dependency, types);
  }
  fatal("unreachable. Invalid cast type.");
  return nullptr;
}

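// Push the cast through an Add or Sub of the same basic type when the operand ranges allow it,
// e.g. (CastII (AddI x y)) becomes (AddI (CastII x) (CastII y)), similar to ConvI2LNode::Ideal().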
Node* ConstraintCastNode::optimize_integer_cast(PhaseGVN* phase, BasicType bt) {
  PhaseIterGVN* igvn = phase->is_IterGVN();
  const TypeInteger* this_type = this->type()->isa_integer(bt);
  if (this_type == nullptr) {
    return nullptr;
  }

  Node* z = in(1);
  const TypeInteger* rx = nullptr;
  const TypeInteger* ry = nullptr;
  // Similar to ConvI2LNode::Ideal() for the same reasons
  if (Compile::push_thru_add(phase, z, this_type, rx, ry, bt, bt)) {
    if (igvn == nullptr) {
      // Postpone this optimization to iterative GVN, where we can handle deep
      // AddI chains without an exponential number of recursive Ideal() calls.
      phase->record_for_igvn(this);
      return nullptr;
    }
    int op = z->Opcode();
    Node* x = z->in(1);
    Node* y = z->in(2);

    Node* cx = find_or_make_integer_cast(igvn, x, rx);
    Node* cy = find_or_make_integer_cast(igvn, y, ry);
    if (op == Op_Add(bt)) {
      return AddNode::make(cx, cy, bt);
    } else {
      assert(op == Op_Sub(bt), "");
      return SubNode::make(cx, cy, bt);
    }
  }
  return nullptr;
}

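// After loop opts, widen the cast type: keep only a >= 0 or < 0 assertion (or no range assertion at
// all), intersected with the input type, so that type assertions that no longer add information can
// be removed.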
const Type* ConstraintCastNode::widen_type(const PhaseGVN* phase, const Type* res, BasicType bt) const {
  if (!phase->C->post_loop_opts_phase()) {
    return res;
  }

  // At VerifyConstraintCasts == 1, we verify the ConstraintCastNodes that are present during code
  // emission. This allows us to detect possible mis-scheduling due to these nodes being pinned at
  // the wrong control nodes.
  // At VerifyConstraintCasts == 2, we do not perform widening so that we can verify the
  // correctness of more ConstraintCastNodes. This further helps us detect possible
  // mis-transformations that may happen due to these nodes being pinned at the wrong control
  // nodes.
  if (VerifyConstraintCasts > 1) {
    return res;
  }

  const TypeInteger* this_type = res->is_integer(bt);
  const TypeInteger* in_type = phase->type(in(1))->isa_integer(bt);
  if (in_type != nullptr &&
      (in_type->lo_as_long() != this_type->lo_as_long() ||
       in_type->hi_as_long() != this_type->hi_as_long())) {
    jlong lo1 = this_type->lo_as_long();
    jlong hi1 = this_type->hi_as_long();
    int w1 = this_type->_widen;
    if (lo1 >= 0) {
      // Keep a range assertion of >=0.
      lo1 = 0; hi1 = max_signed_integer(bt);
    } else if (hi1 < 0) {
      // Keep a range assertion of <0.
      lo1 = min_signed_integer(bt); hi1 = -1;
    } else {
      lo1 = min_signed_integer(bt); hi1 = max_signed_integer(bt);
    }
    return TypeInteger::make(MAX2(in_type->lo_as_long(), lo1),
                             MIN2(in_type->hi_as_long(), hi1),
                             MAX2((int)in_type->_widen, w1), bt);
  }
  return res;
}