1 /* 2 * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "opto/addnode.hpp" 26 #include "opto/callnode.hpp" 27 #include "opto/castnode.hpp" 28 #include "opto/connode.hpp" 29 #include "opto/graphKit.hpp" 30 #include "opto/inlinetypenode.hpp" 31 #include "opto/matcher.hpp" 32 #include "opto/phaseX.hpp" 33 #include "opto/rootnode.hpp" 34 #include "opto/subnode.hpp" 35 #include "opto/type.hpp" 36 #include "castnode.hpp" 37 #include "utilities/checkedCast.hpp" 38 39 //============================================================================= 40 // If input is already higher or equal to cast type, then this is an identity. 41 Node* ConstraintCastNode::Identity(PhaseGVN* phase) { 42 if (_dependency == UnconditionalDependency) { 43 return this; 44 } 45 Node* dom = dominating_cast(phase, phase); 46 if (dom != nullptr) { 47 return dom; 48 } 49 return higher_equal_types(phase, in(1)) ? 
in(1) : this; 50 } 51 52 //------------------------------Value------------------------------------------ 53 // Take 'join' of input and cast-up type 54 const Type* ConstraintCastNode::Value(PhaseGVN* phase) const { 55 if (in(0) && phase->type(in(0)) == Type::TOP) return Type::TOP; 56 57 const Type* in_type = phase->type(in(1)); 58 const Type* ft = in_type->filter_speculative(_type); 59 60 // Check if both _type and in_type had a speculative type, but for the just 61 // computed ft the speculative type was dropped. 62 if (ft->speculative() == nullptr && 63 _type->speculative() != nullptr && 64 in_type->speculative() != nullptr) { 65 // Speculative type may have disagreed between cast and input, and was 66 // dropped in filtering. Recompute so that ft can take speculative type 67 // of in_type. If we did not do it now, a subsequent ::Value call would 68 // do it, and violate idempotence of ::Value. 69 ft = in_type->filter_speculative(ft); 70 } 71 72 #ifdef ASSERT 73 // Previous versions of this function had some special case logic, 74 // which is no longer necessary. Make sure of the required effects. 75 switch (Opcode()) { 76 case Op_CastII: 77 { 78 if (in_type == Type::TOP) { 79 assert(ft == Type::TOP, "special case #1"); 80 } 81 const Type* rt = in_type->join_speculative(_type); 82 if (rt->empty()) { 83 assert(ft == Type::TOP, "special case #2"); 84 } 85 break; 86 } 87 case Op_CastPP: 88 if (in_type == TypePtr::NULL_PTR && 89 _type->isa_ptr() && _type->is_ptr()->_ptr == TypePtr::NotNull) { 90 assert(ft == Type::TOP, "special case #3"); 91 break; 92 } 93 } 94 #endif //ASSERT 95 96 return ft; 97 } 98 99 //------------------------------Ideal------------------------------------------ 100 // Return a node which is more "ideal" than the current node. 
//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.  Strip out
// control copies
Node *ConstraintCastNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (in(0) && remove_dead_region(phase, can_reshape)) {
    return this;
  }

  // Push cast through InlineTypeNode: cast the buffered oop instead, and
  // return a clone of the InlineTypeNode wrapping the casted oop.
  InlineTypeNode* vt = in(1)->isa_InlineType();
  if (vt != nullptr && phase->type(vt)->filter_speculative(_type) != Type::TOP) {
    Node* cast = clone();
    cast->set_req(1, vt->get_oop());
    vt = vt->clone()->as_InlineType();
    if (!_type->maybe_null()) {
      // Cast type excludes null, so the inline type is known to be initialized.
      vt->as_InlineType()->set_is_init(*phase);
    }
    vt->set_oop(*phase, phase->transform(cast));
    return vt;
  }

  return nullptr;
}

// Hash includes the dependency kind and extra types so that otherwise-equal
// casts with different constraints do not commonize in GVN.
uint ConstraintCastNode::hash() const {
  return TypeNode::hash() + (int)_dependency + (_extra_types != nullptr ? _extra_types->hash() : 0);
}

bool ConstraintCastNode::cmp(const Node &n) const {
  if (!TypeNode::cmp(n)) {
    return false;
  }
  ConstraintCastNode& cast = (ConstraintCastNode&) n;
  if (cast._dependency != _dependency) {
    return false;
  }
  // Extra types must either both be absent or structurally equal.
  if (_extra_types == nullptr || cast._extra_types == nullptr) {
    return _extra_types == cast._extra_types;
  }
  return _extra_types->eq(cast._extra_types);
}

uint ConstraintCastNode::size_of() const {
  return sizeof(*this);
}

// Factory for integer casts (T_INT/T_LONG only); other basic types are invalid here.
Node* ConstraintCastNode::make_cast_for_basic_type(Node* c, Node* n, const Type* t, DependencyType dependency, BasicType bt) {
  switch(bt) {
    case T_INT:
      return new CastIINode(c, n, t, dependency);
    case T_LONG:
      return new CastLLNode(c, n, t, dependency);
    default:
      fatal("Bad basic type %s", type2name(bt));
  }
  return nullptr;
}

// Look for another cast of the same value, with the same opcode and a type at
// least as narrow, whose control dominates this cast's control. Returns it so
// Identity can replace this node, or nullptr if none is found.
TypeNode* ConstraintCastNode::dominating_cast(PhaseGVN* gvn, PhaseTransform* pt) const {
  if (_dependency == UnconditionalDependency) {
    return nullptr;
  }
  Node* val = in(1);
  Node* ctl = in(0);
  int opc = Opcode();
  if (ctl == nullptr) {
    return nullptr;
  }
  // Range check CastIIs may all end up under a single range check and
  // in that case only the narrower CastII would be kept by the code
  // below which would be incorrect.
  if (is_CastII() && as_CastII()->has_range_check()) {
    return nullptr;
  }
  if (type()->isa_rawptr() && (gvn->type_or_null(val) == nullptr || gvn->type(val)->isa_oopptr())) {
    return nullptr;
  }
  // Scan the other users of the casted value for a dominating equivalent cast.
  for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
    Node* u = val->fast_out(i);
    if (u != this &&
        u->outcnt() > 0 &&
        u->Opcode() == opc &&
        u->in(0) != nullptr &&
        higher_equal_types(gvn, u)) {
      if (pt->is_dominator(u->in(0), ctl)) {
        return u->as_Type();
      }
      if (is_CheckCastPP() && u->in(1)->is_Proj() && u->in(1)->in(0)->is_Allocate() &&
          u->in(0)->is_Proj() && u->in(0)->in(0)->is_Initialize() &&
          u->in(1)->in(0)->as_Allocate()->initialization() == u->in(0)->in(0)) {
        // CheckCastPP following an allocation always dominates all
        // use of the allocation result
        return u->as_Type();
      }
    }
  }
  return nullptr;
}

// True if 'other''s type is at least as narrow as this cast's type and every
// extra type carried by this cast.
bool ConstraintCastNode::higher_equal_types(PhaseGVN* phase, const Node* other) const {
  const Type* t = phase->type(other);
  if (!t->higher_equal_speculative(type())) {
    return false;
  }
  if (_extra_types != nullptr) {
    for (uint i = 0; i < _extra_types->cnt(); ++i) {
      if (!t->higher_equal_speculative(_extra_types->field_at(i))) {
        return false;
      }
    }
  }
  return true;
}

#ifndef PRODUCT
void ConstraintCastNode::dump_spec(outputStream *st) const {
  TypeNode::dump_spec(st);
  if (_extra_types != nullptr) {
    st->print(" extra types: ");
    _extra_types->dump_on(st);
  }
  if (_dependency != RegularDependency) {
    st->print(" %s dependency", _dependency == StrongDependency ? "strong" : "unconditional");
  }
}
#endif

const Type* CastIINode::Value(PhaseGVN* phase) const {
  const Type *res = ConstraintCastNode::Value(phase);
  if (res == Type::TOP) {
    return Type::TOP;
  }
  assert(res->isa_int(), "res must be int");

  // Similar to ConvI2LNode::Value() for the same reasons
  // see if we can remove type assertion after loop opts
  res = widen_type(phase, res, T_INT);

  return res;
}

// Clone this cast with a new input and type, reusing an existing identical
// node from the GVN hash table if one is already present.
Node* ConstraintCastNode::find_or_make_integer_cast(PhaseIterGVN* igvn, Node* parent, const TypeInteger* type) const {
  Node* n = clone();
  n->set_req(1, parent);
  n->as_ConstraintCast()->set_type(type);
  Node* existing = igvn->hash_find_insert(n);
  if (existing != nullptr) {
    n->destruct(igvn);
    return existing;
  }
  return igvn->register_new_node_with_optimizer(n);
}

Node *CastIINode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* progress = ConstraintCastNode::Ideal(phase, can_reshape);
  if (progress != nullptr) {
    return progress;
  }
  if (can_reshape && !phase->C->post_loop_opts_phase()) {
    // makes sure we run ::Value to potentially remove type assertion after loop opts
    phase->C->record_for_post_loop_opts_igvn(this);
  }
  // Range-check-dependent casts are only pushed through adds/subs after loop opts.
  if (!_range_check_dependency || phase->C->post_loop_opts_phase()) {
    return optimize_integer_cast(phase, T_INT);
  }
  phase->C->record_for_post_loop_opts_igvn(this);
  return nullptr;
}

Node* CastIINode::Identity(PhaseGVN* phase) {
  Node* progress = ConstraintCastNode::Identity(phase);
  if (progress != this) {
    return progress;
  }
  return this;
}

bool CastIINode::cmp(const Node &n) const {
  return ConstraintCastNode::cmp(n) && ((CastIINode&)n)._range_check_dependency == _range_check_dependency;
}

uint CastIINode::size_of() const {
  return sizeof(*this);
}

#ifndef PRODUCT
void CastIINode::dump_spec(outputStream* st) const {
  ConstraintCastNode::dump_spec(st);
  if (_range_check_dependency) {
    st->print(" range check dependency");
  }
}
#endif

// Return a strongly pinned copy of this range-check CastII (for array
// accesses that must not float), or nullptr if there is no range check.
CastIINode* CastIINode::pin_array_access_node() const {
  assert(_dependency == RegularDependency, "already pinned");
  if (has_range_check()) {
    return new CastIINode(in(0), in(1), bottom_type(), StrongDependency, has_range_check());
  }
  return nullptr;
}

void CastIINode::remove_range_check_cast(Compile* C) {
  if (has_range_check()) {
    // Range check CastII nodes feed into an address computation subgraph. Remove them to let that subgraph float freely.
    // For memory access or integer divisions nodes that depend on the cast, record the dependency on the cast's control
    // as a precedence edge, so they can't float above the cast in case that cast's narrowed type helped eliminate a
    // range check or a null divisor check.
    assert(in(0) != nullptr, "All RangeCheck CastII must have a control dependency");
    ResourceMark rm;
    Unique_Node_List wq;
    wq.push(this);
    // Transitively walk data uses of the cast to find dependent memory
    // accesses and divisions, stopping at CFG nodes and Phis.
    for (uint next = 0; next < wq.size(); ++next) {
      Node* m = wq.at(next);
      for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
        Node* use = m->fast_out(i);
        if (use->is_Mem() || use->is_div_or_mod(T_INT) || use->is_div_or_mod(T_LONG)) {
          use->ensure_control_or_add_prec(in(0));
        } else if (!use->is_CFG() && !use->is_Phi()) {
          wq.push(use);
        }
      }
    }
    subsume_by(in(1), C);
    if (outcnt() == 0) {
      disconnect_inputs(C);
    }
  }
}


const Type* CastLLNode::Value(PhaseGVN* phase) const {
  const Type* res = ConstraintCastNode::Value(phase);
  if (res == Type::TOP) {
    return Type::TOP;
  }
  assert(res->isa_long(), "res must be long");

  return widen_type(phase, res, T_LONG);
}

Node* CastLLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  Node* progress = ConstraintCastNode::Ideal(phase, can_reshape);
  if (progress != nullptr) {
    return progress;
  }
  if (!phase->C->post_loop_opts_phase()) {
    // makes sure we run ::Value to potentially remove type assertion after loop opts
    phase->C->record_for_post_loop_opts_igvn(this);
  }
  // transform (CastLL (ConvI2L ..)) into (ConvI2L (CastII ..)) if the type of the CastLL is narrower than the type of
  // the ConvI2L.
  Node* in1 = in(1);
  if (in1 != nullptr && in1->Opcode() == Op_ConvI2L) {
    const Type* t = Value(phase);
    const Type* t_in = phase->type(in1);
    if (t != Type::TOP && t_in != Type::TOP) {
      const TypeLong* tl = t->is_long();
      const TypeLong* t_in_l = t_in->is_long();
      assert(tl->_lo >= t_in_l->_lo && tl->_hi <= t_in_l->_hi, "CastLL type should be narrower than or equal to the type of its input");
      assert((tl != t_in_l) == (tl->_lo > t_in_l->_lo || tl->_hi < t_in_l->_hi), "if type differs then this nodes's type must be narrower");
      if (tl != t_in_l) {
        // The long range fits in an int (it came from a ConvI2L), so the
        // constraint can be expressed as a CastII below the conversion.
        const TypeInt* ti = TypeInt::make(checked_cast<jint>(tl->_lo), checked_cast<jint>(tl->_hi), tl->_widen);
        Node* castii = phase->transform(new CastIINode(in(0), in1->in(1), ti));
        Node* convi2l = in1->clone();
        convi2l->set_req(1, castii);
        return convi2l;
      }
    }
  }
  return optimize_integer_cast(phase, T_LONG);
}

//=============================================================================
375 Node* CheckCastPPNode::Identity(PhaseGVN* phase) { 376 if (in(1)->is_InlineType() && _type->isa_instptr() && phase->type(in(1))->inline_klass()->is_subtype_of(_type->is_instptr()->instance_klass())) { 377 return in(1); 378 } 379 return ConstraintCastNode::Identity(phase); 380 } 381 382 //------------------------------Value------------------------------------------ 383 // Take 'join' of input and cast-up type, unless working with an Interface 384 const Type* CheckCastPPNode::Value(PhaseGVN* phase) const { 385 if( in(0) && phase->type(in(0)) == Type::TOP ) return Type::TOP; 386 387 const Type *inn = phase->type(in(1)); 388 if( inn == Type::TOP ) return Type::TOP; // No information yet 389 390 if (inn->isa_oopptr() && _type->isa_oopptr()) { 391 return ConstraintCastNode::Value(phase); 392 } 393 394 const TypePtr *in_type = inn->isa_ptr(); 395 const TypePtr *my_type = _type->isa_ptr(); 396 const Type *result = _type; 397 if (in_type != nullptr && my_type != nullptr) { 398 // TODO 8302672 399 if (!StressReflectiveCode && my_type->isa_aryptr() && in_type->isa_aryptr()) { 400 // Propagate array properties (not flat/null-free) 401 // Don't do this when StressReflectiveCode is enabled because it might lead to 402 // a dying data path while the corresponding flat/null-free check is not folded. 
403 my_type = my_type->is_aryptr()->update_properties(in_type->is_aryptr()); 404 if (my_type == nullptr) { 405 return Type::TOP; // Inconsistent properties 406 } 407 } 408 TypePtr::PTR in_ptr = in_type->ptr(); 409 if (in_ptr == TypePtr::Null) { 410 result = in_type; 411 } else if (in_ptr != TypePtr::Constant) { 412 result = my_type->cast_to_ptr_type(my_type->join_ptr(in_ptr)); 413 } 414 } 415 416 return result; 417 } 418 419 //============================================================================= 420 //------------------------------Value------------------------------------------ 421 const Type* CastX2PNode::Value(PhaseGVN* phase) const { 422 const Type* t = phase->type(in(1)); 423 if (t == Type::TOP) return Type::TOP; 424 if (t->base() == Type_X && t->singleton()) { 425 uintptr_t bits = (uintptr_t) t->is_intptr_t()->get_con(); 426 if (bits == 0) return TypePtr::NULL_PTR; 427 return TypeRawPtr::make((address) bits); 428 } 429 return CastX2PNode::bottom_type(); 430 } 431 432 //------------------------------Idealize--------------------------------------- 433 static inline bool fits_in_int(const Type* t, bool but_not_min_int = false) { 434 if (t == Type::TOP) return false; 435 const TypeX* tl = t->is_intptr_t(); 436 jint lo = min_jint; 437 jint hi = max_jint; 438 if (but_not_min_int) ++lo; // caller wants to negate the value w/o overflow 439 return (tl->_lo >= lo) && (tl->_hi <= hi); 440 } 441 442 static inline Node* addP_of_X2P(PhaseGVN *phase, 443 Node* base, 444 Node* dispX, 445 bool negate = false) { 446 if (negate) { 447 dispX = phase->transform(new SubXNode(phase->MakeConX(0), dispX)); 448 } 449 return new AddPNode(phase->C->top(), 450 phase->transform(new CastX2PNode(base)), 451 dispX); 452 } 453 454 Node *CastX2PNode::Ideal(PhaseGVN *phase, bool can_reshape) { 455 // convert CastX2P(AddX(x, y)) to AddP(CastX2P(x), y) if y fits in an int 456 int op = in(1)->Opcode(); 457 Node* x; 458 Node* y; 459 switch (op) { 460 case Op_SubX: 461 x = in(1)->in(1); 462 
// Avoid ideal transformations ping-pong between this and AddP for raw pointers. 463 if (phase->find_intptr_t_con(x, -1) == 0) 464 break; 465 y = in(1)->in(2); 466 if (fits_in_int(phase->type(y), true)) { 467 return addP_of_X2P(phase, x, y, true); 468 } 469 break; 470 case Op_AddX: 471 x = in(1)->in(1); 472 y = in(1)->in(2); 473 if (fits_in_int(phase->type(y))) { 474 return addP_of_X2P(phase, x, y); 475 } 476 if (fits_in_int(phase->type(x))) { 477 return addP_of_X2P(phase, y, x); 478 } 479 break; 480 } 481 return nullptr; 482 } 483 484 //------------------------------Identity--------------------------------------- 485 Node* CastX2PNode::Identity(PhaseGVN* phase) { 486 if (in(1)->Opcode() == Op_CastP2X) return in(1)->in(1); 487 return this; 488 } 489 490 //============================================================================= 491 //------------------------------Value------------------------------------------ 492 const Type* CastP2XNode::Value(PhaseGVN* phase) const { 493 const Type* t = phase->type(in(1)); 494 if (t == Type::TOP) return Type::TOP; 495 if (t->base() == Type::RawPtr && t->singleton()) { 496 uintptr_t bits = (uintptr_t) t->is_rawptr()->get_con(); 497 return TypeX::make(bits); 498 } 499 500 if (t->is_zero_type() || !t->maybe_null()) { 501 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) { 502 Node* u = fast_out(i); 503 if (u->Opcode() == Op_OrL) { 504 for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) { 505 Node* cmp = u->fast_out(j); 506 if (cmp->Opcode() == Op_CmpL) { 507 // Give CmpL a chance to get optimized 508 phase->record_for_igvn(cmp); 509 } 510 } 511 } 512 } 513 } 514 515 return CastP2XNode::bottom_type(); 516 } 517 518 Node *CastP2XNode::Ideal(PhaseGVN *phase, bool can_reshape) { 519 return (in(0) && remove_dead_region(phase, can_reshape)) ? 
this : nullptr; 520 } 521 522 //------------------------------Identity--------------------------------------- 523 Node* CastP2XNode::Identity(PhaseGVN* phase) { 524 if (in(1)->Opcode() == Op_CastX2P) return in(1)->in(1); 525 return this; 526 } 527 528 Node* ConstraintCastNode::make_cast_for_type(Node* c, Node* in, const Type* type, DependencyType dependency, 529 const TypeTuple* types) { 530 if (type->isa_int()) { 531 return new CastIINode(c, in, type, dependency, false, types); 532 } else if (type->isa_long()) { 533 return new CastLLNode(c, in, type, dependency, types); 534 } else if (type->isa_half_float()) { 535 return new CastHHNode(c, in, type, dependency, types); 536 } else if (type->isa_float()) { 537 return new CastFFNode(c, in, type, dependency, types); 538 } else if (type->isa_double()) { 539 return new CastDDNode(c, in, type, dependency, types); 540 } else if (type->isa_vect()) { 541 return new CastVVNode(c, in, type, dependency, types); 542 } else if (type->isa_ptr()) { 543 return new CastPPNode(c, in, type, dependency, types); 544 } 545 fatal("unreachable. Invalid cast type."); 546 return nullptr; 547 } 548 549 Node* ConstraintCastNode::optimize_integer_cast(PhaseGVN* phase, BasicType bt) { 550 PhaseIterGVN *igvn = phase->is_IterGVN(); 551 const TypeInteger* this_type = this->type()->is_integer(bt); 552 Node* z = in(1); 553 const TypeInteger* rx = nullptr; 554 const TypeInteger* ry = nullptr; 555 // Similar to ConvI2LNode::Ideal() for the same reasons 556 if (Compile::push_thru_add(phase, z, this_type, rx, ry, bt, bt)) { 557 if (igvn == nullptr) { 558 // Postpone this optimization to iterative GVN, where we can handle deep 559 // AddI chains without an exponential number of recursive Ideal() calls. 
560 phase->record_for_igvn(this); 561 return nullptr; 562 } 563 int op = z->Opcode(); 564 Node* x = z->in(1); 565 Node* y = z->in(2); 566 567 Node* cx = find_or_make_integer_cast(igvn, x, rx); 568 Node* cy = find_or_make_integer_cast(igvn, y, ry); 569 if (op == Op_Add(bt)) { 570 return AddNode::make(cx, cy, bt); 571 } else { 572 assert(op == Op_Sub(bt), ""); 573 return SubNode::make(cx, cy, bt); 574 } 575 return nullptr; 576 } 577 return nullptr; 578 } 579 580 const Type* ConstraintCastNode::widen_type(const PhaseGVN* phase, const Type* res, BasicType bt) const { 581 if (!phase->C->post_loop_opts_phase()) { 582 return res; 583 } 584 const TypeInteger* this_type = res->is_integer(bt); 585 const TypeInteger* in_type = phase->type(in(1))->isa_integer(bt); 586 if (in_type != nullptr && 587 (in_type->lo_as_long() != this_type->lo_as_long() || 588 in_type->hi_as_long() != this_type->hi_as_long())) { 589 jlong lo1 = this_type->lo_as_long(); 590 jlong hi1 = this_type->hi_as_long(); 591 int w1 = this_type->_widen; 592 if (lo1 >= 0) { 593 // Keep a range assertion of >=0. 594 lo1 = 0; hi1 = max_signed_integer(bt); 595 } else if (hi1 < 0) { 596 // Keep a range assertion of <0. 597 lo1 = min_signed_integer(bt); hi1 = -1; 598 } else { 599 lo1 = min_signed_integer(bt); hi1 = max_signed_integer(bt); 600 } 601 return TypeInteger::make(MAX2(in_type->lo_as_long(), lo1), 602 MIN2(in_type->hi_as_long(), hi1), 603 MAX2((int)in_type->_widen, w1), bt); 604 } 605 return res; 606 }