1 /*
   2  * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "memory/allocation.inline.hpp"
  26 #include "opto/addnode.hpp"
  27 #include "opto/connode.hpp"
  28 #include "opto/convertnode.hpp"
  29 #include "opto/memnode.hpp"
  30 #include "opto/mulnode.hpp"
  31 #include "opto/phaseX.hpp"
  32 #include "opto/subnode.hpp"
  33 #include "utilities/powerOfTwo.hpp"
  34 
  35 // Portions of code courtesy of Clifford Click
  36 
  37 
  38 //=============================================================================
  39 //------------------------------hash-------------------------------------------
  40 // Hash function over MulNodes.  Needs to be commutative; i.e., I swap
  41 // (commute) inputs to MulNodes willy-nilly so the hash function must return
  42 // the same value in the presence of edge swapping.
  43 uint MulNode::hash() const {
  44   return (uintptr_t)in(1) + (uintptr_t)in(2) + Opcode();
  45 }
  46 
  47 //------------------------------Identity---------------------------------------
// Multiplying by one preserves the other argument
  49 Node* MulNode::Identity(PhaseGVN* phase) {
  50   const Type *one = mul_id();  // The multiplicative identity
  51   if( phase->type( in(1) )->higher_equal( one ) ) return in(2);
  52   if( phase->type( in(2) )->higher_equal( one ) ) return in(1);
  53 
  54   return this;
  55 }
  56 
  57 //------------------------------Ideal------------------------------------------
  58 // We also canonicalize the Node, moving constants to the right input,
// and flatten expressions (so that (x*2)*3 becomes x*6).
  60 Node *MulNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  61   Node* in1 = in(1);
  62   Node* in2 = in(2);
  63   Node* progress = nullptr;        // Progress flag
  64 
  65   // This code is used by And nodes too, but some conversions are
  66   // only valid for the actual Mul nodes.
  67   uint op = Opcode();
  68   bool real_mul = (op == Op_MulI) || (op == Op_MulL) ||
  69                   (op == Op_MulF) || (op == Op_MulD) ||
  70                   (op == Op_MulHF);
  71 
  72   // Convert "(-a)*(-b)" into "a*b".
  73   if (real_mul && in1->is_Sub() && in2->is_Sub()) {
  74     if (phase->type(in1->in(1))->is_zero_type() &&
  75         phase->type(in2->in(1))->is_zero_type()) {
  76       set_req_X(1, in1->in(2), phase);
  77       set_req_X(2, in2->in(2), phase);
  78       in1 = in(1);
  79       in2 = in(2);
  80       progress = this;
  81     }
  82   }
  83 
  // Convert "max(a,b) * min(a,b)" into "a*b".
  85   if ((in(1)->Opcode() == max_opcode() && in(2)->Opcode() == min_opcode())
  86       || (in(1)->Opcode() == min_opcode() && in(2)->Opcode() == max_opcode())) {
  87     Node *in11 = in(1)->in(1);
  88     Node *in12 = in(1)->in(2);
  89 
  90     Node *in21 = in(2)->in(1);
  91     Node *in22 = in(2)->in(2);
  92 
  93     if ((in11 == in21 && in12 == in22) ||
  94         (in11 == in22 && in12 == in21)) {
  95       set_req_X(1, in11, phase);
  96       set_req_X(2, in12, phase);
  97       in1 = in(1);
  98       in2 = in(2);
  99       progress = this;
 100     }
 101   }
 102 
 103   const Type* t1 = phase->type(in1);
 104   const Type* t2 = phase->type(in2);
 105 
 106   // We are OK if right is a constant, or right is a load and
 107   // left is a non-constant.
 108   if( !(t2->singleton() ||
 109         (in(2)->is_Load() && !(t1->singleton() || in(1)->is_Load())) ) ) {
 110     if( t1->singleton() ||       // Left input is a constant?
 111         // Otherwise, sort inputs (commutativity) to help value numbering.
 112         (in(1)->_idx > in(2)->_idx) ) {
 113       swap_edges(1, 2);
 114       const Type *t = t1;
 115       t1 = t2;
 116       t2 = t;
 117       progress = this;            // Made progress
 118     }
 119   }
 120 
 121   // If the right input is a constant, and the left input is a product of a
 122   // constant, flatten the expression tree.
 123   if( t2->singleton() &&        // Right input is a constant?
      op != Op_MulF &&          // Floating point types cannot reassociate
 125       op != Op_MulD &&
 126       op != Op_MulHF) {
 127     if( t2 == Type::TOP ) return nullptr;
 128     Node *mul1 = in(1);
 129 #ifdef ASSERT
 130     // Check for dead loop
 131     int op1 = mul1->Opcode();
 132     if ((mul1 == this) || (in(2) == this) ||
 133         ((op1 == mul_opcode() || op1 == add_opcode()) &&
 134          ((mul1->in(1) == this) || (mul1->in(2) == this) ||
 135           (mul1->in(1) == mul1) || (mul1->in(2) == mul1)))) {
 136       assert(false, "dead loop in MulNode::Ideal");
 137     }
 138 #endif
 139 
 140     if( mul1->Opcode() == mul_opcode() ) {  // Left input is a multiply?
 141       // Mul of a constant?
 142       const Type *t12 = phase->type( mul1->in(2) );
      if( t12->singleton() && t12 != Type::TOP) { // Left input is a multiply by a constant?
 144         // Compute new constant; check for overflow
 145         const Type *tcon01 = ((MulNode*)mul1)->mul_ring(t2,t12);
 146         if( tcon01->singleton() ) {
 147           // The Mul of the flattened expression
 148           set_req_X(1, mul1->in(1), phase);
 149           set_req_X(2, phase->makecon(tcon01), phase);
 150           t2 = tcon01;
 151           progress = this;      // Made progress
 152         }
 153       }
 154     }
 155     // If the right input is a constant, and the left input is an add of a
 156     // constant, flatten the tree: (X+con1)*con0 ==> X*con0 + con1*con0
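    // For example, (X + 3) * 4 is rewritten to X*4 + 12.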
 157     const Node *add1 = in(1);
 158     if( add1->Opcode() == add_opcode() ) {      // Left input is an add?
 159       // Add of a constant?
 160       const Type *t12 = phase->type( add1->in(2) );
 161       if( t12->singleton() && t12 != Type::TOP ) { // Left input is an add of a constant?
 162         assert( add1->in(1) != add1, "dead loop in MulNode::Ideal" );
 163         // Compute new constant; check for overflow
 164         const Type *tcon01 = mul_ring(t2,t12);
 165         if( tcon01->singleton() ) {
 166 
          // Convert (X+con1)*con0 into X*con0
 168           Node *mul = clone();    // mul = ()*con0
 169           mul->set_req(1,add1->in(1));  // mul = X*con0
 170           mul = phase->transform(mul);
 171 
 172           Node *add2 = add1->clone();
 173           add2->set_req(1, mul);        // X*con0 + con0*con1
 174           add2->set_req(2, phase->makecon(tcon01) );
 175           progress = add2;
 176         }
 177       }
 178     } // End of is left input an add
  } // End of is right input a constant
 180 
 181   return progress;
 182 }
 183 
 184 //------------------------------Value-----------------------------------------
 185 const Type* MulNode::Value(PhaseGVN* phase) const {
 186   const Type *t1 = phase->type( in(1) );
 187   const Type *t2 = phase->type( in(2) );
 188   // Either input is TOP ==> the result is TOP
 189   if( t1 == Type::TOP ) return Type::TOP;
 190   if( t2 == Type::TOP ) return Type::TOP;
 191 
 192   // Either input is ZERO ==> the result is ZERO.
  // Not valid for floats or doubles since, e.g., +0.0 * -0.0 --> -0.0 and 0 * Inf --> NaN
 194   int op = Opcode();
 195   if( op == Op_MulI || op == Op_AndI || op == Op_MulL || op == Op_AndL ) {
 196     const Type *zero = add_id();        // The multiplicative zero
 197     if( t1->higher_equal( zero ) ) return zero;
 198     if( t2->higher_equal( zero ) ) return zero;
 199   }
 200 
 201   // Code pattern on return from a call that returns an __Value.  Can
 202   // be optimized away if the return value turns out to be an oop.
 203   if (op == Op_AndX &&
 204       in(1) != nullptr &&
 205       in(1)->Opcode() == Op_CastP2X &&
 206       in(1)->in(1) != nullptr &&
 207       phase->type(in(1)->in(1))->isa_oopptr() &&
 208       t2->isa_intptr_t()->_lo >= 0 &&
 209       t2->isa_intptr_t()->_hi <= MinObjAlignmentInBytesMask) {
 210     return add_id();
 211   }
 212 
 213   // Either input is BOTTOM ==> the result is the local BOTTOM
 214   if( t1 == Type::BOTTOM || t2 == Type::BOTTOM )
 215     return bottom_type();
 216 
 217 #if defined(IA32)
 218   // Can't trust native compilers to properly fold strict double
 219   // multiplication with round-to-zero on this platform.
 220   if (op == Op_MulD) {
 221     return TypeD::DOUBLE;
 222   }
 223 #endif
 224 
 225   return mul_ring(t1,t2);            // Local flavor of type multiplication
 226 }
 227 
 228 MulNode* MulNode::make(Node* in1, Node* in2, BasicType bt) {
 229   switch (bt) {
 230     case T_INT:
 231       return new MulINode(in1, in2);
 232     case T_LONG:
 233       return new MulLNode(in1, in2);
 234     default:
 235       fatal("Not implemented for %s", type2name(bt));
 236   }
 237   return nullptr;
 238 }
 239 
 240 MulNode* MulNode::make_and(Node* in1, Node* in2, BasicType bt) {
 241   switch (bt) {
 242     case T_INT:
 243       return new AndINode(in1, in2);
 244     case T_LONG:
 245       return new AndLNode(in1, in2);
 246     default:
 247       fatal("Not implemented for %s", type2name(bt));
 248   }
 249   return nullptr;
 250 }
 251 
 252 
 253 //=============================================================================
 254 //------------------------------Ideal------------------------------------------
 255 // Check for power-of-2 multiply, then try the regular MulNode::Ideal
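// For example (with the constant canonicalized into in(2)):
//   x * 8  ==> x << 3                 (power of two)
//   x * 10 ==> (x << 3) + (x << 1)    (two bits set)
//   x * 7  ==> (x << 3) - x           (one less than a power of two)
//   x * -8 ==> 0 - (x << 3)           (negative constant: negate the shifted result)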
 256 Node *MulINode::Ideal(PhaseGVN *phase, bool can_reshape) {
 257   const jint con = in(2)->find_int_con(0);
 258   if (con == 0) {
 259     // If in(2) is not a constant, call Ideal() of the parent class to
 260     // try to move constant to the right side.
 261     return MulNode::Ideal(phase, can_reshape);
 262   }
 263 
 264   // Now we have a constant Node on the right and the constant in con.
 265   if (con == 1) {
 266     // By one is handled by Identity call
 267     return nullptr;
 268   }
 269 
 270   // Check for negative constant; if so negate the final result
 271   bool sign_flip = false;
 272 
 273   unsigned int abs_con = g_uabs(con);
 274   if (abs_con != (unsigned int)con) {
 275     sign_flip = true;
 276   }
 277 
 278   // Get low bit; check for being the only bit
 279   Node *res = nullptr;
 280   unsigned int bit1 = submultiple_power_of_2(abs_con);
 281   if (bit1 == abs_con) {           // Found a power of 2?
 282     res = new LShiftINode(in(1), phase->intcon(log2i_exact(bit1)));
 283   } else {
 284     // Check for constant with 2 bits set
 285     unsigned int bit2 = abs_con - bit1;
 286     bit2 = bit2 & (0 - bit2);          // Extract 2nd bit
 287     if (bit2 + bit1 == abs_con) {    // Found all bits in con?
 288       Node *n1 = phase->transform(new LShiftINode(in(1), phase->intcon(log2i_exact(bit1))));
 289       Node *n2 = phase->transform(new LShiftINode(in(1), phase->intcon(log2i_exact(bit2))));
 290       res = new AddINode(n2, n1);
 291     } else if (is_power_of_2(abs_con + 1)) {
      // Sleazy: power-of-2 - 1.  Next time be generic.
 293       unsigned int temp = abs_con + 1;
 294       Node *n1 = phase->transform(new LShiftINode(in(1), phase->intcon(log2i_exact(temp))));
 295       res = new SubINode(n1, in(1));
 296     } else {
 297       return MulNode::Ideal(phase, can_reshape);
 298     }
 299   }
 300 
 301   if (sign_flip) {             // Need to negate result?
 302     res = phase->transform(res);// Transform, before making the zero con
 303     res = new SubINode(phase->intcon(0),res);
 304   }
 305 
 306   return res;                   // Return final result
 307 }
 308 
 309 // This template class performs type multiplication for MulI/MulLNode. NativeType is either jint or jlong.
 310 // In this class, the inputs of the MulNodes are named left and right with types [left_lo,left_hi] and [right_lo,right_hi].
 311 //
 312 // In general, the multiplication of two x-bit values could produce a result that consumes up to 2x bits if there is
 313 // enough space to hold them all. We can therefore distinguish the following two cases for the product:
 314 // - no overflow (i.e. product fits into x bits)
 315 // - overflow (i.e. product does not fit into x bits)
 316 //
 317 // When multiplying the two x-bit inputs 'left' and 'right' with their x-bit types [left_lo,left_hi] and [right_lo,right_hi]
 318 // we need to find the minimum and maximum of all possible products to define a new type. To do that, we compute the
 319 // cross product of [left_lo,left_hi] and [right_lo,right_hi] in 2x-bit space where no over- or underflow can happen.
 320 // The cross product consists of the following four multiplications with 2x-bit results:
 321 // (1) left_lo * right_lo
 322 // (2) left_lo * right_hi
 323 // (3) left_hi * right_lo
 324 // (4) left_hi * right_hi
 325 //
 326 // Let's define the following two functions:
 327 // - Lx(i): Returns the lower x bits of the 2x-bit number i.
 328 // - Ux(i): Returns the upper x bits of the 2x-bit number i.
 329 //
 330 // Let's first assume all products are positive where only overflows are possible but no underflows. If there is no
 331 // overflow for a product p, then the upper x bits of the 2x-bit result p are all zero:
 332 //     Ux(p) = 0
 333 //     Lx(p) = p
 334 //
 335 // If none of the multiplications (1)-(4) overflow, we can truncate the upper x bits and use the following result type
 336 // with x bits:
 337 //      [result_lo,result_hi] = [MIN(Lx(1),Lx(2),Lx(3),Lx(4)),MAX(Lx(1),Lx(2),Lx(3),Lx(4))]
 338 //
 339 // If any of these multiplications overflows, we could pessimistically take the bottom type for the x bit result
 340 // (i.e. all values in the x-bit space could be possible):
 341 //      [result_lo,result_hi] = [NativeType_min,NativeType_max]
 342 //
 343 // However, in case of any overflow, we can do better by analyzing the upper x bits of all multiplications (1)-(4) with
 344 // 2x-bit results. The upper x bits tell us something about how many times a multiplication has overflown the lower
 345 // x bits. If the upper x bits of (1)-(4) are all equal, then we know that all of these multiplications overflowed
 346 // the lower x bits the same number of times:
 347 //     Ux((1)) = Ux((2)) = Ux((3)) = Ux((4))
 348 //
 349 // If all upper x bits are equal, we can conclude:
//     Lx(MIN((1),(2),(3),(4))) = MIN(Lx(1),Lx(2),Lx(3),Lx(4))
//     Lx(MAX((1),(2),(3),(4))) = MAX(Lx(1),Lx(2),Lx(3),Lx(4))
 352 //
 353 // Therefore, we can use the same precise x-bit result type as for the no-overflow case:
//     [result_lo,result_hi] = [MIN(Lx(1),Lx(2),Lx(3),Lx(4)),MAX(Lx(1),Lx(2),Lx(3),Lx(4))]
 355 //
 356 //
 357 // Now let's assume that (1)-(4) are signed multiplications where over- and underflow could occur:
// Negative numbers are all sign extended with ones. Therefore, if a negative product does not underflow, then the
 359 // upper x bits of the 2x-bit result are all set to ones which is minus one in two's complement. If there is an underflow,
 360 // the upper x bits are decremented by the number of times an underflow occurred. The smallest possible negative product
 361 // is NativeType_min*NativeType_max, where the upper x bits are set to NativeType_min / 2 (b11...0). It is therefore
 362 // impossible to underflow the upper x bits. Thus, when having all ones (i.e. minus one) in the upper x bits, we know
 363 // that there is no underflow.
 364 //
 365 // To be able to compare the number of over-/underflows of positive and negative products, respectively, we normalize
 366 // the upper x bits of negative 2x-bit products by adding one. This way a product has no over- or underflow if the
 367 // normalized upper x bits are zero. Now we can use the same improved type as for strictly positive products because we
 368 // can compare the upper x bits in a unified way with N() being the normalization function:
//     N(Ux((1))) = N(Ux((2))) = N(Ux((3))) = N(Ux((4)))
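//
// Worked example (NativeType = jint, x = 32):
// - left = [2^30, 2^30 + 1], right = [2, 2]: the 64-bit cross products are 2^31 and 2^31 + 2. Both overflow the
//   lower 32 bits exactly once (normalized upper word = 1), so the precise wrapped range [-2^31, -2^31 + 2] is used.
// - left = [2^30, 2^30 + 1], right = [1, 2]: 2^30 * 1 does not overflow while 2^30 * 2 does, so the normalized upper
//   words differ and we fall back to the conservative bottom type (TypeInt::INT).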
 370 template<typename NativeType>
 371 class IntegerTypeMultiplication {
 372 
 373   NativeType _lo_left;
 374   NativeType _lo_right;
 375   NativeType _hi_left;
 376   NativeType _hi_right;
 377   short _widen_left;
 378   short _widen_right;
 379 
 380   static const Type* overflow_type();
 381   static NativeType multiply_high(NativeType x, NativeType y);
 382   const Type* create_type(NativeType lo, NativeType hi) const;
 383 
 384   static NativeType multiply_high_signed_overflow_value(NativeType x, NativeType y) {
 385     return normalize_overflow_value(x, y, multiply_high(x, y));
 386   }
 387 
 388   bool cross_product_not_same_overflow_value() const {
 389     const NativeType lo_lo_high_product = multiply_high_signed_overflow_value(_lo_left, _lo_right);
 390     const NativeType lo_hi_high_product = multiply_high_signed_overflow_value(_lo_left, _hi_right);
 391     const NativeType hi_lo_high_product = multiply_high_signed_overflow_value(_hi_left, _lo_right);
 392     const NativeType hi_hi_high_product = multiply_high_signed_overflow_value(_hi_left, _hi_right);
 393     return lo_lo_high_product != lo_hi_high_product ||
 394            lo_hi_high_product != hi_lo_high_product ||
 395            hi_lo_high_product != hi_hi_high_product;
 396   }
 397 
 398   bool does_product_overflow(NativeType x, NativeType y) const {
 399     return multiply_high_signed_overflow_value(x, y) != 0;
 400   }
 401 
 402   static NativeType normalize_overflow_value(const NativeType x, const NativeType y, NativeType result) {
 403     return java_multiply(x, y) < 0 ? result + 1 : result;
 404   }
 405 
 406  public:
 407   template<class IntegerType>
 408   IntegerTypeMultiplication(const IntegerType* left, const IntegerType* right)
 409       : _lo_left(left->_lo), _lo_right(right->_lo),
 410         _hi_left(left->_hi), _hi_right(right->_hi),
 411         _widen_left(left->_widen), _widen_right(right->_widen)  {}
 412 
 413   // Compute the product type by multiplying the two input type ranges. We take the minimum and maximum of all possible
 414   // values (requires 4 multiplications of all possible combinations of the two range boundary values). If any of these
  // multiplications overflows/underflows, we need to make sure that they all have the same number of overflows/underflows.
  // If that is not the case, we return the bottom type to cover all values due to the inconsistent overflows/underflows.
 417   const Type* compute() const {
 418     if (cross_product_not_same_overflow_value()) {
 419       return overflow_type();
 420     }
 421 
 422     NativeType lo_lo_product = java_multiply(_lo_left, _lo_right);
 423     NativeType lo_hi_product = java_multiply(_lo_left, _hi_right);
 424     NativeType hi_lo_product = java_multiply(_hi_left, _lo_right);
 425     NativeType hi_hi_product = java_multiply(_hi_left, _hi_right);
 426     const NativeType min = MIN4(lo_lo_product, lo_hi_product, hi_lo_product, hi_hi_product);
 427     const NativeType max = MAX4(lo_lo_product, lo_hi_product, hi_lo_product, hi_hi_product);
 428     return create_type(min, max);
 429   }
 430 
 431   bool does_overflow() const {
 432     return does_product_overflow(_lo_left, _lo_right) ||
 433            does_product_overflow(_lo_left, _hi_right) ||
 434            does_product_overflow(_hi_left, _lo_right) ||
 435            does_product_overflow(_hi_left, _hi_right);
 436   }
 437 };
 438 
 439 template <>
 440 const Type* IntegerTypeMultiplication<jint>::overflow_type() {
 441   return TypeInt::INT;
 442 }
 443 
 444 template <>
 445 jint IntegerTypeMultiplication<jint>::multiply_high(const jint x, const jint y) {
 446   const jlong x_64 = x;
 447   const jlong y_64 = y;
 448   const jlong product = x_64 * y_64;
 449   return (jint)((uint64_t)product >> 32u);
 450 }
 451 
 452 template <>
 453 const Type* IntegerTypeMultiplication<jint>::create_type(jint lo, jint hi) const {
 454   return TypeInt::make(lo, hi, MAX2(_widen_left, _widen_right));
 455 }
 456 
 457 template <>
 458 const Type* IntegerTypeMultiplication<jlong>::overflow_type() {
 459   return TypeLong::LONG;
 460 }
 461 
 462 template <>
 463 jlong IntegerTypeMultiplication<jlong>::multiply_high(const jlong x, const jlong y) {
 464   return multiply_high_signed(x, y);
 465 }
 466 
 467 template <>
 468 const Type* IntegerTypeMultiplication<jlong>::create_type(jlong lo, jlong hi) const {
 469   return TypeLong::make(lo, hi, MAX2(_widen_left, _widen_right));
 470 }
 471 
 472 // Compute the product type of two integer ranges into this node.
 473 const Type* MulINode::mul_ring(const Type* type_left, const Type* type_right) const {
 474   const IntegerTypeMultiplication<jint> integer_multiplication(type_left->is_int(), type_right->is_int());
 475   return integer_multiplication.compute();
 476 }
 477 
 478 bool MulINode::does_overflow(const TypeInt* type_left, const TypeInt* type_right) {
 479   const IntegerTypeMultiplication<jint> integer_multiplication(type_left, type_right);
 480   return integer_multiplication.does_overflow();
 481 }
 482 
 483 // Compute the product type of two long ranges into this node.
 484 const Type* MulLNode::mul_ring(const Type* type_left, const Type* type_right) const {
 485   const IntegerTypeMultiplication<jlong> integer_multiplication(type_left->is_long(), type_right->is_long());
 486   return integer_multiplication.compute();
 487 }
 488 
 489 //=============================================================================
 490 //------------------------------Ideal------------------------------------------
 491 // Check for power-of-2 multiply, then try the regular MulNode::Ideal
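// For example, x * 6L ==> (x << 2) + (x << 1), and x * -16L ==> 0 - (x << 4).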
 492 Node *MulLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
 493   const jlong con = in(2)->find_long_con(0);
 494   if (con == 0) {
 495     // If in(2) is not a constant, call Ideal() of the parent class to
 496     // try to move constant to the right side.
 497     return MulNode::Ideal(phase, can_reshape);
 498   }
 499 
 500   // Now we have a constant Node on the right and the constant in con.
 501   if (con == 1) {
 502     // By one is handled by Identity call
 503     return nullptr;
 504   }
 505 
 506   // Check for negative constant; if so negate the final result
 507   bool sign_flip = false;
 508   julong abs_con = g_uabs(con);
 509   if (abs_con != (julong)con) {
 510     sign_flip = true;
 511   }
 512 
 513   // Get low bit; check for being the only bit
 514   Node *res = nullptr;
 515   julong bit1 = submultiple_power_of_2(abs_con);
 516   if (bit1 == abs_con) {           // Found a power of 2?
 517     res = new LShiftLNode(in(1), phase->intcon(log2i_exact(bit1)));
 518   } else {
 519 
 520     // Check for constant with 2 bits set
 521     julong bit2 = abs_con-bit1;
 522     bit2 = bit2 & (0-bit2);          // Extract 2nd bit
 523     if (bit2 + bit1 == abs_con) {    // Found all bits in con?
 524       Node *n1 = phase->transform(new LShiftLNode(in(1), phase->intcon(log2i_exact(bit1))));
 525       Node *n2 = phase->transform(new LShiftLNode(in(1), phase->intcon(log2i_exact(bit2))));
 526       res = new AddLNode(n2, n1);
 527 
 528     } else if (is_power_of_2(abs_con+1)) {
      // Sleazy: power-of-2 - 1.  Next time be generic.
 530       julong temp = abs_con + 1;
 531       Node *n1 = phase->transform( new LShiftLNode(in(1), phase->intcon(log2i_exact(temp))));
 532       res = new SubLNode(n1, in(1));
 533     } else {
 534       return MulNode::Ideal(phase, can_reshape);
 535     }
 536   }
 537 
 538   if (sign_flip) {             // Need to negate result?
 539     res = phase->transform(res);// Transform, before making the zero con
 540     res = new SubLNode(phase->longcon(0),res);
 541   }
 542 
 543   return res;                   // Return final result
 544 }
 545 
 546 //=============================================================================
 547 //------------------------------mul_ring---------------------------------------
// Compute the product type of two float ranges into this node.
 549 const Type *MulFNode::mul_ring(const Type *t0, const Type *t1) const {
 550   if( t0 == Type::FLOAT || t1 == Type::FLOAT ) return Type::FLOAT;
 551   return TypeF::make( t0->getf() * t1->getf() );
 552 }
 553 
 554 //------------------------------Ideal---------------------------------------
 555 // Check to see if we are multiplying by a constant 2 and convert to add, then try the regular MulNode::Ideal
 556 Node* MulFNode::Ideal(PhaseGVN* phase, bool can_reshape) {
 557   const TypeF *t2 = phase->type(in(2))->isa_float_constant();
 558 
 559   // x * 2 -> x + x
 560   if (t2 != nullptr && t2->getf() == 2) {
 561     Node* base = in(1);
 562     return new AddFNode(base, base);
 563   }
 564   return MulNode::Ideal(phase, can_reshape);
 565 }
 566 
 567 //=============================================================================
 568 //------------------------------Ideal------------------------------------------
 569 // Check to see if we are multiplying by a constant 2 and convert to add, then try the regular MulNode::Ideal
 570 Node* MulHFNode::Ideal(PhaseGVN* phase, bool can_reshape) {
 571   const TypeH* t2 = phase->type(in(2))->isa_half_float_constant();
 572 
 573   // x * 2 -> x + x
 574   if (t2 != nullptr && t2->getf() == 2) {
 575     Node* base = in(1);
 576     return new AddHFNode(base, base);
 577   }
 578   return MulNode::Ideal(phase, can_reshape);
 579 }
 580 
 581 // Compute the product type of two half float ranges into this node.
 582 const Type* MulHFNode::mul_ring(const Type* t0, const Type* t1) const {
 583   if (t0 == Type::HALF_FLOAT || t1 == Type::HALF_FLOAT) {
 584     return Type::HALF_FLOAT;
 585   }
 586   return TypeH::make(t0->getf() * t1->getf());
 587 }
 588 
 589 //=============================================================================
 590 //------------------------------mul_ring---------------------------------------
 591 // Compute the product type of two double ranges into this node.
 592 const Type *MulDNode::mul_ring(const Type *t0, const Type *t1) const {
 593   if( t0 == Type::DOUBLE || t1 == Type::DOUBLE ) return Type::DOUBLE;
 594   // We must be multiplying 2 double constants.
 595   return TypeD::make( t0->getd() * t1->getd() );
 596 }
 597 
 598 //------------------------------Ideal---------------------------------------
 599 // Check to see if we are multiplying by a constant 2 and convert to add, then try the regular MulNode::Ideal
 600 Node* MulDNode::Ideal(PhaseGVN* phase, bool can_reshape) {
 601   const TypeD *t2 = phase->type(in(2))->isa_double_constant();
 602 
 603   // x * 2 -> x + x
 604   if (t2 != nullptr && t2->getd() == 2) {
 605     Node* base = in(1);
 606     return new AddDNode(base, base);
 607   }
 608 
 609   return MulNode::Ideal(phase, can_reshape);
 610 }
 611 
 612 //=============================================================================
 613 //------------------------------Value------------------------------------------
 614 const Type* MulHiLNode::Value(PhaseGVN* phase) const {
 615   const Type *t1 = phase->type( in(1) );
 616   const Type *t2 = phase->type( in(2) );
 617   const Type *bot = bottom_type();
 618   return MulHiValue(t1, t2, bot);
 619 }
 620 
 621 const Type* UMulHiLNode::Value(PhaseGVN* phase) const {
 622   const Type *t1 = phase->type( in(1) );
 623   const Type *t2 = phase->type( in(2) );
 624   const Type *bot = bottom_type();
 625   return MulHiValue(t1, t2, bot);
 626 }
 627 
 628 // A common routine used by UMulHiLNode and MulHiLNode
 629 const Type* MulHiValue(const Type *t1, const Type *t2, const Type *bot) {
 630   // Either input is TOP ==> the result is TOP
 631   if( t1 == Type::TOP ) return Type::TOP;
 632   if( t2 == Type::TOP ) return Type::TOP;
 633 
 634   // Either input is BOTTOM ==> the result is the local BOTTOM
 635   if( (t1 == bot) || (t2 == bot) ||
 636       (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) )
 637     return bot;
 638 
 639   // It is not worth trying to constant fold this stuff!
 640   return TypeLong::LONG;
 641 }
 642 
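// Compute the type of "x & y" from the two input ranges r0 and r1 (see the case analysis in the comments below).
// Worked example for the negative/mixed case (jint): r0 = [-20, -3], r1 = [-9, 100].
//   sel_val = ~MIN2(-20, -9) = ~(-20) = 19, count_leading_zeros(19) = 27, so shift_bits = 26 and
//   min = min_jint >> 26 = -32 (both lower bounds share the leading ones of -32).
//   Since r1 crosses zero, max = MAX2(-3, 100) = 100, giving the result range [-32, 100].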
 643 template<typename IntegerType>
 644 static const IntegerType* and_value(const IntegerType* r0, const IntegerType* r1) {
 645   typedef typename IntegerType::NativeType NativeType;
 646   static_assert(std::is_signed<NativeType>::value, "Native type of IntegerType must be signed!");
 647 
 648   int widen = MAX2(r0->_widen, r1->_widen);
 649 
 650   // If both types are constants, we can calculate a constant result.
 651   if (r0->is_con() && r1->is_con()) {
 652     return IntegerType::make(r0->get_con() & r1->get_con());
 653   }
 654 
 655   // If both ranges are positive, the result will range from 0 up to the hi value of the smaller range. The minimum
 656   // of the two constrains the upper bound because any higher value in the other range will see all zeroes, so it will be masked out.
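  // For example, [0, 10] & [0, 6] yields [0, 6], since x & y <= MIN2(x, y) for non-negative x and y.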
 657   if (r0->_lo >= 0 && r1->_lo >= 0) {
 658     return IntegerType::make(0, MIN2(r0->_hi, r1->_hi), widen);
 659   }
 660 
 661   // If only one range is positive, the result will range from 0 up to that range's maximum value.
 662   // For the operation 'x & C' where C is a positive constant, the result will be in the range [0..C]. With that observation,
  // we can say that 'x & c' for any integer c such that 0 <= c <= C will also be in the range [0..C]. Therefore, 'x & [c..C]'
 664   // where c >= 0 will be in the range [0..C].
 665   if (r0->_lo >= 0) {
 666     return IntegerType::make(0, r0->_hi, widen);
 667   }
 668 
 669   if (r1->_lo >= 0) {
 670     return IntegerType::make(0, r1->_hi, widen);
 671   }
 672 
 673   // At this point, all positive ranges will have already been handled, so the only remaining cases will be negative ranges
 674   // and constants.
 675 
 676   assert(r0->_lo < 0 && r1->_lo < 0, "positive ranges should already be handled!");
 677 
 678   // As two's complement means that both numbers will start with leading 1s, the lower bound of both ranges will contain
 679   // the common leading 1s of both minimum values. In order to count them with count_leading_zeros, the bits are inverted.
 680   NativeType sel_val = ~MIN2(r0->_lo, r1->_lo);
 681 
 682   NativeType min;
 683   if (sel_val == 0) {
 684     // Since count_leading_zeros is undefined at 0, we short-circuit the condition where both ranges have a minimum of -1.
 685     min = -1;
 686   } else {
 687     // To get the number of bits to shift, we count the leading 0-bits and then subtract one, as the sign bit is already set.
 688     int shift_bits = count_leading_zeros(sel_val) - 1;
 689     min = std::numeric_limits<NativeType>::min() >> shift_bits;
 690   }
 691 
 692   NativeType max;
 693   if (r0->_hi < 0 && r1->_hi < 0) {
 694     // If both ranges are negative, then the same optimization as both positive ranges will apply, and the smaller hi
 695     // value will mask off any bits set by higher values.
 696     max = MIN2(r0->_hi, r1->_hi);
 697   } else {
 698     // In the case of ranges that cross zero, negative values can cause the higher order bits to be set, so the maximum
 699     // positive value can be as high as the larger hi value.
 700     max = MAX2(r0->_hi, r1->_hi);
 701   }
 702 
 703   return IntegerType::make(min, max, widen);
 704 }
 705 
 706 //=============================================================================
 707 //------------------------------mul_ring---------------------------------------
 708 // Supplied function returns the product of the inputs IN THE CURRENT RING.
 709 // For the logical operations the ring's MUL is really a logical AND function.
 710 // This also type-checks the inputs for sanity.  Guaranteed never to
 711 // be passed a TOP or BOTTOM type, these are filtered out by pre-check.
 712 const Type *AndINode::mul_ring( const Type *t0, const Type *t1 ) const {
 713   const TypeInt* r0 = t0->is_int();
 714   const TypeInt* r1 = t1->is_int();
 715 
 716   return and_value<TypeInt>(r0, r1);
 717 }
 718 
 719 static bool AndIL_is_zero_element_under_mask(const PhaseGVN* phase, const Node* expr, const Node* mask, BasicType bt);
 720 
 721 const Type* AndINode::Value(PhaseGVN* phase) const {
 722   if (AndIL_is_zero_element_under_mask(phase, in(1), in(2), T_INT) ||
 723       AndIL_is_zero_element_under_mask(phase, in(2), in(1), T_INT)) {
 724     return TypeInt::ZERO;
 725   }
 726 
 727   return MulNode::Value(phase);
 728 }
 729 
 730 //------------------------------Identity---------------------------------------
 731 // Masking off the high bits of an unsigned load is not required
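// For example, "(x >>> 24) & 0xFF" is already just "x >>> 24", and masking a LoadUS (char) value with 0xFFFF
// changes nothing.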
 732 Node* AndINode::Identity(PhaseGVN* phase) {
 733 
 734   // x & x => x
 735   if (in(1) == in(2)) {
 736     return in(1);
 737   }
 738 
 739   Node* in1 = in(1);
 740   uint op = in1->Opcode();
 741   const TypeInt* t2 = phase->type(in(2))->isa_int();
 742   if (t2 && t2->is_con()) {
 743     int con = t2->get_con();
 744     // Masking off high bits which are always zero is useless.
 745     const TypeInt* t1 = phase->type(in(1))->isa_int();
 746     if (t1 != nullptr && t1->_lo >= 0) {
 747       jint t1_support = right_n_bits(1 + log2i_graceful(t1->_hi));
 748       if ((t1_support & con) == t1_support)
 749         return in1;
 750     }
    // Masking off the high bits of an unsigned-shift-right is not
 752     // needed either.
 753     if (op == Op_URShiftI) {
 754       const TypeInt* t12 = phase->type(in1->in(2))->isa_int();
 755       if (t12 && t12->is_con()) {  // Shift is by a constant
 756         int shift = t12->get_con();
 757         shift &= BitsPerJavaInteger - 1;  // semantics of Java shifts
 758         int mask = max_juint >> shift;
 759         if ((mask & con) == mask)  // If AND is useless, skip it
 760           return in1;
 761       }
 762     }
 763   }
 764   return MulNode::Identity(phase);
 765 }
 766 
 767 //------------------------------Ideal------------------------------------------
 768 Node *AndINode::Ideal(PhaseGVN *phase, bool can_reshape) {
 769   // Simplify (v1 + v2) & mask to v1 & mask or v2 & mask when possible.
 770   Node* progress = AndIL_sum_and_mask(phase, T_INT);
 771   if (progress != nullptr) {
 772     return progress;
 773   }
 774 
 775   // Convert "(~a) & (~b)" into "~(a | b)"
 776   if (AddNode::is_not(phase, in(1), T_INT) && AddNode::is_not(phase, in(2), T_INT)) {
 777     Node* or_a_b = new OrINode(in(1)->in(1), in(2)->in(1));
 778     Node* tn = phase->transform(or_a_b);
 779     return AddNode::make_not(phase, tn, T_INT);
 780   }
 781 
 782   // Special case constant AND mask
 783   const TypeInt *t2 = phase->type( in(2) )->isa_int();
 784   if( !t2 || !t2->is_con() ) return MulNode::Ideal(phase, can_reshape);
 785   const int mask = t2->get_con();
 786   Node *load = in(1);
 787   uint lop = load->Opcode();
 788 
 789   // Masking bits off of a Character?  Hi bits are already zero.
 790   if( lop == Op_LoadUS &&
 791       (mask & 0xFFFF0000) )     // Can we make a smaller mask?
 792     return new AndINode(load,phase->intcon(mask&0xFFFF));
 793 
 794   // Masking bits off of a Short?  Loading a Character does some masking
 795   if (can_reshape &&
 796       load->outcnt() == 1 && load->unique_out() == this) {
 797     if (lop == Op_LoadS && (mask & 0xFFFF0000) == 0 ) {
 798       Node* ldus = load->as_Load()->convert_to_unsigned_load(*phase);
 799       ldus = phase->transform(ldus);
 800       return new AndINode(ldus, phase->intcon(mask & 0xFFFF));
 801     }
 802 
 803     // Masking sign bits off of a Byte?  Do an unsigned byte load plus
 804     // an and.
 805     if (lop == Op_LoadB && (mask & 0xFFFFFF00) == 0) {
 806       Node* ldub = load->as_Load()->convert_to_unsigned_load(*phase);
 807       ldub = phase->transform(ldub);
 808       return new AndINode(ldub, phase->intcon(mask));
 809     }
 810   }
 811 
  // Masking off sign bits?  Don't make them!
 813   if( lop == Op_RShiftI ) {
 814     const TypeInt *t12 = phase->type(load->in(2))->isa_int();
 815     if( t12 && t12->is_con() ) { // Shift is by a constant
 816       int shift = t12->get_con();
 817       shift &= BitsPerJavaInteger-1;  // semantics of Java shifts
 818       const int sign_bits_mask = ~right_n_bits(BitsPerJavaInteger - shift);
 819       // If the AND'ing of the 2 masks has no bits, then only original shifted
 820       // bits survive.  NO sign-extension bits survive the maskings.
 821       if( (sign_bits_mask & mask) == 0 ) {
 822         // Use zero-fill shift instead
 823         Node *zshift = phase->transform(new URShiftINode(load->in(1),load->in(2)));
 824         return new AndINode( zshift, in(2) );
 825       }
 826     }
 827   }
 828 
 829   // Check for 'negate/and-1', a pattern emitted when someone asks for
 830   // 'mod 2'.  Negate leaves the low order bit unchanged (think: complement
 831   // plus 1) and the mask is of the low order bit.  Skip the negate.
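  // For example, "(0 - x) & 1" becomes "x & 1".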
 832   if( lop == Op_SubI && mask == 1 && load->in(1) &&
 833       phase->type(load->in(1)) == TypeInt::ZERO )
 834     return new AndINode( load->in(2), in(2) );
 835 
 836   return MulNode::Ideal(phase, can_reshape);
 837 }
 838 
 839 //=============================================================================
 840 //------------------------------mul_ring---------------------------------------
 841 // Supplied function returns the product of the inputs IN THE CURRENT RING.
 842 // For the logical operations the ring's MUL is really a logical AND function.
 843 // This also type-checks the inputs for sanity.  Guaranteed never to
 844 // be passed a TOP or BOTTOM type, these are filtered out by pre-check.
 845 const Type *AndLNode::mul_ring( const Type *t0, const Type *t1 ) const {
 846   const TypeLong* r0 = t0->is_long();
 847   const TypeLong* r1 = t1->is_long();
 848 
 849   return and_value<TypeLong>(r0, r1);
 850 }
 851 
 852 const Type* AndLNode::Value(PhaseGVN* phase) const {
 853   if (AndIL_is_zero_element_under_mask(phase, in(1), in(2), T_LONG) ||
 854       AndIL_is_zero_element_under_mask(phase, in(2), in(1), T_LONG)) {
 855     return TypeLong::ZERO;
 856   }
 857 
 858   return MulNode::Value(phase);
 859 }
 860 
 861 //------------------------------Identity---------------------------------------
 862 // Masking off the high bits of an unsigned load is not required
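// For example, "(x >>> 56) & 0xFFL" is already just "x >>> 56".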
 863 Node* AndLNode::Identity(PhaseGVN* phase) {
 864 
 865   // x & x => x
 866   if (in(1) == in(2)) {
 867     return in(1);
 868   }
 869 
 870   Node *usr = in(1);
 871   const TypeLong *t2 = phase->type( in(2) )->isa_long();
 872   if( t2 && t2->is_con() ) {
 873     jlong con = t2->get_con();
 874     // Masking off high bits which are always zero is useless.
 875     const TypeLong* t1 = phase->type( in(1) )->isa_long();
 876     if (t1 != nullptr && t1->_lo >= 0) {
 877       int bit_count = log2i_graceful(t1->_hi) + 1;
 878       jlong t1_support = jlong(max_julong >> (BitsPerJavaLong - bit_count));
 879       if ((t1_support & con) == t1_support)
 880         return usr;
 881     }
 882     uint lop = usr->Opcode();
    // Masking off the high bits of an unsigned-shift-right is not
 884     // needed either.
 885     if( lop == Op_URShiftL ) {
 886       const TypeInt *t12 = phase->type( usr->in(2) )->isa_int();
 887       if( t12 && t12->is_con() ) {  // Shift is by a constant
 888         int shift = t12->get_con();
 889         shift &= BitsPerJavaLong - 1;  // semantics of Java shifts
 890         jlong mask = max_julong >> shift;
 891         if( (mask&con) == mask )  // If AND is useless, skip it
 892           return usr;
 893       }
 894     }
 895   }
 896   return MulNode::Identity(phase);
 897 }
 898 
 899 //------------------------------Ideal------------------------------------------
 900 Node *AndLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
 901   // Simplify (v1 + v2) & mask to v1 & mask or v2 & mask when possible.
 902   Node* progress = AndIL_sum_and_mask(phase, T_LONG);
 903   if (progress != nullptr) {
 904     return progress;
 905   }
 906 
 907   // Convert "(~a) & (~b)" into "~(a | b)"
 908   if (AddNode::is_not(phase, in(1), T_LONG) && AddNode::is_not(phase, in(2), T_LONG)) {
 909     Node* or_a_b = new OrLNode(in(1)->in(1), in(2)->in(1));
 910     Node* tn = phase->transform(or_a_b);
 911     return AddNode::make_not(phase, tn, T_LONG);
 912   }
 913 
 914   // Special case constant AND mask
 915   const TypeLong *t2 = phase->type( in(2) )->isa_long();
 916   if( !t2 || !t2->is_con() ) return MulNode::Ideal(phase, can_reshape);
 917   const jlong mask = t2->get_con();
 918 
 919   Node* in1 = in(1);
 920   int op = in1->Opcode();
 921 
 922   // Are we masking a long that was converted from an int with a mask
 923   // that fits in 32-bits?  Commute them and use an AndINode.  Don't
 924   // convert masks which would cause a sign extension of the integer
 925   // value.  This check includes UI2L masks (0x00000000FFFFFFFF) which
 926   // would be optimized away later in Identity.
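  // For example, "((long)i) & 0xFFL" becomes "(long)(i & 0xFF)".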
 927   if (op == Op_ConvI2L && (mask & UCONST64(0xFFFFFFFF80000000)) == 0) {
 928     Node* andi = new AndINode(in1->in(1), phase->intcon(mask));
 929     andi = phase->transform(andi);
 930     return new ConvI2LNode(andi);
 931   }
 932 
  // Masking off sign bits?  Don't make them!
 934   if (op == Op_RShiftL) {
 935     const TypeInt* t12 = phase->type(in1->in(2))->isa_int();
 936     if( t12 && t12->is_con() ) { // Shift is by a constant
 937       int shift = t12->get_con();
 938       shift &= BitsPerJavaLong - 1;  // semantics of Java shifts
 939       if (shift != 0) {
 940         const julong sign_bits_mask = ~(((julong)CONST64(1) << (julong)(BitsPerJavaLong - shift)) -1);
 941         // If the AND'ing of the 2 masks has no bits, then only original shifted
 942         // bits survive.  NO sign-extension bits survive the maskings.
 943         if( (sign_bits_mask & mask) == 0 ) {
 944           // Use zero-fill shift instead
 945           Node *zshift = phase->transform(new URShiftLNode(in1->in(1), in1->in(2)));
 946           return new AndLNode(zshift, in(2));
 947         }
 948       }
 949     }
 950   }
 951 
 952   // Search for GraphKit::mark_word_test patterns and fold the test if the result is statically known
 953   Node* load1 = in(1);
 954   Node* load2 = nullptr;
 955   if (load1->is_Phi() && phase->type(load1)->isa_long()) {
 956     load1 = in(1)->in(1);
 957     load2 = in(1)->in(2);
 958   }
 959   if (load1 != nullptr && load1->is_Load() && phase->type(load1)->isa_long() &&
 960       (load2 == nullptr || (load2->is_Load() && phase->type(load2)->isa_long()))) {
 961     const TypePtr* adr_t1 = phase->type(load1->in(MemNode::Address))->isa_ptr();
 962     const TypePtr* adr_t2 = (load2 != nullptr) ? phase->type(load2->in(MemNode::Address))->isa_ptr() : nullptr;
 963     if (adr_t1 != nullptr && adr_t1->offset() == oopDesc::mark_offset_in_bytes() &&
 964         (load2 == nullptr || (adr_t2 != nullptr && adr_t2->offset() == in_bytes(Klass::prototype_header_offset())))) {
 965       if (mask == markWord::inline_type_pattern) {
 966         if (adr_t1->is_inlinetypeptr()) {
 967           set_req_X(1, in(2), phase);
 968           return this;
 969         } else if (!adr_t1->can_be_inline_type()) {
 970           set_req_X(1, phase->longcon(0), phase);
 971           return this;
 972         }
 973       } else if (mask == markWord::null_free_array_bit_in_place) {
 974         if (adr_t1->is_null_free()) {
 975           set_req_X(1, in(2), phase);
 976           return this;
 977         } else if (adr_t1->is_not_null_free()) {
 978           set_req_X(1, phase->longcon(0), phase);
 979           return this;
 980         }
 981       } else if (mask == markWord::flat_array_bit_in_place) {
 982         if (adr_t1->is_flat()) {
 983           set_req_X(1, in(2), phase);
 984           return this;
 985         } else if (adr_t1->is_not_flat()) {
 986           set_req_X(1, phase->longcon(0), phase);
 987           return this;
 988         }
 989       }
 990     }
 991   }
 992 
 993   return MulNode::Ideal(phase, can_reshape);
 994 }
 995 
 996 LShiftNode* LShiftNode::make(Node* in1, Node* in2, BasicType bt) {
 997   switch (bt) {
 998     case T_INT:
 999       return new LShiftINode(in1, in2);
1000     case T_LONG:
1001       return new LShiftLNode(in1, in2);
1002     default:
1003       fatal("Not implemented for %s", type2name(bt));
1004   }
1005   return nullptr;
1006 }
1007 
1008 // Returns whether the shift amount is constant. If so, sets count.
1009 static bool const_shift_count(PhaseGVN* phase, const Node* shift_node, int* count) {
1010   const TypeInt* tcount = phase->type(shift_node->in(2))->isa_int();
1011   if (tcount != nullptr && tcount->is_con()) {
1012     *count = tcount->get_con();
1013     return true;
1014   }
1015   return false;
1016 }
1017 
1018 // Returns whether the shift amount is constant. If so, sets real_shift and masked_shift.
1019 static bool mask_shift_amount(PhaseGVN* phase, const Node* shift_node, uint nBits, int& real_shift, uint& masked_shift) {
1020   if (const_shift_count(phase, shift_node, &real_shift)) {
1021     masked_shift = real_shift & (nBits - 1);
1022     return true;
1023   }
1024   return false;
1025 }
1026 
1027 // Convenience for when we don't care about the real amount
1028 static bool mask_shift_amount(PhaseGVN* phase, const Node* shift_node, uint nBits, uint& masked_shift) {
1029   int real_shift;
1030   return mask_shift_amount(phase, shift_node, nBits, real_shift, masked_shift);
1031 }
1032 
1033 // Use this in ::Ideal only with shiftNode == this!
1034 // Returns the masked shift amount if constant or 0 if not constant.
1035 static uint mask_and_replace_shift_amount(PhaseGVN* phase, Node* shift_node, uint nBits) {
1036   int real_shift;
1037   uint masked_shift;
1038   if (mask_shift_amount(phase, shift_node, nBits, real_shift, masked_shift)) {
1039     if (masked_shift == 0) {
1040       // Let Identity() handle 0 shift count.
1041       return 0;
1042     }
1043 
1044     if (real_shift != (int)masked_shift) {
1045       PhaseIterGVN* igvn = phase->is_IterGVN();
1046       if (igvn != nullptr) {
1047         igvn->_worklist.push(shift_node);
1048       }
1049       shift_node->set_req(2, phase->intcon(masked_shift)); // Replace shift count with masked value.
1050     }
1051     return masked_shift;
1052   }
1053   // Not a shift by a constant.
1054   return 0;
1055 }
1056 
1057 // Called with
1058 //   outer_shift = (_ << rhs_outer)
1059 // We are looking for the pattern:
1060 //   outer_shift = ((X << rhs_inner) << rhs_outer)
1061 //   where rhs_outer and rhs_inner are constant
1062 //   we denote inner_shift the nested expression (X << rhs_inner)
1063 //   con_inner = rhs_inner % nbits and con_outer = rhs_outer % nbits
1064 //   where nbits is the number of bits of the shifts
1065 //
1066 // There are 2 cases:
1067 // if con_outer + con_inner >= nbits => 0
1068 // if con_outer + con_inner < nbits => X << (con_outer + con_inner)
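// For example (T_INT): ((x << 3) << 2) collapses to (x << 5), while ((x << 30) << 5) becomes the constant 0.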
1069 static Node* collapse_nested_shift_left(PhaseGVN* phase, const Node* outer_shift, uint con_outer, BasicType bt) {
1070   assert(bt == T_LONG || bt == T_INT, "Unexpected type");
1071   const Node* inner_shift = outer_shift->in(1);
1072   if (inner_shift->Opcode() != Op_LShift(bt)) {
1073     return nullptr;
1074   }
1075 
1076   uint nbits = bits_per_java_integer(bt);
1077   uint con_inner;
1078   if (!mask_shift_amount(phase, inner_shift, nbits, con_inner)) {
1079     return nullptr;
1080   }
1081 
1082   if (con_inner == 0) {
1083     // We let the Identity() of the inner shift do its job.
1084     return nullptr;
1085   }
1086 
1087   if (con_outer + con_inner >= nbits) {
1088     // While it might be tempting to use
1089     // phase->zerocon(bt);
1090     // it would be incorrect: zerocon caches nodes, while Ideal is only allowed
1091     // to return a new node, this or nullptr, but not an old (cached) node.
1092     return ConNode::make(TypeInteger::zero(bt));
1093   }
1094 
  // con_outer + con_inner < nbits ==> the actual shift happens now
1096   Node* con0_plus_con1 = phase->intcon(con_outer + con_inner);
1097   return LShiftNode::make(inner_shift->in(1), con0_plus_con1, bt);
1098 }
1099 
1100 //------------------------------Identity---------------------------------------
1101 Node* LShiftINode::Identity(PhaseGVN* phase) {
1102   return IdentityIL(phase, T_INT);
1103 }
1104 
1105 Node* LShiftNode::IdealIL(PhaseGVN* phase, bool can_reshape, BasicType bt) {
1106   uint con = mask_and_replace_shift_amount(phase, this, bits_per_java_integer(bt));
1107   if (con == 0) {
1108     return nullptr;
1109   }
1110 
1111   // If the right input is a constant, and the left input is an add of a
1112   // constant, flatten the tree: (X+con1)<<con0 ==> X<<con0 + con1<<con0
1113   Node* add1 = in(1);
1114   int add1_op = add1->Opcode();
1115   if (add1_op == Op_Add(bt)) {    // Left input is an add?
1116     assert(add1 != add1->in(1), "dead loop in LShiftINode::Ideal");
1117 
1118     // Transform is legal, but check for profit.  Avoid breaking 'i2s'
1119     // and 'i2b' patterns which typically fold into 'StoreC/StoreB'.
1120     if (bt != T_INT || con < 16) {
1121       // Left input is an add of the same number?
1122       if (con != (bits_per_java_integer(bt) - 1) && add1->in(1) == add1->in(2)) {
1123         // Convert "(x + x) << c0" into "x << (c0 + 1)"
1124         // In general, this optimization cannot be applied for c0 == 31 (for LShiftI) since
1125         // 2x << 31 != x << 32 = x << 0 = x (e.g. x = 1: 2 << 31 = 0 != 1)
        // or c0 == 63 (for LShiftL) because:
1127         // (x + x) << 63 = 2x << 63, while
1128         // (x + x) << 63 --transform--> x << 64 = x << 0 = x (!= 2x << 63, for example for x = 1)
1129         // According to the Java spec, chapter 15.19, we only consider the six lowest-order bits of the right-hand operand
        // (i.e. "right-hand operand" & 0b111111). Therefore, x << 64 is the same as x << 0 (64 & 0b111111 = 0b1000000 & 0b0111111 = 0).
1131         return LShiftNode::make(add1->in(1), phase->intcon(con + 1), bt);
1132       }
1133 
1134       // Left input is an add of a constant?
1135       const TypeInteger* t12 = phase->type(add1->in(2))->isa_integer(bt);
1136       if (t12 != nullptr && t12->is_con()) { // Left input is an add of a con?
1137         // Compute X << con0
1138         Node* lsh = phase->transform(LShiftNode::make(add1->in(1), in(2), bt));
1139         // Compute X<<con0 + (con1<<con0)
1140         return AddNode::make(lsh, phase->integercon(java_shift_left(t12->get_con_as_long(bt), con, bt), bt), bt);
1141       }
1142     }
1143   }
1144 
1145   // Check for "(x >> C1) << C2"
1146   if (add1_op == Op_RShift(bt) || add1_op == Op_URShift(bt)) {
1147     int add1Con = 0;
1148     const_shift_count(phase, add1, &add1Con);
1149 
1150     // Special case C1 == C2, which just masks off low bits
1151     if (add1Con > 0 && con == (uint)add1Con) {
1152       // Convert to "(x & -(1 << C2))"
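      // For example, "(x >> 4) << 4" becomes "x & -16".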
1153       return  MulNode::make_and(add1->in(1), phase->integercon(java_negate(java_shift_left(1, con, bt), bt), bt), bt);
1154     } else {
1155       // Wait until the right shift has been sharpened to the correct count
1156       if (add1Con > 0 && (uint)add1Con < bits_per_java_integer(bt)) {
1157         // As loop parsing can produce LShiftI nodes, we should wait until the graph is fully formed
1158         // to apply optimizations, otherwise we can inadvertently stop vectorization opportunities.
1159         if (phase->is_IterGVN()) {
1160           if (con > (uint)add1Con) {
1161             // Creates "(x << (C2 - C1)) & -(1 << C2)"
1162             Node* lshift = phase->transform(LShiftNode::make(add1->in(1), phase->intcon(con - add1Con), bt));
1163             return MulNode::make_and(lshift, phase->integercon(java_negate(java_shift_left(1, con, bt), bt), bt), bt);
1164           } else {
1165             assert(con < (uint)add1Con, "must be (%d < %d)", con, add1Con);
1166             // Creates "(x >> (C1 - C2)) & -(1 << C2)"
1167 
1168             // Handle logical and arithmetic shifts
1169             Node* rshift;
1170             if (add1_op == Op_RShift(bt)) {
1171               rshift = phase->transform(RShiftNode::make(add1->in(1), phase->intcon(add1Con - con), bt));
1172             } else {
1173               rshift = phase->transform(URShiftNode::make(add1->in(1), phase->intcon(add1Con - con), bt));
1174             }
1175 
1176             return MulNode::make_and(rshift, phase->integercon(java_negate(java_shift_left(1,  con, bt)), bt), bt);
1177           }
1178         } else {
1179           phase->record_for_igvn(this);
1180         }
1181       }
1182     }
1183   }
1184 
1185   // Check for "((x >> C1) & Y) << C2"
1186   if (add1_op == Op_And(bt)) {
1187     Node* add2 = add1->in(1);
1188     int add2_op = add2->Opcode();
1189     if (add2_op == Op_RShift(bt) || add2_op == Op_URShift(bt)) {
1190       // Special case C1 == C2, which just masks off low bits
1191       if (add2->in(2) == in(2)) {
1192         // Convert to "(x & (Y << C2))"
1193         Node* y_sh = phase->transform(LShiftNode::make(add1->in(2), phase->intcon(con), bt));
1194         return MulNode::make_and(add2->in(1), y_sh, bt);
1195       }
1196 
1197       int add2Con = 0;
1198       const_shift_count(phase, add2, &add2Con);
1199       if (add2Con > 0 && (uint)add2Con < bits_per_java_integer(bt)) {
1200         if (phase->is_IterGVN()) {
1201           // Convert to "((x >> C1) << C2) & (Y << C2)"
1202 
1203           // Make "(x >> C1) << C2", which will get folded away by the rule above
1204           Node* x_sh = phase->transform(LShiftNode::make(add2, phase->intcon(con), bt));
1205           // Make "Y << C2", which will simplify when Y is a constant
1206           Node* y_sh = phase->transform(LShiftNode::make(add1->in(2), phase->intcon(con), bt));
1207 
1208           return MulNode::make_and(x_sh, y_sh, bt);
1209         } else {
1210           phase->record_for_igvn(this);
1211         }
1212       }
1213     }
1214   }
1215 
1216   // Check for ((x & ((1<<(32-c0))-1)) << c0) which ANDs off high bits
1217   // before shifting them away.
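  // For example, "(x & 0x00FFFFFF) << 8" becomes "x << 8".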
1218   const jlong bits_mask = max_unsigned_integer(bt) >> con;
1219   assert(bt != T_INT || bits_mask == right_n_bits(bits_per_java_integer(bt)-con), "inconsistent");
1220   if (add1_op == Op_And(bt) &&
1221       phase->type(add1->in(2)) == TypeInteger::make(bits_mask, bt)) {
1222     return LShiftNode::make(add1->in(1), in(2), bt);
1223   }
1224 
1225   // Collapse nested left-shifts with constant rhs:
1226   // (X << con1) << con2 ==> X << (con1 + con2)
1227   Node* doubleShift = collapse_nested_shift_left(phase, this, con, bt);
1228   if (doubleShift != nullptr) {
1229     return doubleShift;
1230   }
1231 
1232   return nullptr;
1233 }
1234 
1235 //------------------------------Ideal------------------------------------------
1236 Node* LShiftINode::Ideal(PhaseGVN *phase, bool can_reshape) {
1237   return IdealIL(phase, can_reshape, T_INT);
1238 }
1239 
1240 const Type* LShiftNode::ValueIL(PhaseGVN* phase, BasicType bt) const {
1241   const Type* t1 = phase->type(in(1));
1242   const Type* t2 = phase->type(in(2));
1243   // Either input is TOP ==> the result is TOP
1244   if (t1 == Type::TOP) {
1245     return Type::TOP;
1246   }
1247   if (t2 == Type::TOP) {
1248     return Type::TOP;
1249   }
1250 
1251   // Left input is ZERO ==> the result is ZERO.
1252   if (t1 == TypeInteger::zero(bt)) {
1253     return TypeInteger::zero(bt);
1254   }
1255   // Shift by zero does nothing
1256   if (t2 == TypeInt::ZERO) {
1257     return t1;
1258   }
1259 
1260   // Either input is BOTTOM ==> the result is BOTTOM
1261   if ((t1 == TypeInteger::bottom(bt)) || (t2 == TypeInt::INT) ||
1262       (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM)) {
1263     return TypeInteger::bottom(bt);
1264   }
1265 
1266   const TypeInteger* r1 = t1->is_integer(bt); // Handy access
1267   const TypeInt* r2 = t2->is_int(); // Handy access
1268 
1269   if (!r2->is_con()) {
1270     return TypeInteger::bottom(bt);
1271   }
1272 
1273   uint shift = r2->get_con();
1274   shift &= bits_per_java_integer(bt) - 1;  // semantics of Java shifts
1275   // Shift by a multiple of 32/64 does nothing:
1276   if (shift == 0) {
1277     return t1;
1278   }
1279 
1280   // If the shift is a constant, shift the bounds of the type,
1281   // unless this could lead to an overflow.
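       // For example, [0, 3] << 2 becomes [0, 12], but for [0, 0x40000000] << 2 the
       // upper bound would wrap around, so we return the bottom type instead.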
1282   if (!r1->is_con()) {
1283     jlong lo = r1->lo_as_long(), hi = r1->hi_as_long();
1284 #ifdef ASSERT
1285     if (bt == T_INT) {
1286       jint lo_int = r1->is_int()->_lo, hi_int = r1->is_int()->_hi;
1287       assert((java_shift_right(java_shift_left(lo, shift, bt),  shift, bt) == lo) == (((lo_int << shift) >> shift) == lo_int), "inconsistent");
1288       assert((java_shift_right(java_shift_left(hi, shift, bt),  shift, bt) == hi) == (((hi_int << shift) >> shift) == hi_int), "inconsistent");
1289     }
1290 #endif
1291     if (java_shift_right(java_shift_left(lo, shift, bt),  shift, bt) == lo &&
1292         java_shift_right(java_shift_left(hi, shift, bt), shift, bt) == hi) {
1293       // No overflow.  The range shifts up cleanly.
1294       return TypeInteger::make(java_shift_left(lo, shift, bt),
1295                                java_shift_left(hi,  shift, bt),
1296                                MAX2(r1->_widen, r2->_widen), bt);
1297     }
1298     return TypeInteger::bottom(bt);
1299   }
1300 
1301   return TypeInteger::make(java_shift_left(r1->get_con_as_long(bt), shift, bt), bt);
1302 }
1303 
1304 //------------------------------Value------------------------------------------
1305 const Type* LShiftINode::Value(PhaseGVN* phase) const {
1306   return ValueIL(phase, T_INT);
1307 }
1308 
1309 Node* LShiftNode::IdentityIL(PhaseGVN* phase, BasicType bt) {
1310   int count = 0;
1311   if (const_shift_count(phase, this, &count) && (count & (bits_per_java_integer(bt) - 1)) == 0) {
1312     // Shift by a multiple of 32/64 does nothing
1313     return in(1);
1314   }
1315   return this;
1316 }
1317 
1318 //=============================================================================
1319 //------------------------------Identity---------------------------------------
1320 Node* LShiftLNode::Identity(PhaseGVN* phase) {
1321   return IdentityIL(phase, T_LONG);
1322 }
1323 
1324 //------------------------------Ideal------------------------------------------
1325 Node* LShiftLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1326   return IdealIL(phase, can_reshape, T_LONG);
1327 }
1328 
1329 //------------------------------Value------------------------------------------
1330 const Type* LShiftLNode::Value(PhaseGVN* phase) const {
1331   return ValueIL(phase, T_LONG);
1332 }
1333 
1334 RShiftNode* RShiftNode::make(Node* in1, Node* in2, BasicType bt) {
1335   switch (bt) {
1336     case T_INT:
1337       return new RShiftINode(in1, in2);
1338     case T_LONG:
1339       return new RShiftLNode(in1, in2);
1340     default:
1341       fatal("Not implemented for %s", type2name(bt));
1342   }
1343   return nullptr;
1344 }
1345 
1346 
1347 //=============================================================================
1348 //------------------------------Identity---------------------------------------
1349 Node* RShiftNode::IdentityIL(PhaseGVN* phase, BasicType bt) {
1350   int count = 0;
1351   if (const_shift_count(phase, this, &count)) {
1352     if ((count & (bits_per_java_integer(bt) - 1)) == 0) {
1353       // Shift by a multiple of 32/64 does nothing
1354       return in(1);
1355     }
1356     // Check for useless sign-masking
1357     if (in(1)->Opcode() == Op_LShift(bt) &&
1358         in(1)->req() == 3 &&
1359         in(1)->in(2) == in(2)) {
1360       count &= bits_per_java_integer(bt) - 1; // semantics of Java shifts
1361       // Compute masks for which this shifting doesn't change
1362       jlong lo = (CONST64(-1) << (bits_per_java_integer(bt) - ((uint)count)-1)); // FFFF8000
1363       jlong hi = ~lo;                                                            // 00007FFF
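           // For example, count == 16 on T_INT gives lo == -32768 and hi == 32767, so
           // (x << 16) >> 16 is a nop whenever x is already in the 16-bit signed range.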
1364       const TypeInteger* t11 = phase->type(in(1)->in(1))->isa_integer(bt);
1365       if (t11 == nullptr) {
1366         return this;
1367       }
1368       // Does actual value fit inside of mask?
1369       if (lo <= t11->lo_as_long() && t11->hi_as_long() <= hi) {
1370         return in(1)->in(1);      // Then shifting is a nop
1371       }
1372     }
1373   }
1374   return this;
1375 }
1376 
1377 Node* RShiftINode::Identity(PhaseGVN* phase) {
1378   return IdentityIL(phase, T_INT);
1379 }
1380 
1381 Node* RShiftNode::IdealIL(PhaseGVN* phase, bool can_reshape, BasicType bt) {
1382   // Inputs may be TOP if they are dead.
1383   const TypeInteger* t1 = phase->type(in(1))->isa_integer(bt);
1384   if (t1 == nullptr) {
1385     return NodeSentinel;        // Left input is dead (TOP), not an integer
1386   }
1387   int shift = mask_and_replace_shift_amount(phase, this, bits_per_java_integer(bt));
1388   if (shift == 0) {
1389     return NodeSentinel;
1390   }
1391 
1392   // Check for (x & 0xFF000000) >> 24, whose mask can be made smaller,
1393   // and convert it to (x >> 24) & (0xFF000000 >> 24) = x >> 24.
1394   // Such expressions arise normally from shift chains like (byte)(x >> 24).
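       // For example, (x & 0x7F00) >> 8 becomes (x >> 8) & 0x7F.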
1395   const Node* and_node = in(1);
1396   if (and_node->Opcode() != Op_And(bt)) {
1397     return nullptr;
1398   }
1399   const TypeInteger* mask_t = phase->type(and_node->in(2))->isa_integer(bt);
1400   if (mask_t != nullptr && mask_t->is_con()) {
1401     jlong maskbits = mask_t->get_con_as_long(bt);
1402     // Convert to "(x >> shift) & (mask >> shift)"
1403     Node* shr_nomask = phase->transform(RShiftNode::make(and_node->in(1), in(2), bt));
1404     return MulNode::make_and(shr_nomask, phase->integercon(maskbits >> shift, bt), bt);
1405   }
1406   return nullptr;
1407 }
1408 
1409 Node* RShiftINode::Ideal(PhaseGVN* phase, bool can_reshape) {
1410   Node* progress = IdealIL(phase, can_reshape, T_INT);
1411   if (progress == NodeSentinel) {
1412     return nullptr;
1413   }
1414   if (progress != nullptr) {
1415     return progress;
1416   }
1417   int shift = mask_and_replace_shift_amount(phase, this, BitsPerJavaInteger);
1418   assert(shift != 0, "handled by IdealIL");
1419 
1420   // Check for "(short[i] <<16)>>16" which simply sign-extends
1421   const Node *shl = in(1);
1422   if (shl->Opcode() != Op_LShiftI) {
1423     return nullptr;
1424   }
1425 
1426   const TypeInt* left_shift_t = phase->type(shl->in(2))->isa_int();
1427   if (left_shift_t == nullptr) {
1428     return nullptr;
1429   }
1430   if (shift == 16 && left_shift_t->is_con(16)) {
1431     Node *ld = shl->in(1);
1432     if (ld->Opcode() == Op_LoadS) {
1433       // Sign extension is just useless here.  Return a RShiftI of zero instead
1434       // of returning 'ld' directly.  We cannot return an old Node directly as
1435       // that is the job of 'Identity' calls, and Identity calls only work on
1436       // direct inputs ('ld' is an extra Node removed from 'this').  The
1437       // combined optimization requires that Identity only return direct inputs.
1438       set_req_X(1, ld, phase);
1439       set_req_X(2, phase->intcon(0), phase);
1440       return this;
1441     }
1442     else if (can_reshape &&
1443              ld->Opcode() == Op_LoadUS &&
1444              ld->outcnt() == 1 && ld->unique_out() == shl) {
1445       // Replace zero-extension-load with sign-extension-load
1446       return ld->as_Load()->convert_to_signed_load(*phase);
         }
1447   }
1448 
1449   // Check for "(byte[i] <<24)>>24" which simply sign-extends
1450   if (shift == 24 && left_shift_t->is_con(24)) {
1451     Node *ld = shl->in(1);
1452     if (ld->Opcode() == Op_LoadB) {
1453       // Sign extension is just useless here
1454       set_req_X(1, ld, phase);
1455       set_req_X(2, phase->intcon(0), phase);
1456       return this;
1457     }
1458   }
1459 
1460   return nullptr;
1461 }
1462 
1463 const Type* RShiftNode::ValueIL(PhaseGVN* phase, BasicType bt) const {
1464   const Type* t1 = phase->type(in(1));
1465   const Type* t2 = phase->type(in(2));
1466   // Either input is TOP ==> the result is TOP
1467   if (t1 == Type::TOP) {
1468     return Type::TOP;
1469   }
1470   if (t2 == Type::TOP) {
1471     return Type::TOP;
1472   }
1473 
1474   // Left input is ZERO ==> the result is ZERO.
1475   if (t1 == TypeInteger::zero(bt)) {
1476     return TypeInteger::zero(bt);
1477   }
1478   // Shift by zero does nothing
1479   if (t2 == TypeInt::ZERO) {
1480     return t1;
1481   }
1482 
1483   // Either input is BOTTOM ==> the result is BOTTOM
1484   if (t1 == Type::BOTTOM || t2 == Type::BOTTOM) {
1485     return TypeInteger::bottom(bt);
1486   }
1487 
1488   const TypeInteger* r1 = t1->isa_integer(bt);
1489   const TypeInt* r2 = t2->isa_int();
1490 
1491   // If the shift is a constant, just shift the bounds of the type.
1492   // For example, if the shift is 31/63, we just propagate sign bits.
1493   if (!r1->is_con() && r2->is_con()) {
1494     uint shift = r2->get_con();
1495     shift &= bits_per_java_integer(bt) - 1;  // semantics of Java shifts
1496     // Shift by a multiple of 32/64 does nothing:
1497     if (shift == 0) {
1498       return t1;
1499     }
1500     // Calculate reasonably aggressive bounds for the result.
1501     // This is necessary if we are to correctly type things
1502     // like (x<<24>>24) == ((byte)x).
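         // For example, [-100, 50] >> 2 becomes [-25, 12].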
1503     jlong lo = r1->lo_as_long() >> (jint)shift;
1504     jlong hi = r1->hi_as_long() >> (jint)shift;
1505     assert(lo <= hi, "must have valid bounds");
1506 #ifdef ASSERT
1507     if (bt == T_INT) {
1508       jint lo_verify = checked_cast<jint>(r1->lo_as_long()) >> (jint)shift;
1509       jint hi_verify = checked_cast<jint>(r1->hi_as_long()) >> (jint)shift;
1510       assert((checked_cast<jint>(lo) == lo_verify) && (checked_cast<jint>(hi) == hi_verify), "inconsistent");
1511     }
1512 #endif
1513     const TypeInteger* ti = TypeInteger::make(lo, hi, MAX2(r1->_widen,r2->_widen), bt);
1514 #ifdef ASSERT
1515     // Make sure we get the sign-capture idiom correct.
1516     if (shift == bits_per_java_integer(bt) - 1) {
1517       if (r1->lo_as_long() >= 0) {
1518         assert(ti == TypeInteger::zero(bt),    ">>31/63 of + is  0");
1519       }
1520       if (r1->hi_as_long() <  0) {
1521         assert(ti == TypeInteger::minus_1(bt), ">>31/63 of - is -1");
1522       }
1523     }
1524 #endif
1525     return ti;
1526   }
1527 
1528   if (!r1->is_con() || !r2->is_con()) {
1529     // If the left input is non-negative the result must also be non-negative, regardless of what the right input is.
1530     if (r1->lo_as_long() >= 0) {
1531       return TypeInteger::make(0, r1->hi_as_long(), MAX2(r1->_widen, r2->_widen), bt);
1532     }
1533 
1534     // Conversely, if the left input is negative then the result must be negative.
1535     if (r1->hi_as_long() <= -1) {
1536       return TypeInteger::make(r1->lo_as_long(), -1, MAX2(r1->_widen, r2->_widen), bt);
1537     }
1538 
1539     return TypeInteger::bottom(bt);
1540   }
1541 
1542   // Signed shift right
1543   return TypeInteger::make(r1->get_con_as_long(bt) >> (r2->get_con() & (bits_per_java_integer(bt) - 1)), bt);
1544 }
1545 
1546 const Type* RShiftINode::Value(PhaseGVN* phase) const {
1547   return ValueIL(phase, T_INT);
1548 }
1549 
1550 //=============================================================================
1551 //------------------------------Identity---------------------------------------
1552 Node* RShiftLNode::Identity(PhaseGVN* phase) {
1553   return IdentityIL(phase, T_LONG);
1554 }
1555 
1556 Node* RShiftLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1557   Node* progress = IdealIL(phase, can_reshape, T_LONG);
1558   if (progress == NodeSentinel) {
1559     return nullptr;
1560   }
1561   return progress;
1562 }
1563 
1564 const Type* RShiftLNode::Value(PhaseGVN* phase) const {
1565   return ValueIL(phase, T_LONG);
1566 }
1567 
1568 URShiftNode* URShiftNode::make(Node* in1, Node* in2, BasicType bt) {
1569   switch (bt) {
1570     case T_INT:
1571       return new URShiftINode(in1, in2);
1572     case T_LONG:
1573       return new URShiftLNode(in1, in2);
1574     default:
1575       fatal("Not implemented for %s", type2name(bt));
1576   }
1577   return nullptr;
1578 }
1579 
1580 //=============================================================================
1581 //------------------------------Identity---------------------------------------
1582 Node* URShiftINode::Identity(PhaseGVN* phase) {
1583   int count = 0;
1584   if (const_shift_count(phase, this, &count) && (count & (BitsPerJavaInteger - 1)) == 0) {
1585     // Shift by a multiple of 32 does nothing
1586     return in(1);
1587   }
1588 
1589   // Check for "((x << LogBytesPerWord) + (wordSize-1)) >> LogBytesPerWord" which is just "x".
1590   // Happens during new-array length computation.
1591   // Safe if 'x' is in the range [0..(max_int>>LogBytesPerWord)]
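       // For example, on a 64-bit VM (wordSize == 8, LogBytesPerWord == 3):
       // ((x << 3) + 7) >>> 3 == x for 0 <= x <= (max_jint >> 3), because the "+ 7"
       // cannot carry into bit 3.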
1592   Node *add = in(1);
1593   if (add->Opcode() == Op_AddI) {
1594     const TypeInt *t2 = phase->type(add->in(2))->isa_int();
1595     if (t2 && t2->is_con(wordSize - 1) &&
1596         add->in(1)->Opcode() == Op_LShiftI) {
1597       // Check that shift_counts are LogBytesPerWord.
1598       Node          *lshift_count   = add->in(1)->in(2);
1599       const TypeInt *t_lshift_count = phase->type(lshift_count)->isa_int();
1600       if (t_lshift_count && t_lshift_count->is_con(LogBytesPerWord) &&
1601           t_lshift_count == phase->type(in(2))) {
1602         Node          *x   = add->in(1)->in(1);
1603         const TypeInt *t_x = phase->type(x)->isa_int();
1604         if (t_x != nullptr && 0 <= t_x->_lo && t_x->_hi <= (max_jint>>LogBytesPerWord)) {
1605           return x;
1606         }
1607       }
1608     }
1609   }
1610 
1611   return (phase->type(in(2))->higher_equal(TypeInt::ZERO)) ? in(1) : this;
1612 }
1613 
1614 //------------------------------Ideal------------------------------------------
1615 Node* URShiftINode::Ideal(PhaseGVN* phase, bool can_reshape) {
1616   int con = mask_and_replace_shift_amount(phase, this, BitsPerJavaInteger);
1617   if (con == 0) {
1618     return nullptr;
1619   }
1620 
1621   // We'll be wanting the right-shift amount as a mask of that many bits
1622   const int mask = right_n_bits(BitsPerJavaInteger - con);
1623 
1624   int in1_op = in(1)->Opcode();
1625 
1626   // Check for ((x>>>a)>>>b) and replace with (x>>>(a+b)) when a+b < 32
1627   if( in1_op == Op_URShiftI ) {
1628     const TypeInt *t12 = phase->type( in(1)->in(2) )->isa_int();
1629     if( t12 && t12->is_con() ) { // Right input is a constant
1630       assert( in(1) != in(1)->in(1), "dead loop in URShiftINode::Ideal" );
1631       const int con2 = t12->get_con() & 31; // Shift count is always masked
1632       const int con3 = con+con2;
1633       if( con3 < 32 )           // Only merge shifts if total is < 32
1634         return new URShiftINode( in(1)->in(1), phase->intcon(con3) );
1635     }
1636   }
1637 
1638   // Check for ((x << z) + Y) >>> z.  Replace with (x + (Y >>> z)) & z-mask.
1639   // The idiom for rounding to a power of 2 is "(Q+(2^z-1)) >>> z".
1640   // If Q is "X << z" the rounding is useless.  Look for patterns like
1641   // ((X<<Z) + Y) >>> Z  and replace with (X + Y>>>Z) & Z-mask.
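       // For example, with Z == 3: ((X << 3) + Y) >>> 3 becomes (X + (Y >>> 3)) & 0x1FFFFFFF.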
1642   Node *add = in(1);
1643   const TypeInt *t2 = phase->type(in(2))->isa_int();
1644   if (in1_op == Op_AddI) {
1645     Node *lshl = add->in(1);
1646     if( lshl->Opcode() == Op_LShiftI &&
1647         phase->type(lshl->in(2)) == t2 ) {
1648       Node *y_z = phase->transform( new URShiftINode(add->in(2),in(2)) );
1649       Node *sum = phase->transform( new AddINode( lshl->in(1), y_z ) );
1650       return new AndINode( sum, phase->intcon(mask) );
1651     }
1652   }
1653 
1654   // Check for (x & mask) >>> z.  Replace with (x >>> z) & (mask >>> z)
1655   // This shortens the mask.  Also, if we are extracting a high byte and
1656   // storing it to a buffer, the mask will be removed completely.
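       // For example, (x & 0xFF00) >>> 8 becomes (x >>> 8) & 0xFF.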
1657   Node *andi = in(1);
1658   if( in1_op == Op_AndI ) {
1659     const TypeInt *t3 = phase->type( andi->in(2) )->isa_int();
1660     if( t3 && t3->is_con() ) { // Right input is a constant
1661       jint mask2 = t3->get_con();
1662       mask2 >>= con;  // *signed* shift downward (high-order zeroes do not help)
1663       // The negative values are easier to materialize than positive ones.
1664       // A typical case from address arithmetic is ((x & ~15) >>> 4).
1665       // It's better to change that to ((x >>> 4) & ~0) versus
1666       // ((x >>> 4) & 0x0FFFFFFF).  The difference is greatest in LP64.
1667       Node *newshr = phase->transform( new URShiftINode(andi->in(1), in(2)) );
1668       return new AndINode(newshr, phase->intcon(mask2));
1669     }
1670   }
1671 
1672   // Check for "(X << z ) >>> z" which simply zero-extends
1673   Node *shl = in(1);
1674   if( in1_op == Op_LShiftI &&
1675       phase->type(shl->in(2)) == t2 )
1676     return new AndINode( shl->in(1), phase->intcon(mask) );
1677 
1678   // Check for (x >> n) >>> 31. Replace with (x >>> 31)
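       // The arithmetic shift by a constant preserves the sign bit, so the final
       // >>> 31 extracts the sign bit of x itself.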
1679   Node *shr = in(1);
1680   if ( in1_op == Op_RShiftI ) {
1681     Node *in11 = shr->in(1);
1682     Node *in12 = shr->in(2);
1683     const TypeInt *t11 = phase->type(in11)->isa_int();
1684     const TypeInt *t12 = phase->type(in12)->isa_int();
1685     if ( t11 && t2 && t2->is_con(31) && t12 && t12->is_con() ) {
1686       return new URShiftINode(in11, phase->intcon(31));
1687     }
1688   }
1689 
1690   return nullptr;
1691 }
1692 
1693 //------------------------------Value------------------------------------------
1694 // A URShiftINode shifts its input1 right by input2 amount.
1695 const Type* URShiftINode::Value(PhaseGVN* phase) const {
1696   // (This is a near clone of RShiftINode::Value.)
1697   const Type *t1 = phase->type( in(1) );
1698   const Type *t2 = phase->type( in(2) );
1699   // Either input is TOP ==> the result is TOP
1700   if( t1 == Type::TOP ) return Type::TOP;
1701   if( t2 == Type::TOP ) return Type::TOP;
1702 
1703   // Left input is ZERO ==> the result is ZERO.
1704   if( t1 == TypeInt::ZERO ) return TypeInt::ZERO;
1705   // Shift by zero does nothing
1706   if( t2 == TypeInt::ZERO ) return t1;
1707 
1708   // Either input is BOTTOM ==> the result is BOTTOM
1709   if (t1 == Type::BOTTOM || t2 == Type::BOTTOM)
1710     return TypeInt::INT;
1711 
1712   if (t2 == TypeInt::INT)
1713     return TypeInt::INT;
1714 
1715   const TypeInt *r1 = t1->is_int();     // Handy access
1716   const TypeInt *r2 = t2->is_int();     // Handy access
1717 
1718   if (r2->is_con()) {
1719     uint shift = r2->get_con();
1720     shift &= BitsPerJavaInteger-1;  // semantics of Java shifts
1721     // Shift by a multiple of 32 does nothing:
1722     if (shift == 0)  return t1;
1723     // Calculate reasonably aggressive bounds for the result.
1724     jint lo = (juint)r1->_lo >> (juint)shift;
1725     jint hi = (juint)r1->_hi >> (juint)shift;
1726     if (r1->_hi >= 0 && r1->_lo < 0) {
1727       // If the type has both negative and positive values,
1728       // there are two separate sub-domains to worry about:
1729       // The positive half and the negative half.
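           // For example, [-4, 10] >>> 1: the negative half maps into
           // [0x7FFFFFFE, 0x7FFFFFFF] and the positive half into [0, 5],
           // so the result type is widened to [0, max_jint].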
1730       jint neg_lo = lo;
1731       jint neg_hi = (juint)-1 >> (juint)shift;
1732       jint pos_lo = (juint) 0 >> (juint)shift;
1733       jint pos_hi = hi;
1734       lo = MIN2(neg_lo, pos_lo);  // == 0
1735       hi = MAX2(neg_hi, pos_hi);  // == -1 >>> shift;
1736     }
1737     assert(lo <= hi, "must have valid bounds");
1738     const TypeInt* ti = TypeInt::make(lo, hi, MAX2(r1->_widen,r2->_widen));
1739     #ifdef ASSERT
1740     // Make sure we get the sign-capture idiom correct.
1741     if (shift == BitsPerJavaInteger-1) {
1742       if (r1->_lo >= 0) assert(ti == TypeInt::ZERO, ">>>31 of + is 0");
1743       if (r1->_hi < 0)  assert(ti == TypeInt::ONE,  ">>>31 of - is +1");
1744     }
1745     #endif
1746     return ti;
1747   }
1748 
1749   //
1750   // Do not support shifted oops in info for GC
1751   //
1752   // else if( t1->base() == Type::InstPtr ) {
1753   //
1754   //   const TypeInstPtr *o = t1->is_instptr();
1755   //   if( t1->singleton() )
1756   //     return TypeInt::make( ((uint32_t)o->const_oop() + o->_offset) >> shift );
1757   // }
1758   // else if( t1->base() == Type::KlassPtr ) {
1759   //   const TypeKlassPtr *o = t1->is_klassptr();
1760   //   if( t1->singleton() )
1761   //     return TypeInt::make( ((uint32_t)o->const_oop() + o->_offset) >> shift );
1762   // }
1763 
1764   return TypeInt::INT;
1765 }
1766 
1767 //=============================================================================
1768 //------------------------------Identity---------------------------------------
1769 Node* URShiftLNode::Identity(PhaseGVN* phase) {
1770   int count = 0;
1771   if (const_shift_count(phase, this, &count) && (count & (BitsPerJavaLong - 1)) == 0) {
1772     // Shift by a multiple of 64 does nothing
1773     return in(1);
1774   }
1775   return this;
1776 }
1777 
1778 //------------------------------Ideal------------------------------------------
1779 Node* URShiftLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1780   int con = mask_and_replace_shift_amount(phase, this, BitsPerJavaLong);
1781   if (con == 0) {
1782     return nullptr;
1783   }
1784 
1785   // We'll be wanting the right-shift amount as a mask of that many bits
1786   const jlong mask = jlong(max_julong >> con);
1787 
1788   // Check for ((x << z) + Y) >>> z.  Replace with (x + (Y >>> z)) & z-mask.
1789   // The idiom for rounding to a power of 2 is "(Q+(2^z-1)) >>> z".
1790   // If Q is "X << z" the rounding is useless.  Look for patterns like
1791   // ((X<<Z) + Y) >>> Z  and replace with (X + Y>>>Z) & Z-mask.
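       // For example, with Z == 4: ((X << 4) + Y) >>> 4 becomes
       // (X + (Y >>> 4)) & 0x0FFFFFFFFFFFFFFF.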
1792   Node *add = in(1);
1793   const TypeInt *t2 = phase->type(in(2))->isa_int();
1794   if (add->Opcode() == Op_AddL) {
1795     Node *lshl = add->in(1);
1796     if( lshl->Opcode() == Op_LShiftL &&
1797         phase->type(lshl->in(2)) == t2 ) {
1798       Node *y_z = phase->transform( new URShiftLNode(add->in(2),in(2)) );
1799       Node *sum = phase->transform( new AddLNode( lshl->in(1), y_z ) );
1800       return new AndLNode( sum, phase->longcon(mask) );
1801     }
1802   }
1803 
1804   // Check for (x & mask) >>> z.  Replace with (x >>> z) & (mask >>> z)
1805   // This shortens the mask.  Also, if we are extracting a high byte and
1806   // storing it to a buffer, the mask will be removed completely.
1807   Node *andi = in(1);
1808   if( andi->Opcode() == Op_AndL ) {
1809     const TypeLong *t3 = phase->type( andi->in(2) )->isa_long();
1810     if( t3 && t3->is_con() ) { // Right input is a constant
1811       jlong mask2 = t3->get_con();
1812       mask2 >>= con;  // *signed* shift downward (high-order zeroes do not help)
1813       Node *newshr = phase->transform( new URShiftLNode(andi->in(1), in(2)) );
1814       return new AndLNode(newshr, phase->longcon(mask2));
1815     }
1816   }
1817 
1818   // Check for "(X << z ) >>> z" which simply zero-extends
1819   Node *shl = in(1);
1820   if( shl->Opcode() == Op_LShiftL &&
1821       phase->type(shl->in(2)) == t2 )
1822     return new AndLNode( shl->in(1), phase->longcon(mask) );
1823 
1824   // Check for (x >> n) >>> 63. Replace with (x >>> 63)
1825   Node *shr = in(1);
1826   if ( shr->Opcode() == Op_RShiftL ) {
1827     Node *in11 = shr->in(1);
1828     Node *in12 = shr->in(2);
1829     const TypeLong *t11 = phase->type(in11)->isa_long();
1830     const TypeInt *t12 = phase->type(in12)->isa_int();
1831     if ( t11 && t2 && t2->is_con(63) && t12 && t12->is_con() ) {
1832       return new URShiftLNode(in11, phase->intcon(63));
1833     }
1834   }
1835   return nullptr;
1836 }
1837 
1838 //------------------------------Value------------------------------------------
1839 // A URShiftLNode shifts its input1 right by input2 amount.
1840 const Type* URShiftLNode::Value(PhaseGVN* phase) const {
1841   // (This is a near clone of RShiftLNode::Value.)
1842   const Type *t1 = phase->type( in(1) );
1843   const Type *t2 = phase->type( in(2) );
1844   // Either input is TOP ==> the result is TOP
1845   if( t1 == Type::TOP ) return Type::TOP;
1846   if( t2 == Type::TOP ) return Type::TOP;
1847 
1848   // Left input is ZERO ==> the result is ZERO.
1849   if( t1 == TypeLong::ZERO ) return TypeLong::ZERO;
1850   // Shift by zero does nothing
1851   if( t2 == TypeInt::ZERO ) return t1;
1852 
1853   // Either input is BOTTOM ==> the result is BOTTOM
1854   if (t1 == Type::BOTTOM || t2 == Type::BOTTOM)
1855     return TypeLong::LONG;
1856 
1857   if (t2 == TypeInt::INT)
1858     return TypeLong::LONG;
1859 
1860   const TypeLong *r1 = t1->is_long(); // Handy access
1861   const TypeInt  *r2 = t2->is_int (); // Handy access
1862 
1863   if (r2->is_con()) {
1864     uint shift = r2->get_con();
1865     shift &= BitsPerJavaLong - 1;  // semantics of Java shifts
1866     // Shift by a multiple of 64 does nothing:
1867     if (shift == 0)  return t1;
1868     // Calculate reasonably aggressive bounds for the result.
1869     jlong lo = (julong)r1->_lo >> (juint)shift;
1870     jlong hi = (julong)r1->_hi >> (juint)shift;
1871     if (r1->_hi >= 0 && r1->_lo < 0) {
1872       // If the type has both negative and positive values,
1873       // there are two separate sub-domains to worry about:
1874       // The positive half and the negative half.
1875       jlong neg_lo = lo;
1876       jlong neg_hi = (julong)-1 >> (juint)shift;
1877       jlong pos_lo = (julong) 0 >> (juint)shift;
1878       jlong pos_hi = hi;
1879       //lo = MIN2(neg_lo, pos_lo);  // == 0
1880       lo = neg_lo < pos_lo ? neg_lo : pos_lo;
1881       //hi = MAX2(neg_hi, pos_hi);  // == -1 >>> shift;
1882       hi = neg_hi > pos_hi ? neg_hi : pos_hi;
1883     }
1884     assert(lo <= hi, "must have valid bounds");
1885     const TypeLong* tl = TypeLong::make(lo, hi, MAX2(r1->_widen,r2->_widen));
1886     #ifdef ASSERT
1887     // Make sure we get the sign-capture idiom correct.
1888     if (shift == BitsPerJavaLong - 1) {
1889       if (r1->_lo >= 0) assert(tl == TypeLong::ZERO, ">>>63 of + is 0");
1890       if (r1->_hi < 0)  assert(tl == TypeLong::ONE,  ">>>63 of - is +1");
1891     }
1892     #endif
1893     return tl;
1894   }
1895 
1896   return TypeLong::LONG;                // Give up
1897 }
1898 
1899 //=============================================================================
1900 //------------------------------Ideal------------------------------------------
1901 Node* FmaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1902   // We canonicalize the node by converting "(-a)*b+c" into "b*(-a)+c"
1903   // This reduces the number of rules in the matcher, as we only need to check
1904   // for negations on the second argument, and not the symmetric case where
1905   // the first argument is negated.
1906   if (in(1)->is_Neg() && !in(2)->is_Neg()) {
1907     swap_edges(1, 2);
1908     return this;
1909   }
1910   return nullptr;
1911 }
1912 
1913 //=============================================================================
1914 //------------------------------Value------------------------------------------
1915 const Type* FmaDNode::Value(PhaseGVN* phase) const {
1916   const Type *t1 = phase->type(in(1));
1917   if (t1 == Type::TOP) return Type::TOP;
1918   if (t1->base() != Type::DoubleCon) return Type::DOUBLE;
1919   const Type *t2 = phase->type(in(2));
1920   if (t2 == Type::TOP) return Type::TOP;
1921   if (t2->base() != Type::DoubleCon) return Type::DOUBLE;
1922   const Type *t3 = phase->type(in(3));
1923   if (t3 == Type::TOP) return Type::TOP;
1924   if (t3->base() != Type::DoubleCon) return Type::DOUBLE;
1925 #ifndef __STDC_IEC_559__
1926   return Type::DOUBLE;
1927 #else
1928   double d1 = t1->getd();
1929   double d2 = t2->getd();
1930   double d3 = t3->getd();
1931   return TypeD::make(fma(d1, d2, d3));
1932 #endif
1933 }
1934 
1935 //=============================================================================
1936 //------------------------------Value------------------------------------------
1937 const Type* FmaFNode::Value(PhaseGVN* phase) const {
1938   const Type *t1 = phase->type(in(1));
1939   if (t1 == Type::TOP) return Type::TOP;
1940   if (t1->base() != Type::FloatCon) return Type::FLOAT;
1941   const Type *t2 = phase->type(in(2));
1942   if (t2 == Type::TOP) return Type::TOP;
1943   if (t2->base() != Type::FloatCon) return Type::FLOAT;
1944   const Type *t3 = phase->type(in(3));
1945   if (t3 == Type::TOP) return Type::TOP;
1946   if (t3->base() != Type::FloatCon) return Type::FLOAT;
1947 #ifndef __STDC_IEC_559__
1948   return Type::FLOAT;
1949 #else
1950   float f1 = t1->getf();
1951   float f2 = t2->getf();
1952   float f3 = t3->getf();
1953   return TypeF::make(fma(f1, f2, f3));
1954 #endif
1955 }
1956 
1957 //=============================================================================
1958 //------------------------------Value------------------------------------------
1959 const Type* FmaHFNode::Value(PhaseGVN* phase) const {
1960   const Type* t1 = phase->type(in(1));
1961   if (t1 == Type::TOP) { return Type::TOP; }
1962   if (t1->base() != Type::HalfFloatCon) { return Type::HALF_FLOAT; }
1963   const Type* t2 = phase->type(in(2));
1964   if (t2 == Type::TOP) { return Type::TOP; }
1965   if (t2->base() != Type::HalfFloatCon) { return Type::HALF_FLOAT; }
1966   const Type* t3 = phase->type(in(3));
1967   if (t3 == Type::TOP) { return Type::TOP; }
1968   if (t3->base() != Type::HalfFloatCon) { return Type::HALF_FLOAT; }
1969 #ifndef __STDC_IEC_559__
1970   return Type::HALF_FLOAT;
1971 #else
1972   float f1 = t1->getf();
1973   float f2 = t2->getf();
1974   float f3 = t3->getf();
1975   return TypeH::make(fma(f1, f2, f3));
1976 #endif
1977 }
1978 
1979 //=============================================================================
1980 //------------------------------hash-------------------------------------------
1981 // Hash function over MulAddS2INode.  The operation is commutative within and across
1982 // its multiply pairs, so the hash function must return the same value under edge swapping.
1983 uint MulAddS2INode::hash() const {
1984   return (uintptr_t)in(1) + (uintptr_t)in(2) + (uintptr_t)in(3) + (uintptr_t)in(4) + Opcode();
1985 }
1986 
1987 //------------------------------Rotate Operations ------------------------------
1988 
1989 Node* RotateLeftNode::Identity(PhaseGVN* phase) {
1990   const Type* t1 = phase->type(in(1));
1991   if (t1 == Type::TOP) {
1992     return this;
1993   }
1994   int count = 0;
1995   assert(t1->isa_int() || t1->isa_long(), "Unexpected type");
1996   int mask = (t1->isa_int() ? BitsPerJavaInteger : BitsPerJavaLong) - 1;
1997   if (const_shift_count(phase, this, &count) && (count & mask) == 0) {
1998     // Rotate by a multiple of 32/64 does nothing
1999     return in(1);
2000   }
2001   return this;
2002 }
2003 
2004 const Type* RotateLeftNode::Value(PhaseGVN* phase) const {
2005   const Type* t1 = phase->type(in(1));
2006   const Type* t2 = phase->type(in(2));
2007   // Either input is TOP ==> the result is TOP
2008   if (t1 == Type::TOP || t2 == Type::TOP) {
2009     return Type::TOP;
2010   }
2011 
2012   if (t1->isa_int()) {
2013     const TypeInt* r1 = t1->is_int();
2014     const TypeInt* r2 = t2->is_int();
2015 
2016     // Left input is ZERO ==> the result is ZERO.
2017     if (r1 == TypeInt::ZERO) {
2018       return TypeInt::ZERO;
2019     }
2020     // Rotate by zero does nothing
2021     if (r2 == TypeInt::ZERO) {
2022       return r1;
2023     }
2024     if (r1->is_con() && r2->is_con()) {
2025       juint r1_con = (juint)r1->get_con();
2026       juint shift = (juint)(r2->get_con()) & (juint)(BitsPerJavaInteger - 1); // semantics of Java shifts
2027       return TypeInt::make((r1_con << shift) | (r1_con >> (32 - shift)));
2028     }
2029     return TypeInt::INT;
2030   } else {
2031     assert(t1->isa_long(), "Type must be a long");
2032     const TypeLong* r1 = t1->is_long();
2033     const TypeInt*  r2 = t2->is_int();
2034 
2035     // Left input is ZERO ==> the result is ZERO.
2036     if (r1 == TypeLong::ZERO) {
2037       return TypeLong::ZERO;
2038     }
2039     // Rotate by zero does nothing
2040     if (r2 == TypeInt::ZERO) {
2041       return r1;
2042     }
2043     if (r1->is_con() && r2->is_con()) {
2044       julong r1_con = (julong)r1->get_con();
2045       julong shift = (julong)(r2->get_con()) & (julong)(BitsPerJavaLong - 1); // semantics of Java shifts
2046       return TypeLong::make((r1_con << shift) | (r1_con >> (64 - shift)));
2047     }
2048     return TypeLong::LONG;
2049   }
2050 }
2051 
2052 Node* RotateLeftNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2053   const Type* t1 = phase->type(in(1));
2054   const Type* t2 = phase->type(in(2));
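       // A constant rotate-left by c is equivalent to a rotate-right by (32 - c)
       // for ints and by (64 - c) for longs.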
2055   if (t2->isa_int() && t2->is_int()->is_con()) {
2056     if (t1->isa_int()) {
2057       int lshift = t2->is_int()->get_con() & 31;
2058       return new RotateRightNode(in(1), phase->intcon(32 - (lshift & 31)), TypeInt::INT);
2059     } else if (t1 != Type::TOP) {
2060       assert(t1->isa_long(), "Type must be a long");
2061       int lshift = t2->is_int()->get_con() & 63;
2062       return new RotateRightNode(in(1), phase->intcon(64 - (lshift & 63)), TypeLong::LONG);
2063     }
2064   }
2065   return nullptr;
2066 }
2067 
2068 Node* RotateRightNode::Identity(PhaseGVN* phase) {
2069   const Type* t1 = phase->type(in(1));
2070   if (t1 == Type::TOP) {
2071     return this;
2072   }
2073   int count = 0;
2074   assert(t1->isa_int() || t1->isa_long(), "Unexpected type");
2075   int mask = (t1->isa_int() ? BitsPerJavaInteger : BitsPerJavaLong) - 1;
2076   if (const_shift_count(phase, this, &count) && (count & mask) == 0) {
2077     // Rotate by a multiple of 32/64 does nothing
2078     return in(1);
2079   }
2080   return this;
2081 }
2082 
2083 const Type* RotateRightNode::Value(PhaseGVN* phase) const {
2084   const Type* t1 = phase->type(in(1));
2085   const Type* t2 = phase->type(in(2));
2086   // Either input is TOP ==> the result is TOP
2087   if (t1 == Type::TOP || t2 == Type::TOP) {
2088     return Type::TOP;
2089   }
2090 
2091   if (t1->isa_int()) {
2092     const TypeInt* r1 = t1->is_int();
2093     const TypeInt* r2 = t2->is_int();
2094 
2095     // Left input is ZERO ==> the result is ZERO.
2096     if (r1 == TypeInt::ZERO) {
2097       return TypeInt::ZERO;
2098     }
2099     // Rotate by zero does nothing
2100     if (r2 == TypeInt::ZERO) {
2101       return r1;
2102     }
2103     if (r1->is_con() && r2->is_con()) {
2104       juint r1_con = (juint)r1->get_con();
2105       juint shift = (juint)(r2->get_con()) & (juint)(BitsPerJavaInteger - 1); // semantics of Java shifts
2106       return TypeInt::make((r1_con >> shift) | (r1_con << (32 - shift)));
2107     }
2108     return TypeInt::INT;
2109   } else {
2110     assert(t1->isa_long(), "Type must be a long");
2111     const TypeLong* r1 = t1->is_long();
2112     const TypeInt*  r2 = t2->is_int();
2113     // Left input is ZERO ==> the result is ZERO.
2114     if (r1 == TypeLong::ZERO) {
2115       return TypeLong::ZERO;
2116     }
2117     // Rotate by zero does nothing
2118     if (r2 == TypeInt::ZERO) {
2119       return r1;
2120     }
2121     if (r1->is_con() && r2->is_con()) {
2122       julong r1_con = (julong)r1->get_con();
2123       julong shift = (julong)(r2->get_con()) & (julong)(BitsPerJavaLong - 1); // semantics of Java shifts
2124       return TypeLong::make((r1_con >> shift) | (r1_con << (64 - shift)));
2125     }
2126     return TypeLong::LONG;
2127   }
2128 }
2129 
2130 //------------------------------ Sum & Mask ------------------------------
2131 
2132 // Returns a lower bound on the number of trailing zeros in expr.
2133 static jint AndIL_min_trailing_zeros(const PhaseGVN* phase, const Node* expr, BasicType bt) {
2134   const TypeInteger* type = phase->type(expr)->isa_integer(bt);
2135   if (type == nullptr) {
2136     return 0;
2137   }
2138 
2139   expr = expr->uncast();
2140   type = phase->type(expr)->isa_integer(bt);
2141   if (type == nullptr) {
2142     return 0;
2143   }
2144 
2145   if (type->is_con()) {
2146     jlong con = type->get_con_as_long(bt);
2147     return con == 0L ? (type2aelembytes(bt) * BitsPerByte) : count_trailing_zeros(con);
2148   }
2149 
2150   if (expr->Opcode() == Op_ConvI2L) {
2151     expr = expr->in(1)->uncast();
2152     bt = T_INT;
2153     type = phase->type(expr)->isa_int();
2154   }
2155 
2156   // Pattern: expr = (x << shift)
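       // For example, (x << 5) always has at least five trailing zero bits.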
2157   if (expr->Opcode() == Op_LShift(bt)) {
2158     const TypeInt* shift_t = phase->type(expr->in(2))->isa_int();
2159     if (shift_t == nullptr || !shift_t->is_con()) {
2160       return 0;
2161     }
2162     // We need to truncate the shift, as it may not have been canonicalized yet.
2163     // T_INT:  0..31 -> shift_mask = 4 * 8 - 1 = 31
2164     // T_LONG: 0..63 -> shift_mask = 8 * 8 - 1 = 63
2165     // (JLS: "Shift Operators")
2166     jint shift_mask = type2aelembytes(bt) * BitsPerByte - 1;
2167     return shift_t->get_con() & shift_mask;
2168   }
2169 
2170   return 0;
2171 }
2172 
2173 // Checks whether expr is neutral additive element (zero) under mask,
2174 // i.e. whether an expression of the form:
2175 //   (AndX (AddX expr addend) mask)
2176 //   (expr + addend) & mask
2177 // is equivalent to
2178 //   (AndX addend mask)
2179 //   addend & mask
2180 // for any addend.
2181 // (The X in AndX must be I or L, depending on bt).
2182 //
2183 // We check for the sufficient condition when the lowest set bit in expr is higher than
2184 // the highest set bit in mask, i.e.:
2185 // expr: eeeeee0000000000000
2186 // mask: 000000mmmmmmmmmmmmm
2187 //             <--w bits--->
2188 // We do not test for other cases.
2189 //
2190 // Correctness:
2191 //   Given "expr" with at least "w" trailing zeros,
2192 //   let "mod = 2^w", "suffix_mask = mod - 1"
2193 //
2194 //   Since "mask" only has bits set where "suffix_mask" does, we have:
2195 //     mask = suffix_mask & mask     (SUFFIX_MASK)
2196 //
2197 //   And since expr only has bits set above w, and suffix_mask only below:
2198 //     expr & suffix_mask == 0     (NO_BIT_OVERLAP)
2199 //
2200 //   Machine addition is performed modulo 2^W (W = the full integer width), and mod
2201 //   divides 2^W; further, "% mod" is the same as "& suffix_mask".  Hence:
2202 //     (x + y) % mod         = (x % mod         + y) % mod
2203 //     (x + y) & suffix_mask = (x & suffix_mask + y) & suffix_mask       (MOD_ARITH)
2204 //
2205 //   We can now prove the equality:
2206 //     (expr               + addend)               & mask
2207 //   = (expr               + addend) & suffix_mask & mask    (SUFFIX_MASK)
2208 //   = (expr & suffix_mask + addend) & suffix_mask & mask    (MOD_ARITH)
2209 //   = (0                  + addend) & suffix_mask & mask    (NO_BIT_OVERLAP)
2210 //   =                       addend                & mask    (SUFFIX_MASK)
2211 //
2212 // Hence, an expr with at least w trailing zeros is a neutral additive element under any mask with bit width w.
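     //
     // For example, expr == (i << 8) and mask == 0xFF (so w == 8) gives
     //   ((i << 8) + addend) & 0xFF == addend & 0xFF.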
2213 static bool AndIL_is_zero_element_under_mask(const PhaseGVN* phase, const Node* expr, const Node* mask, BasicType bt) {
2214   // When the mask is negative, it has the most significant bit set.
2215   const TypeInteger* mask_t = phase->type(mask)->isa_integer(bt);
2216   if (mask_t == nullptr || mask_t->lo_as_long() < 0) {
2217     return false;
2218   }
2219 
2220   // When the mask is constant zero, we defer to MulNode::Value to eliminate the entire AndX operation.
2221   if (mask_t->hi_as_long() == 0) {
2222     assert(mask_t->lo_as_long() == 0, "checked earlier");
2223     return false;
2224   }
2225 
2226   jint mask_bit_width = BitsPerLong - count_leading_zeros(mask_t->hi_as_long());
2227   jint expr_trailing_zeros = AndIL_min_trailing_zeros(phase, expr, bt);
2228   return expr_trailing_zeros >= mask_bit_width;
2229 }
2230 
2231 // Reduces the pattern:
2232 //   (AndX (AddX add1 add2) mask)
2233 // to
2234 //   (AndX add1 mask), if add2 is neutral wrt mask (see above), and vice versa.
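     // For example, ((i << 3) + j) & 7 is reduced to j & 7.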
2235 Node* MulNode::AndIL_sum_and_mask(PhaseGVN* phase, BasicType bt) {
2236   Node* add = in(1);
2237   Node* mask = in(2);
2238   int addidx = 0;
2239   if (add->Opcode() == Op_Add(bt)) {
2240     addidx = 1;
2241   } else if (mask->Opcode() == Op_Add(bt)) {
2242     mask = add;
2243     addidx = 2;
2244     add = in(addidx);
2245   }
2246   if (addidx > 0) {
2247     Node* add1 = add->in(1);
2248     Node* add2 = add->in(2);
2249     if (AndIL_is_zero_element_under_mask(phase, add1, mask, bt)) {
2250       set_req_X(addidx, add2, phase);
2251       return this;
2252     } else if (AndIL_is_zero_element_under_mask(phase, add2, mask, bt)) {
2253       set_req_X(addidx, add1, phase);
2254       return this;
2255     }
2256   }
2257   return nullptr;
2258 }