1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "memory/allocation.inline.hpp"
26 #include "opto/addnode.hpp"
27 #include "opto/connode.hpp"
28 #include "opto/convertnode.hpp"
29 #include "opto/memnode.hpp"
30 #include "opto/mulnode.hpp"
31 #include "opto/phaseX.hpp"
32 #include "opto/rangeinference.hpp"
33 #include "opto/subnode.hpp"
34 #include "utilities/powerOfTwo.hpp"
35
36 // Portions of code courtesy of Clifford Click
37
38
39 //=============================================================================
40 //------------------------------hash-------------------------------------------
41 // Hash function over MulNodes. Needs to be commutative; i.e., I swap
42 // (commute) inputs to MulNodes willy-nilly so the hash function must return
43 // the same value in the presence of edge swapping.
44 uint MulNode::hash() const {
45 return (uintptr_t)in(1) + (uintptr_t)in(2) + Opcode();
46 }
47
48 //------------------------------Identity---------------------------------------
49 // Multiplying a one preserves the other argument
50 Node* MulNode::Identity(PhaseGVN* phase) {
51 const Type *one = mul_id(); // The multiplicative identity
52 if( phase->type( in(1) )->higher_equal( one ) ) return in(2);
53 if( phase->type( in(2) )->higher_equal( one ) ) return in(1);
54
55 return this;
56 }
57
58 //------------------------------Ideal------------------------------------------
59 // We also canonicalize the Node, moving constants to the right input,
60 // and flatten expressions (so that 1+x+2 becomes x+3).
Node *MulNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* in1 = in(1);
  Node* in2 = in(2);
  Node* progress = nullptr;        // Progress flag

  // This code is used by And nodes too, but some conversions are
  // only valid for the actual Mul nodes.
  uint op = Opcode();
  bool real_mul = (op == Op_MulI) || (op == Op_MulL) ||
                  (op == Op_MulF) || (op == Op_MulD) ||
                  (op == Op_MulHF);

  // Convert "(-a)*(-b)" into "a*b".
  // A negation is represented as (0 - a), so check that both Sub nodes
  // subtract from the zero of the respective type.
  if (real_mul && in1->is_Sub() && in2->is_Sub()) {
    if (phase->type(in1->in(1))->is_zero_type() &&
        phase->type(in2->in(1))->is_zero_type()) {
      set_req_X(1, in1->in(2), phase);
      set_req_X(2, in2->in(2), phase);
      // Refresh the cached inputs after rewiring the edges.
      in1 = in(1);
      in2 = in(2);
      progress = this;
    }
  }

  // convert "max(a,b) * min(a,b)" into "a*b".
  // Valid because {max(a,b), min(a,b)} is a permutation of {a, b} and
  // multiplication is commutative.
  if ((in(1)->Opcode() == max_opcode() && in(2)->Opcode() == min_opcode())
      || (in(1)->Opcode() == min_opcode() && in(2)->Opcode() == max_opcode())) {
    Node *in11 = in(1)->in(1);
    Node *in12 = in(1)->in(2);

    Node *in21 = in(2)->in(1);
    Node *in22 = in(2)->in(2);

    // Both min and max must range over the same operand pair (in either order).
    if ((in11 == in21 && in12 == in22) ||
        (in11 == in22 && in12 == in21)) {
      set_req_X(1, in11, phase);
      set_req_X(2, in12, phase);
      in1 = in(1);
      in2 = in(2);
      progress = this;
    }
  }

  const Type* t1 = phase->type(in1);
  const Type* t2 = phase->type(in2);

  // We are OK if right is a constant, or right is a load and
  // left is a non-constant.
  if( !(t2->singleton() ||
        (in(2)->is_Load() && !(t1->singleton() || in(1)->is_Load())) ) ) {
    if( t1->singleton() ||       // Left input is a constant?
        // Otherwise, sort inputs (commutativity) to help value numbering.
        (in(1)->_idx > in(2)->_idx) ) {
      swap_edges(1, 2);
      const Type *t = t1;
      t1 = t2;
      t2 = t;
      progress = this;            // Made progress
    }
  }

  // If the right input is a constant, and the left input is a product of a
  // constant, flatten the expression tree.
  if( t2->singleton() &&        // Right input is a constant?
      op != Op_MulF &&          // Float & double cannot reassociate
      op != Op_MulD &&
      op != Op_MulHF) {
    if( t2 == Type::TOP ) return nullptr;
    Node *mul1 = in(1);
#ifdef ASSERT
    // Check for dead loop
    int op1 = mul1->Opcode();
    if ((mul1 == this) || (in(2) == this) ||
        ((op1 == mul_opcode() || op1 == add_opcode()) &&
         ((mul1->in(1) == this) || (mul1->in(2) == this) ||
          (mul1->in(1) == mul1) || (mul1->in(2) == mul1)))) {
      assert(false, "dead loop in MulNode::Ideal");
    }
#endif

    if( mul1->Opcode() == mul_opcode() ) {  // Left input is a multiply?
      // Mul of a constant?
      const Type *t12 = phase->type( mul1->in(2) );
      if( t12->singleton() && t12 != Type::TOP) { // Left input is an add of a constant?
        // Compute new constant; check for overflow
        // (x * con1) * con0 ==> x * (con1 * con0), but only if mul_ring folds
        // the two constants to a singleton (i.e. no overflow widened the type).
        const Type *tcon01 = ((MulNode*)mul1)->mul_ring(t2,t12);
        if( tcon01->singleton() ) {
          // The Mul of the flattened expression
          set_req_X(1, mul1->in(1), phase);
          set_req_X(2, phase->makecon(tcon01), phase);
          t2 = tcon01;
          progress = this;      // Made progress
        }
      }
    }
    // If the right input is a constant, and the left input is an add of a
    // constant, flatten the tree: (X+con1)*con0 ==> X*con0 + con1*con0
    const Node *add1 = in(1);
    if( add1->Opcode() == add_opcode() ) {      // Left input is an add?
      // Add of a constant?
      const Type *t12 = phase->type( add1->in(2) );
      if( t12->singleton() && t12 != Type::TOP ) { // Left input is an add of a constant?
        assert( add1->in(1) != add1, "dead loop in MulNode::Ideal" );
        // Compute new constant; check for overflow
        const Type *tcon01 = mul_ring(t2,t12);
        if( tcon01->singleton() ) {

          // Convert (X+con1)*con0 into X*con0
          Node *mul = clone();    // mul = ()*con0
          mul->set_req(1,add1->in(1));  // mul = X*con0
          mul = phase->transform(mul);

          Node *add2 = add1->clone();
          add2->set_req(1, mul);        // X*con0 + con0*con1
          add2->set_req(2, phase->makecon(tcon01) );
          progress = add2;
        }
      }
    } // End of is left input an add
  } // End of is right input a Mul

  return progress;
}
184
185 //------------------------------Value-----------------------------------------
// Compute the result type of this multiplication (or AND, which shares this
// node family) from its input types, handling lattice extremes before
// delegating to the subclass-specific mul_ring().
const Type* MulNode::Value(PhaseGVN* phase) const {
  const Type *t1 = phase->type( in(1) );
  const Type *t2 = phase->type( in(2) );
  // Either input is TOP ==> the result is TOP
  if( t1 == Type::TOP ) return Type::TOP;
  if( t2 == Type::TOP ) return Type::TOP;

  // Either input is ZERO ==> the result is ZERO.
  // Not valid for floats or doubles since +0.0 * -0.0 --> +0.0
  int op = Opcode();
  if( op == Op_MulI || op == Op_AndI || op == Op_MulL || op == Op_AndL ) {
    const Type *zero = add_id();        // The multiplicative zero
    if( t1->higher_equal( zero ) ) return zero;
    if( t2->higher_equal( zero ) ) return zero;
  }

  // TODO 8350865 Still needed? Yes, I think this is from PhaseMacroExpand::expand_mh_intrinsic_return
  // Code pattern on return from a call that returns an __Value.  Can
  // be optimized away if the return value turns out to be an oop.
  // NOTE(review): isa_intptr_t() is dereferenced without a null check below;
  // this presumably relies on AndX always having an intptr-typed mask — verify.
  if (op == Op_AndX &&
      in(1) != nullptr &&
      in(1)->Opcode() == Op_CastP2X &&
      in(1)->in(1) != nullptr &&
      phase->type(in(1)->in(1))->isa_oopptr() &&
      t2->isa_intptr_t()->_lo >= 0 &&
      t2->isa_intptr_t()->_hi <= MinObjAlignmentInBytesMask) {
    return add_id();
  }

  // Either input is BOTTOM ==> the result is the local BOTTOM
  if( t1 == Type::BOTTOM || t2 == Type::BOTTOM )
    return bottom_type();

  return mul_ring(t1,t2);            // Local flavor of type multiplication
}
221
222 MulNode* MulNode::make(Node* in1, Node* in2, BasicType bt) {
223 switch (bt) {
224 case T_INT:
225 return new MulINode(in1, in2);
226 case T_LONG:
227 return new MulLNode(in1, in2);
228 default:
229 fatal("Not implemented for %s", type2name(bt));
230 }
231 return nullptr;
232 }
233
234 MulNode* MulNode::make_and(Node* in1, Node* in2, BasicType bt) {
235 switch (bt) {
236 case T_INT:
237 return new AndINode(in1, in2);
238 case T_LONG:
239 return new AndLNode(in1, in2);
240 default:
241 fatal("Not implemented for %s", type2name(bt));
242 }
243 return nullptr;
244 }
245
246
247 //=============================================================================
248 //------------------------------Ideal------------------------------------------
249 // Check for power-of-2 multiply, then try the regular MulNode::Ideal
Node *MulINode::Ideal(PhaseGVN *phase, bool can_reshape) {
  const jint con = in(2)->find_int_con(0);
  if (con == 0) {
    // If in(2) is not a constant, call Ideal() of the parent class to
    // try to move constant to the right side.
    return MulNode::Ideal(phase, can_reshape);
  }

  // Now we have a constant Node on the right and the constant in con.
  if (con == 1) {
    // By one is handled by Identity call
    return nullptr;
  }

  // Check for negative constant; if so negate the final result
  bool sign_flip = false;

  unsigned int abs_con = g_uabs(con);
  if (abs_con != (unsigned int)con) {
    sign_flip = true;
  }

  // Get low bit; check for being the only bit
  Node *res = nullptr;
  unsigned int bit1 = submultiple_power_of_2(abs_con);
  if (bit1 == abs_con) {           // Found a power of 2?
    // x * 2^k ==> x << k
    res = new LShiftINode(in(1), phase->intcon(log2i_exact(bit1)));
  } else {
    // Check for constant with 2 bits set
    unsigned int bit2 = abs_con - bit1;
    bit2 = bit2 & (0 - bit2);          // Extract 2nd bit
    if (bit2 + bit1 == abs_con) {    // Found all bits in con?
      // x * (2^k + 2^m) ==> (x << k) + (x << m)
      Node *n1 = phase->transform(new LShiftINode(in(1), phase->intcon(log2i_exact(bit1))));
      Node *n2 = phase->transform(new LShiftINode(in(1), phase->intcon(log2i_exact(bit2))));
      res = new AddINode(n2, n1);
    } else if (is_power_of_2(abs_con + 1)) {
      // Sleezy: power-of-2 - 1.  Next time be generic.
      // x * (2^k - 1) ==> (x << k) - x
      unsigned int temp = abs_con + 1;
      Node *n1 = phase->transform(new LShiftINode(in(1), phase->intcon(log2i_exact(temp))));
      res = new SubINode(n1, in(1));
    } else {
      // No cheap shift/add decomposition found; fall back to generic rules.
      return MulNode::Ideal(phase, can_reshape);
    }
  }

  if (sign_flip) {                 // Need to negate result?
    res = phase->transform(res);// Transform, before making the zero con
    res = new SubINode(phase->intcon(0),res);
  }

  return res;                      // Return final result
}
302
303 // This template class performs type multiplication for MulI/MulLNode. NativeType is either jint or jlong.
304 // In this class, the inputs of the MulNodes are named left and right with types [left_lo,left_hi] and [right_lo,right_hi].
305 //
306 // In general, the multiplication of two x-bit values could produce a result that consumes up to 2x bits if there is
307 // enough space to hold them all. We can therefore distinguish the following two cases for the product:
308 // - no overflow (i.e. product fits into x bits)
309 // - overflow (i.e. product does not fit into x bits)
310 //
311 // When multiplying the two x-bit inputs 'left' and 'right' with their x-bit types [left_lo,left_hi] and [right_lo,right_hi]
312 // we need to find the minimum and maximum of all possible products to define a new type. To do that, we compute the
313 // cross product of [left_lo,left_hi] and [right_lo,right_hi] in 2x-bit space where no over- or underflow can happen.
314 // The cross product consists of the following four multiplications with 2x-bit results:
315 // (1) left_lo * right_lo
316 // (2) left_lo * right_hi
317 // (3) left_hi * right_lo
318 // (4) left_hi * right_hi
319 //
320 // Let's define the following two functions:
321 // - Lx(i): Returns the lower x bits of the 2x-bit number i.
322 // - Ux(i): Returns the upper x bits of the 2x-bit number i.
323 //
324 // Let's first assume all products are positive where only overflows are possible but no underflows. If there is no
325 // overflow for a product p, then the upper x bits of the 2x-bit result p are all zero:
326 // Ux(p) = 0
327 // Lx(p) = p
328 //
329 // If none of the multiplications (1)-(4) overflow, we can truncate the upper x bits and use the following result type
330 // with x bits:
331 // [result_lo,result_hi] = [MIN(Lx(1),Lx(2),Lx(3),Lx(4)),MAX(Lx(1),Lx(2),Lx(3),Lx(4))]
332 //
333 // If any of these multiplications overflows, we could pessimistically take the bottom type for the x bit result
334 // (i.e. all values in the x-bit space could be possible):
335 // [result_lo,result_hi] = [NativeType_min,NativeType_max]
336 //
337 // However, in case of any overflow, we can do better by analyzing the upper x bits of all multiplications (1)-(4) with
338 // 2x-bit results. The upper x bits tell us something about how many times a multiplication has overflown the lower
339 // x bits. If the upper x bits of (1)-(4) are all equal, then we know that all of these multiplications overflowed
340 // the lower x bits the same number of times:
341 // Ux((1)) = Ux((2)) = Ux((3)) = Ux((4))
342 //
// If all upper x bits are equal, we can conclude:
//   Lx(MIN((1),(2),(3),(4))) = MIN(Lx(1),Lx(2),Lx(3),Lx(4))
//   Lx(MAX((1),(2),(3),(4))) = MAX(Lx(1),Lx(2),Lx(3),Lx(4))
//
// Therefore, we can use the same precise x-bit result type as for the no-overflow case:
//   [result_lo,result_hi] = [MIN(Lx(1),Lx(2),Lx(3),Lx(4)),MAX(Lx(1),Lx(2),Lx(3),Lx(4))]
349 //
350 //
351 // Now let's assume that (1)-(4) are signed multiplications where over- and underflow could occur:
// Negative numbers are all sign extended with ones. Therefore, if a negative product does not underflow, then the
353 // upper x bits of the 2x-bit result are all set to ones which is minus one in two's complement. If there is an underflow,
354 // the upper x bits are decremented by the number of times an underflow occurred. The smallest possible negative product
355 // is NativeType_min*NativeType_max, where the upper x bits are set to NativeType_min / 2 (b11...0). It is therefore
356 // impossible to underflow the upper x bits. Thus, when having all ones (i.e. minus one) in the upper x bits, we know
357 // that there is no underflow.
358 //
359 // To be able to compare the number of over-/underflows of positive and negative products, respectively, we normalize
360 // the upper x bits of negative 2x-bit products by adding one. This way a product has no over- or underflow if the
361 // normalized upper x bits are zero. Now we can use the same improved type as for strictly positive products because we
362 // can compare the upper x bits in a unified way with N() being the normalization function:
//   N(Ux((1))) = N(Ux((2))) = N(Ux((3))) = N(Ux((4)))
template<typename NativeType>
class IntegerTypeMultiplication {

  // Boundary values of the two input ranges.
  NativeType _lo_left;
  NativeType _lo_right;
  NativeType _hi_left;
  NativeType _hi_right;
  // Widen values of the input types; the larger one is kept for the result.
  short _widen_left;
  short _widen_right;

  // The whole x-bit value range, used when the product cannot be narrowed.
  static const Type* overflow_type();
  // The upper x bits of the exact 2x-bit product x*y.
  static NativeType multiply_high(NativeType x, NativeType y);
  // Builds the result type [lo,hi] with the merged widen value.
  const Type* create_type(NativeType lo, NativeType hi) const;

  // Upper x bits of the product, normalized (see N() in the comment above
  // this class) so that zero means "no over- or underflow" for positive and
  // negative products alike.
  static NativeType multiply_high_signed_overflow_value(NativeType x, NativeType y) {
    return normalize_overflow_value(x, y, multiply_high(x, y));
  }

  // True if the four boundary products (1)-(4) do not all overflow the lower
  // x bits the same number of times; in that case the min/max of the truncated
  // products is not a valid bound for the result range.
  bool cross_product_not_same_overflow_value() const {
    const NativeType lo_lo_high_product = multiply_high_signed_overflow_value(_lo_left, _lo_right);
    const NativeType lo_hi_high_product = multiply_high_signed_overflow_value(_lo_left, _hi_right);
    const NativeType hi_lo_high_product = multiply_high_signed_overflow_value(_hi_left, _lo_right);
    const NativeType hi_hi_high_product = multiply_high_signed_overflow_value(_hi_left, _hi_right);
    return lo_lo_high_product != lo_hi_high_product ||
           lo_hi_high_product != hi_lo_high_product ||
           hi_lo_high_product != hi_hi_high_product;
  }

  // True if the single product x*y does not fit into x bits.
  bool does_product_overflow(NativeType x, NativeType y) const {
    return multiply_high_signed_overflow_value(x, y) != 0;
  }

  // Normalization N(): a negative product that does not underflow has all-ones
  // upper x bits (i.e. -1), so add one to make "no underflow" read as zero.
  static NativeType normalize_overflow_value(const NativeType x, const NativeType y, NativeType result) {
    return java_multiply(x, y) < 0 ? result + 1 : result;
  }

 public:
  template<class IntegerType>
  IntegerTypeMultiplication(const IntegerType* left, const IntegerType* right)
    : _lo_left(left->_lo), _lo_right(right->_lo),
      _hi_left(left->_hi), _hi_right(right->_hi),
      _widen_left(left->_widen), _widen_right(right->_widen) {}

  // Compute the product type by multiplying the two input type ranges. We take the minimum and maximum of all possible
  // values (requires 4 multiplications of all possible combinations of the two range boundary values). If any of these
  // multiplications overflows/underflows, we need to make sure that they all have the same number of overflows/underflows.
  // If that is not the case, we return the bottom type to cover all values due to the inconsistent overflows/underflows.
  const Type* compute() const {
    if (cross_product_not_same_overflow_value()) {
      return overflow_type();
    }

    NativeType lo_lo_product = java_multiply(_lo_left, _lo_right);
    NativeType lo_hi_product = java_multiply(_lo_left, _hi_right);
    NativeType hi_lo_product = java_multiply(_hi_left, _lo_right);
    NativeType hi_hi_product = java_multiply(_hi_left, _hi_right);
    const NativeType min = MIN4(lo_lo_product, lo_hi_product, hi_lo_product, hi_hi_product);
    const NativeType max = MAX4(lo_lo_product, lo_hi_product, hi_lo_product, hi_hi_product);
    return create_type(min, max);
  }

  // True if any boundary product of the two ranges overflows x bits.
  bool does_overflow() const {
    return does_product_overflow(_lo_left, _lo_right) ||
           does_product_overflow(_lo_left, _hi_right) ||
           does_product_overflow(_hi_left, _lo_right) ||
           does_product_overflow(_hi_left, _hi_right);
  }
};
432
// For jint inputs an imprecise product covers the whole 32-bit int range.
template <>
const Type* IntegerTypeMultiplication<jint>::overflow_type() {
  return TypeInt::INT;
}
437
438 template <>
439 jint IntegerTypeMultiplication<jint>::multiply_high(const jint x, const jint y) {
440 const jlong x_64 = x;
441 const jlong y_64 = y;
442 const jlong product = x_64 * y_64;
443 return (jint)((uint64_t)product >> 32u);
444 }
445
// Build the int result range, keeping the wider of the two widen values.
template <>
const Type* IntegerTypeMultiplication<jint>::create_type(jint lo, jint hi) const {
  return TypeInt::make(lo, hi, MAX2(_widen_left, _widen_right));
}
450
// For jlong inputs an imprecise product covers the whole 64-bit long range.
template <>
const Type* IntegerTypeMultiplication<jlong>::overflow_type() {
  return TypeLong::LONG;
}
455
// Upper 64 bits of the exact 128-bit product; delegates to the shared helper.
template <>
jlong IntegerTypeMultiplication<jlong>::multiply_high(const jlong x, const jlong y) {
  return multiply_high_signed(x, y);
}
460
// Build the long result range, keeping the wider of the two widen values.
template <>
const Type* IntegerTypeMultiplication<jlong>::create_type(jlong lo, jlong hi) const {
  return TypeLong::make(lo, hi, MAX2(_widen_left, _widen_right));
}
465
466 // Compute the product type of two integer ranges into this node.
467 const Type* MulINode::mul_ring(const Type* type_left, const Type* type_right) const {
468 const IntegerTypeMultiplication<jint> integer_multiplication(type_left->is_int(), type_right->is_int());
469 return integer_multiplication.compute();
470 }
471
472 bool MulINode::does_overflow(const TypeInt* type_left, const TypeInt* type_right) {
473 const IntegerTypeMultiplication<jint> integer_multiplication(type_left, type_right);
474 return integer_multiplication.does_overflow();
475 }
476
477 // Compute the product type of two long ranges into this node.
478 const Type* MulLNode::mul_ring(const Type* type_left, const Type* type_right) const {
479 const IntegerTypeMultiplication<jlong> integer_multiplication(type_left->is_long(), type_right->is_long());
480 return integer_multiplication.compute();
481 }
482
483 //=============================================================================
484 //------------------------------Ideal------------------------------------------
485 // Check for power-of-2 multiply, then try the regular MulNode::Ideal
Node *MulLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  const jlong con = in(2)->find_long_con(0);
  if (con == 0) {
    // If in(2) is not a constant, call Ideal() of the parent class to
    // try to move constant to the right side.
    return MulNode::Ideal(phase, can_reshape);
  }

  // Now we have a constant Node on the right and the constant in con.
  if (con == 1) {
    // By one is handled by Identity call
    return nullptr;
  }

  // Check for negative constant; if so negate the final result
  bool sign_flip = false;
  julong abs_con = g_uabs(con);
  if (abs_con != (julong)con) {
    sign_flip = true;
  }

  // Get low bit; check for being the only bit
  Node *res = nullptr;
  julong bit1 = submultiple_power_of_2(abs_con);
  if (bit1 == abs_con) {           // Found a power of 2?
    // x * 2^k ==> x << k
    res = new LShiftLNode(in(1), phase->intcon(log2i_exact(bit1)));
  } else {

    // Check for constant with 2 bits set
    julong bit2 = abs_con-bit1;
    bit2 = bit2 & (0-bit2);          // Extract 2nd bit
    if (bit2 + bit1 == abs_con) {    // Found all bits in con?
      // x * (2^k + 2^m) ==> (x << k) + (x << m)
      Node *n1 = phase->transform(new LShiftLNode(in(1), phase->intcon(log2i_exact(bit1))));
      Node *n2 = phase->transform(new LShiftLNode(in(1), phase->intcon(log2i_exact(bit2))));
      res = new AddLNode(n2, n1);

    } else if (is_power_of_2(abs_con+1)) {
      // Sleezy: power-of-2 -1.  Next time be generic.
      // x * (2^k - 1) ==> (x << k) - x
      julong temp = abs_con + 1;
      Node *n1 = phase->transform( new LShiftLNode(in(1), phase->intcon(log2i_exact(temp))));
      res = new SubLNode(n1, in(1));
    } else {
      // No cheap shift/add decomposition found; fall back to generic rules.
      return MulNode::Ideal(phase, can_reshape);
    }
  }

  if (sign_flip) {                // Need to negate result?
    res = phase->transform(res);// Transform, before making the zero con
    res = new SubLNode(phase->longcon(0),res);
  }

  return res;                     // Return final result
}
539
540 //=============================================================================
541 //------------------------------mul_ring---------------------------------------
542 // Compute the product type of two double ranges into this node.
543 const Type *MulFNode::mul_ring(const Type *t0, const Type *t1) const {
544 if( t0 == Type::FLOAT || t1 == Type::FLOAT ) return Type::FLOAT;
545 return TypeF::make( t0->getf() * t1->getf() );
546 }
547
548 //------------------------------Ideal---------------------------------------
549 // Check to see if we are multiplying by a constant 2 and convert to add, then try the regular MulNode::Ideal
550 Node* MulFNode::Ideal(PhaseGVN* phase, bool can_reshape) {
551 const TypeF *t2 = phase->type(in(2))->isa_float_constant();
552
553 // x * 2 -> x + x
554 if (t2 != nullptr && t2->getf() == 2) {
555 Node* base = in(1);
556 return new AddFNode(base, base);
557 }
558 return MulNode::Ideal(phase, can_reshape);
559 }
560
561 //=============================================================================
562 //------------------------------Ideal------------------------------------------
563 // Check to see if we are multiplying by a constant 2 and convert to add, then try the regular MulNode::Ideal
564 Node* MulHFNode::Ideal(PhaseGVN* phase, bool can_reshape) {
565 const TypeH* t2 = phase->type(in(2))->isa_half_float_constant();
566
567 // x * 2 -> x + x
568 if (t2 != nullptr && t2->getf() == 2) {
569 Node* base = in(1);
570 return new AddHFNode(base, base);
571 }
572 return MulNode::Ideal(phase, can_reshape);
573 }
574
575 // Compute the product type of two half float ranges into this node.
576 const Type* MulHFNode::mul_ring(const Type* t0, const Type* t1) const {
577 if (t0 == Type::HALF_FLOAT || t1 == Type::HALF_FLOAT) {
578 return Type::HALF_FLOAT;
579 }
580 return TypeH::make(t0->getf() * t1->getf());
581 }
582
583 //=============================================================================
584 //------------------------------mul_ring---------------------------------------
585 // Compute the product type of two double ranges into this node.
586 const Type *MulDNode::mul_ring(const Type *t0, const Type *t1) const {
587 if( t0 == Type::DOUBLE || t1 == Type::DOUBLE ) return Type::DOUBLE;
588 // We must be multiplying 2 double constants.
589 return TypeD::make( t0->getd() * t1->getd() );
590 }
591
592 //------------------------------Ideal---------------------------------------
593 // Check to see if we are multiplying by a constant 2 and convert to add, then try the regular MulNode::Ideal
594 Node* MulDNode::Ideal(PhaseGVN* phase, bool can_reshape) {
595 const TypeD *t2 = phase->type(in(2))->isa_double_constant();
596
597 // x * 2 -> x + x
598 if (t2 != nullptr && t2->getd() == 2) {
599 Node* base = in(1);
600 return new AddDNode(base, base);
601 }
602
603 return MulNode::Ideal(phase, can_reshape);
604 }
605
606 //=============================================================================
607 //------------------------------Value------------------------------------------
608 const Type* MulHiLNode::Value(PhaseGVN* phase) const {
609 const Type *t1 = phase->type( in(1) );
610 const Type *t2 = phase->type( in(2) );
611 const Type *bot = bottom_type();
612 return MulHiValue(t1, t2, bot);
613 }
614
615 const Type* UMulHiLNode::Value(PhaseGVN* phase) const {
616 const Type *t1 = phase->type( in(1) );
617 const Type *t2 = phase->type( in(2) );
618 const Type *bot = bottom_type();
619 return MulHiValue(t1, t2, bot);
620 }
621
622 // A common routine used by UMulHiLNode and MulHiLNode
623 const Type* MulHiValue(const Type *t1, const Type *t2, const Type *bot) {
624 // Either input is TOP ==> the result is TOP
625 if( t1 == Type::TOP ) return Type::TOP;
626 if( t2 == Type::TOP ) return Type::TOP;
627
628 // Either input is BOTTOM ==> the result is the local BOTTOM
629 if( (t1 == bot) || (t2 == bot) ||
630 (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) )
631 return bot;
632
633 // It is not worth trying to constant fold this stuff!
634 return TypeLong::LONG;
635 }
636
637 //=============================================================================
638 //------------------------------mul_ring---------------------------------------
639 // Supplied function returns the product of the inputs IN THE CURRENT RING.
640 // For the logical operations the ring's MUL is really a logical AND function.
641 // This also type-checks the inputs for sanity. Guaranteed never to
642 // be passed a TOP or BOTTOM type, these are filtered out by pre-check.
643 const Type* AndINode::mul_ring(const Type* t1, const Type* t2) const {
644 return RangeInference::infer_and(t1->is_int(), t2->is_int());
645 }
646
647 static bool AndIL_is_zero_element_under_mask(const PhaseGVN* phase, const Node* expr, const Node* mask, BasicType bt);
648
649 const Type* AndINode::Value(PhaseGVN* phase) const {
650 if (AndIL_is_zero_element_under_mask(phase, in(1), in(2), T_INT) ||
651 AndIL_is_zero_element_under_mask(phase, in(2), in(1), T_INT)) {
652 return TypeInt::ZERO;
653 }
654
655 return MulNode::Value(phase);
656 }
657
658 //------------------------------Identity---------------------------------------
659 // Masking off the high bits of an unsigned load is not required
Node* AndINode::Identity(PhaseGVN* phase) {

  // x & x => x
  if (in(1) == in(2)) {
    return in(1);
  }

  Node* in1 = in(1);
  uint op = in1->Opcode();
  const TypeInt* t2 = phase->type(in(2))->isa_int();
  if (t2 && t2->is_con()) {
    int con = t2->get_con();
    // Masking off high bits which are always zero is useless.
    const TypeInt* t1 = phase->type(in(1))->isa_int();
    if (t1 != nullptr && t1->_lo >= 0) {
      // t1_support has ones in every bit position that in(1) can possibly set;
      // if the mask covers all of them, the AND changes nothing.
      jint t1_support = right_n_bits(1 + log2i_graceful(t1->_hi));
      if ((t1_support & con) == t1_support)
        return in1;
    }
    // Masking off the high bits of a unsigned-shift-right is not
    // needed either.
    if (op == Op_URShiftI) {
      const TypeInt* t12 = phase->type(in1->in(2))->isa_int();
      if (t12 && t12->is_con()) {  // Shift is by a constant
        int shift = t12->get_con();
        shift &= BitsPerJavaInteger - 1;  // semantics of Java shifts
        // After an unsigned shift by 'shift', only the low (32-shift) bits can
        // be set; a mask covering all of them is a no-op.
        int mask = max_juint >> shift;
        if ((mask & con) == mask)  // If AND is useless, skip it
          return in1;
      }
    }
  }
  return MulNode::Identity(phase);
}
694
695 //------------------------------Ideal------------------------------------------
Node *AndINode::Ideal(PhaseGVN *phase, bool can_reshape) {
  // Simplify (v1 + v2) & mask to v1 & mask or v2 & mask when possible.
  Node* progress = AndIL_sum_and_mask(phase, T_INT);
  if (progress != nullptr) {
    return progress;
  }

  // Convert "(~a) & (~b)" into "~(a | b)"
  if (AddNode::is_not(phase, in(1), T_INT) && AddNode::is_not(phase, in(2), T_INT)) {
    Node* or_a_b = new OrINode(in(1)->in(1), in(2)->in(1));
    Node* tn = phase->transform(or_a_b);
    return AddNode::make_not(phase, tn, T_INT);
  }

  // Special case constant AND mask
  const TypeInt *t2 = phase->type( in(2) )->isa_int();
  if( !t2 || !t2->is_con() ) return MulNode::Ideal(phase, can_reshape);
  const int mask = t2->get_con();
  Node *load = in(1);
  uint lop = load->Opcode();

  // Masking bits off of a Character?  Hi bits are already zero.
  // Shrink the mask to the low 16 bits the load can actually produce.
  if( lop == Op_LoadUS &&
      (mask & 0xFFFF0000) )     // Can we make a smaller mask?
    return new AndINode(load,phase->intcon(mask&0xFFFF));

  // Masking bits off of a Short?  Loading a Character does some masking
  // Only safe when this AND is the load's sole user, since the load itself
  // is replaced by its unsigned variant.
  if (can_reshape &&
      load->outcnt() == 1 && load->unique_out() == this) {
    if (lop == Op_LoadS && (mask & 0xFFFF0000) == 0 ) {
      Node* ldus = load->as_Load()->convert_to_unsigned_load(*phase);
      ldus = phase->transform(ldus);
      return new AndINode(ldus, phase->intcon(mask & 0xFFFF));
    }

    // Masking sign bits off of a Byte?  Do an unsigned byte load plus
    // an and.
    if (lop == Op_LoadB && (mask & 0xFFFFFF00) == 0) {
      Node* ldub = load->as_Load()->convert_to_unsigned_load(*phase);
      ldub = phase->transform(ldub);
      return new AndINode(ldub, phase->intcon(mask));
    }
  }

  // Masking off sign bits?  Dont make them!
  if( lop == Op_RShiftI ) {
    const TypeInt *t12 = phase->type(load->in(2))->isa_int();
    if( t12 && t12->is_con() ) { // Shift is by a constant
      int shift = t12->get_con();
      shift &= BitsPerJavaInteger-1;  // semantics of Java shifts
      // sign_bits_mask covers exactly the bits a signed shift would sign-fill.
      const int sign_bits_mask = ~right_n_bits(BitsPerJavaInteger - shift);
      // If the AND'ing of the 2 masks has no bits, then only original shifted
      // bits survive.  NO sign-extension bits survive the maskings.
      if( (sign_bits_mask & mask) == 0 ) {
        // Use zero-fill shift instead
        Node *zshift = phase->transform(new URShiftINode(load->in(1),load->in(2)));
        return new AndINode( zshift, in(2) );
      }
    }
  }

  // Check for 'negate/and-1', a pattern emitted when someone asks for
  // 'mod 2'.  Negate leaves the low order bit unchanged (think: complement
  // plus 1) and the mask is of the low order bit.  Skip the negate.
  if( lop == Op_SubI && mask == 1 && load->in(1) &&
      phase->type(load->in(1)) == TypeInt::ZERO )
    return new AndINode( load->in(2), in(2) );

  return MulNode::Ideal(phase, can_reshape);
}
766
767 //=============================================================================
768 //------------------------------mul_ring---------------------------------------
769 // Supplied function returns the product of the inputs IN THE CURRENT RING.
770 // For the logical operations the ring's MUL is really a logical AND function.
771 // This also type-checks the inputs for sanity. Guaranteed never to
772 // be passed a TOP or BOTTOM type, these are filtered out by pre-check.
773 const Type* AndLNode::mul_ring(const Type* t1, const Type* t2) const {
774 return RangeInference::infer_and(t1->is_long(), t2->is_long());
775 }
776
777 const Type* AndLNode::Value(PhaseGVN* phase) const {
778 if (AndIL_is_zero_element_under_mask(phase, in(1), in(2), T_LONG) ||
779 AndIL_is_zero_element_under_mask(phase, in(2), in(1), T_LONG)) {
780 return TypeLong::ZERO;
781 }
782
783 return MulNode::Value(phase);
784 }
785
786 //------------------------------Identity---------------------------------------
787 // Masking off the high bits of an unsigned load is not required
// Returns an existing node equivalent to this AndL when the AND is a no-op:
// x & x, masking bits that are already known to be zero, or masking high
// bits already cleared by an unsigned shift right.
Node* AndLNode::Identity(PhaseGVN* phase) {

  // x & x => x
  if (in(1) == in(2)) {
    return in(1);
  }

  Node *usr = in(1);
  const TypeLong *t2 = phase->type( in(2) )->isa_long();
  if( t2 && t2->is_con() ) {
    jlong con = t2->get_con();
    // Masking off high bits which are always zero is useless.
    const TypeLong* t1 = phase->type( in(1) )->isa_long();
    if (t1 != nullptr && t1->_lo >= 0) {
      // in(1) is known non-negative, so all of its set bits lie in the
      // low 'bit_count' positions (bit_count derived from the upper bound).
      int bit_count = log2i_graceful(t1->_hi) + 1;
      // t1_support covers every bit position in(1) can possibly have set.
      jlong t1_support = jlong(max_julong >> (BitsPerJavaLong - bit_count));
      // If the constant mask keeps all of those bits, the AND changes nothing.
      if ((t1_support & con) == t1_support)
        return usr;
    }
    uint lop = usr->Opcode();
    // Masking off the high bits of a unsigned-shift-right is not
    // needed either.
    if( lop == Op_URShiftL ) {
      const TypeInt *t12 = phase->type( usr->in(2) )->isa_int();
      if( t12 && t12->is_con() ) { // Shift is by a constant
        int shift = t12->get_con();
        shift &= BitsPerJavaLong - 1; // semantics of Java shifts
        // After an unsigned shift by 'shift', only the low (64 - shift)
        // bits can be set; 'mask' covers exactly those positions.
        jlong mask = max_julong >> shift;
        if( (mask&con) == mask ) // If AND is useless, skip it
          return usr;
      }
    }
  }
  return MulNode::Identity(phase);
}
823
824 //------------------------------Ideal------------------------------------------
// Rewrites AndL into cheaper or more canonical shapes. Returns a new node,
// 'this' (when inputs were updated in place via set_req_X), or delegates to
// MulNode::Ideal when no AndL-specific transformation applies.
Node *AndLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  // Simplify (v1 + v2) & mask to v1 & mask or v2 & mask when possible.
  Node* progress = AndIL_sum_and_mask(phase, T_LONG);
  if (progress != nullptr) {
    return progress;
  }

  // Convert "(~a) & (~b)" into "~(a | b)"
  if (AddNode::is_not(phase, in(1), T_LONG) && AddNode::is_not(phase, in(2), T_LONG)) {
    Node* or_a_b = new OrLNode(in(1)->in(1), in(2)->in(1));
    Node* tn = phase->transform(or_a_b);
    return AddNode::make_not(phase, tn, T_LONG);
  }

  // Special case constant AND mask
  const TypeLong *t2 = phase->type( in(2) )->isa_long();
  if( !t2 || !t2->is_con() ) return MulNode::Ideal(phase, can_reshape);
  const jlong mask = t2->get_con();

  Node* in1 = in(1);
  int op = in1->Opcode();

  // Are we masking a long that was converted from an int with a mask
  // that fits in 32-bits? Commute them and use an AndINode. Don't
  // convert masks which would cause a sign extension of the integer
  // value. This check includes UI2L masks (0x00000000FFFFFFFF) which
  // would be optimized away later in Identity.
  if (op == Op_ConvI2L && (mask & UCONST64(0xFFFFFFFF80000000)) == 0) {
    Node* andi = new AndINode(in1->in(1), phase->intcon(mask));
    andi = phase->transform(andi);
    return new ConvI2LNode(andi);
  }

  // Masking off sign bits? Dont make them!
  if (op == Op_RShiftL) {
    const TypeInt* t12 = phase->type(in1->in(2))->isa_int();
    if( t12 && t12->is_con() ) { // Shift is by a constant
      int shift = t12->get_con();
      shift &= BitsPerJavaLong - 1; // semantics of Java shifts
      if (shift != 0) {
        // sign_bits_mask has a 1 in every position that could hold a
        // sign-extension bit produced by the arithmetic shift.
        const julong sign_bits_mask = ~(((julong)CONST64(1) << (julong)(BitsPerJavaLong - shift)) -1);
        // If the AND'ing of the 2 masks has no bits, then only original shifted
        // bits survive. NO sign-extension bits survive the maskings.
        if( (sign_bits_mask & mask) == 0 ) {
          // Use zero-fill shift instead
          Node *zshift = phase->transform(new URShiftLNode(in1->in(1), in1->in(2)));
          return new AndLNode(zshift, in(2));
        }
      }
    }
  }

  // Search for GraphKit::mark_word_test patterns and fold the test if the result is statically known
  // The pattern is either "markword_load & pattern" or
  // "Phi(markword_load, prototype_header_load) & pattern" (see the offset
  // checks below). When the queried property is statically known, the left
  // input is replaced in place: with in(2) so the AND becomes mask & mask
  // (== mask, i.e. "bits set"), or with 0 so it becomes 0 & mask (== 0).
  Node* load1 = in(1);
  Node* load2 = nullptr;
  if (load1->is_Phi() && phase->type(load1)->isa_long()) {
    load1 = in(1)->in(1);
    load2 = in(1)->in(2);
  }
  if (load1 != nullptr && load1->is_Load() && phase->type(load1)->isa_long() &&
      (load2 == nullptr || (load2->is_Load() && phase->type(load2)->isa_long()))) {
    const TypePtr* adr_t1 = phase->type(load1->in(MemNode::Address))->isa_ptr();
    const TypePtr* adr_t2 = (load2 != nullptr) ? phase->type(load2->in(MemNode::Address))->isa_ptr() : nullptr;
    // load1 must read the object's mark word; load2 (if any) the klass
    // prototype header.
    if (adr_t1 != nullptr && adr_t1->offset() == oopDesc::mark_offset_in_bytes() &&
        (load2 == nullptr || (adr_t2 != nullptr && adr_t2->offset() == in_bytes(Klass::prototype_header_offset())))) {
      if (mask == markWord::inline_type_pattern) {
        if (adr_t1->is_inlinetypeptr()) {
          set_req_X(1, in(2), phase);
          return this;
        } else if (!adr_t1->can_be_inline_type()) {
          set_req_X(1, phase->longcon(0), phase);
          return this;
        }
      } else if (mask == markWord::null_free_array_bit_in_place) {
        if (adr_t1->is_null_free()) {
          set_req_X(1, in(2), phase);
          return this;
        } else if (adr_t1->is_not_null_free()) {
          set_req_X(1, phase->longcon(0), phase);
          return this;
        }
      } else if (mask == markWord::flat_array_bit_in_place) {
        if (adr_t1->is_flat()) {
          set_req_X(1, in(2), phase);
          return this;
        } else if (adr_t1->is_not_flat()) {
          set_req_X(1, phase->longcon(0), phase);
          return this;
        }
      }
    }
  }

  return MulNode::Ideal(phase, can_reshape);
}
920
921 LShiftNode* LShiftNode::make(Node* in1, Node* in2, BasicType bt) {
922 switch (bt) {
923 case T_INT:
924 return new LShiftINode(in1, in2);
925 case T_LONG:
926 return new LShiftLNode(in1, in2);
927 default:
928 fatal("Not implemented for %s", type2name(bt));
929 }
930 return nullptr;
931 }
932
933 // Returns whether the shift amount is constant. If so, sets count.
934 static bool const_shift_count(PhaseGVN* phase, const Node* shift_node, int* count) {
935 const TypeInt* tcount = phase->type(shift_node->in(2))->isa_int();
936 if (tcount != nullptr && tcount->is_con()) {
937 *count = tcount->get_con();
938 return true;
939 }
940 return false;
941 }
942
943 // Returns whether the shift amount is constant. If so, sets real_shift and masked_shift.
944 static bool mask_shift_amount(PhaseGVN* phase, const Node* shift_node, uint nBits, int& real_shift, uint& masked_shift) {
945 if (const_shift_count(phase, shift_node, &real_shift)) {
946 masked_shift = real_shift & (nBits - 1);
947 return true;
948 }
949 return false;
950 }
951
952 // Convenience for when we don't care about the real amount
953 static bool mask_shift_amount(PhaseGVN* phase, const Node* shift_node, uint nBits, uint& masked_shift) {
954 int real_shift;
955 return mask_shift_amount(phase, shift_node, nBits, real_shift, masked_shift);
956 }
957
958 // Use this in ::Ideal only with shiftNode == this!
959 // Returns the masked shift amount if constant or 0 if not constant.
960 static uint mask_and_replace_shift_amount(PhaseGVN* phase, Node* shift_node, uint nBits) {
961 int real_shift;
962 uint masked_shift;
963 if (mask_shift_amount(phase, shift_node, nBits, real_shift, masked_shift)) {
964 if (masked_shift == 0) {
965 // Let Identity() handle 0 shift count.
966 return 0;
967 }
968
969 if (real_shift != (int)masked_shift) {
970 PhaseIterGVN* igvn = phase->is_IterGVN();
971 if (igvn != nullptr) {
972 igvn->_worklist.push(shift_node);
973 }
974 shift_node->set_req(2, phase->intcon(masked_shift)); // Replace shift count with masked value.
975 }
976 return masked_shift;
977 }
978 // Not a shift by a constant.
979 return 0;
980 }
981
// Called with
//   outer_shift = (_ << rhs_outer)
// We are looking for the pattern:
//   outer_shift = ((X << rhs_inner) << rhs_outer)
// where rhs_outer and rhs_inner are constant
// we denote inner_shift the nested expression (X << rhs_inner)
// con_inner = rhs_inner % nbits and con_outer = rhs_outer % nbits
// where nbits is the number of bits of the shifts
//
// There are 2 cases:
// if con_outer + con_inner >= nbits => 0
// if con_outer + con_inner < nbits => X << (con_outer + con_inner)
//
// Returns the replacement node, or nullptr if the pattern does not match.
static Node* collapse_nested_shift_left(PhaseGVN* phase, const Node* outer_shift, uint con_outer, BasicType bt) {
  assert(bt == T_LONG || bt == T_INT, "Unexpected type");
  const Node* inner_shift = outer_shift->in(1);
  if (inner_shift->Opcode() != Op_LShift(bt)) {
    return nullptr;
  }

  uint nbits = bits_per_java_integer(bt);
  uint con_inner;
  if (!mask_shift_amount(phase, inner_shift, nbits, con_inner)) {
    return nullptr; // inner shift count is not a constant
  }

  if (con_inner == 0) {
    // We let the Identity() of the inner shift do its job.
    return nullptr;
  }

  if (con_outer + con_inner >= nbits) {
    // All bits are shifted out: the result is zero.
    // While it might be tempting to use
    //   phase->zerocon(bt);
    // it would be incorrect: zerocon caches nodes, while Ideal is only allowed
    // to return a new node, this or nullptr, but not an old (cached) node.
    return ConNode::make(TypeInteger::zero(bt));
  }

  // con0 + con1 < nbits ==> actual shift happens now
  Node* con0_plus_con1 = phase->intcon(con_outer + con_inner);
  return LShiftNode::make(inner_shift->in(1), con0_plus_con1, bt);
}
1024
1025 //------------------------------Identity---------------------------------------
1026 Node* LShiftINode::Identity(PhaseGVN* phase) {
1027 return IdentityIL(phase, T_INT);
1028 }
1029
1030 Node* LShiftNode::IdealIL(PhaseGVN* phase, bool can_reshape, BasicType bt) {
1031 uint con = mask_and_replace_shift_amount(phase, this, bits_per_java_integer(bt));
1032 if (con == 0) {
1033 return nullptr;
1034 }
1035
1036 // If the right input is a constant, and the left input is an add of a
1037 // constant, flatten the tree: (X+con1)<<con0 ==> X<<con0 + con1<<con0
1038 Node* add1 = in(1);
1039 int add1_op = add1->Opcode();
1040 if (add1_op == Op_Add(bt)) { // Left input is an add?
1041 assert(add1 != add1->in(1), "dead loop in LShiftINode::Ideal");
1042
1043 // Transform is legal, but check for profit. Avoid breaking 'i2s'
1044 // and 'i2b' patterns which typically fold into 'StoreC/StoreB'.
1045 if (bt != T_INT || con < 16) {
1046 // Left input is an add of the same number?
1047 if (con != (bits_per_java_integer(bt) - 1) && add1->in(1) == add1->in(2)) {
1048 // Convert "(x + x) << c0" into "x << (c0 + 1)"
1049 // In general, this optimization cannot be applied for c0 == 31 (for LShiftI) since
1050 // 2x << 31 != x << 32 = x << 0 = x (e.g. x = 1: 2 << 31 = 0 != 1)
1051 // or c0 != 63 (for LShiftL) because:
1052 // (x + x) << 63 = 2x << 63, while
1053 // (x + x) << 63 --transform--> x << 64 = x << 0 = x (!= 2x << 63, for example for x = 1)
1054 // According to the Java spec, chapter 15.19, we only consider the six lowest-order bits of the right-hand operand
1055 // (i.e. "right-hand operand" & 0b111111). Therefore, x << 64 is the same as x << 0 (64 = 0b10000000 & 0b0111111 = 0).
1056 return LShiftNode::make(add1->in(1), phase->intcon(con + 1), bt);
1057 }
1058
1059 // Left input is an add of a constant?
1060 const TypeInteger* t12 = phase->type(add1->in(2))->isa_integer(bt);
1061 if (t12 != nullptr && t12->is_con()) { // Left input is an add of a con?
1062 // Compute X << con0
1063 Node* lsh = phase->transform(LShiftNode::make(add1->in(1), in(2), bt));
1064 // Compute X<<con0 + (con1<<con0)
1065 return AddNode::make(lsh, phase->integercon(java_shift_left(t12->get_con_as_long(bt), con, bt), bt), bt);
1066 }
1067 }
1068 }
1069 // Check for "(con0 - X) << con1"
1070 // Transform is legal, but check for profit. Avoid breaking 'i2s'
1071 // and 'i2b' patterns which typically fold into 'StoreC/StoreB'.
1072 if (add1_op == Op_Sub(bt) && (bt != T_INT || con < 16)) { // Left input is a sub?
1073 // Left input is a sub from a constant?
1074 const TypeInteger* t11 = phase->type(add1->in(1))->isa_integer(bt);
1075 if (t11 != nullptr && t11->is_con()) {
1076 // Compute X << con0
1077 Node* lsh = phase->transform(LShiftNode::make(add1->in(2), in(2), bt));
1078 // Compute (con1<<con0) - (X<<con0)
1079 return SubNode::make(phase->integercon(java_shift_left(t11->get_con_as_long(bt), con, bt), bt), lsh, bt);
1080 }
1081 }
1082
1083 // Check for "(x >> C1) << C2"
1084 if (add1_op == Op_RShift(bt) || add1_op == Op_URShift(bt)) {
1085 int add1Con = 0;
1086 const_shift_count(phase, add1, &add1Con);
1087
1088 // Special case C1 == C2, which just masks off low bits
1089 if (add1Con > 0 && con == (uint)add1Con) {
1090 // Convert to "(x & -(1 << C2))"
1091 return MulNode::make_and(add1->in(1), phase->integercon(java_negate(java_shift_left(1, con, bt), bt), bt), bt);
1092 } else {
1093 // Wait until the right shift has been sharpened to the correct count
1094 if (add1Con > 0 && (uint)add1Con < bits_per_java_integer(bt)) {
1095 // As loop parsing can produce LShiftI nodes, we should wait until the graph is fully formed
1096 // to apply optimizations, otherwise we can inadvertently stop vectorization opportunities.
1097 if (phase->is_IterGVN()) {
1098 if (con > (uint)add1Con) {
1099 // Creates "(x << (C2 - C1)) & -(1 << C2)"
1100 Node* lshift = phase->transform(LShiftNode::make(add1->in(1), phase->intcon(con - add1Con), bt));
1101 return MulNode::make_and(lshift, phase->integercon(java_negate(java_shift_left(1, con, bt), bt), bt), bt);
1102 } else {
1103 assert(con < (uint)add1Con, "must be (%d < %d)", con, add1Con);
1104 // Creates "(x >> (C1 - C2)) & -(1 << C2)"
1105
1106 // Handle logical and arithmetic shifts
1107 Node* rshift;
1108 if (add1_op == Op_RShift(bt)) {
1109 rshift = phase->transform(RShiftNode::make(add1->in(1), phase->intcon(add1Con - con), bt));
1110 } else {
1111 rshift = phase->transform(URShiftNode::make(add1->in(1), phase->intcon(add1Con - con), bt));
1112 }
1113
1114 return MulNode::make_and(rshift, phase->integercon(java_negate(java_shift_left(1, con, bt)), bt), bt);
1115 }
1116 } else {
1117 phase->record_for_igvn(this);
1118 }
1119 }
1120 }
1121 }
1122
1123 // Check for "((x >> C1) & Y) << C2"
1124 if (add1_op == Op_And(bt)) {
1125 Node* add2 = add1->in(1);
1126 int add2_op = add2->Opcode();
1127 if (add2_op == Op_RShift(bt) || add2_op == Op_URShift(bt)) {
1128 // Special case C1 == C2, which just masks off low bits
1129 if (add2->in(2) == in(2)) {
1130 // Convert to "(x & (Y << C2))"
1131 Node* y_sh = phase->transform(LShiftNode::make(add1->in(2), phase->intcon(con), bt));
1132 return MulNode::make_and(add2->in(1), y_sh, bt);
1133 }
1134
1135 int add2Con = 0;
1136 const_shift_count(phase, add2, &add2Con);
1137 if (add2Con > 0 && (uint)add2Con < bits_per_java_integer(bt)) {
1138 if (phase->is_IterGVN()) {
1139 // Convert to "((x >> C1) << C2) & (Y << C2)"
1140
1141 // Make "(x >> C1) << C2", which will get folded away by the rule above
1142 Node* x_sh = phase->transform(LShiftNode::make(add2, phase->intcon(con), bt));
1143 // Make "Y << C2", which will simplify when Y is a constant
1144 Node* y_sh = phase->transform(LShiftNode::make(add1->in(2), phase->intcon(con), bt));
1145
1146 return MulNode::make_and(x_sh, y_sh, bt);
1147 } else {
1148 phase->record_for_igvn(this);
1149 }
1150 }
1151 }
1152 }
1153
1154 // Check for ((x & ((1<<(32-c0))-1)) << c0) which ANDs off high bits
1155 // before shifting them away.
1156 const jlong bits_mask = max_unsigned_integer(bt) >> con;
1157 assert(bt != T_INT || bits_mask == right_n_bits(bits_per_java_integer(bt)-con), "inconsistent");
1158 if (add1_op == Op_And(bt) &&
1159 phase->type(add1->in(2)) == TypeInteger::make(bits_mask, bt)) {
1160 return LShiftNode::make(add1->in(1), in(2), bt);
1161 }
1162
1163 // Collapse nested left-shifts with constant rhs:
1164 // (X << con1) << con2 ==> X << (con1 + con2)
1165 Node* doubleShift = collapse_nested_shift_left(phase, this, con, bt);
1166 if (doubleShift != nullptr) {
1167 return doubleShift;
1168 }
1169
1170 return nullptr;
1171 }
1172
1173 //------------------------------Ideal------------------------------------------
1174 Node* LShiftINode::Ideal(PhaseGVN *phase, bool can_reshape) {
1175 return IdealIL(phase, can_reshape, T_INT);
1176 }
1177
// Shared Value() implementation for LShiftI/LShiftL: computes the type
// (value range) of this left shift from the types of its inputs.
const Type* LShiftNode::ValueIL(PhaseGVN* phase, BasicType bt) const {
  const Type* t1 = phase->type(in(1));
  const Type* t2 = phase->type(in(2));
  // Either input is TOP ==> the result is TOP
  if (t1 == Type::TOP) {
    return Type::TOP;
  }
  if (t2 == Type::TOP) {
    return Type::TOP;
  }

  // Left input is ZERO ==> the result is ZERO.
  if (t1 == TypeInteger::zero(bt)) {
    return TypeInteger::zero(bt);
  }
  // Shift by zero does nothing
  if (t2 == TypeInt::ZERO) {
    return t1;
  }

  // Either input is BOTTOM ==> the result is BOTTOM
  if ((t1 == TypeInteger::bottom(bt)) || (t2 == TypeInt::INT) ||
      (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM)) {
    return TypeInteger::bottom(bt);
  }

  const TypeInteger* r1 = t1->is_integer(bt); // Handy access
  const TypeInt* r2 = t2->is_int(); // Handy access

  if (!r2->is_con()) {
    // Non-constant shift count: we cannot track the range through the shift.
    return TypeInteger::bottom(bt);
  }

  uint shift = r2->get_con();
  shift &= bits_per_java_integer(bt) - 1; // semantics of Java shifts
  // Shift by a multiple of 32/64 does nothing:
  if (shift == 0) {
    return t1;
  }

  // If the shift is a constant, shift the bounds of the type,
  // unless this could lead to an overflow.
  if (!r1->is_con()) {
    jlong lo = r1->lo_as_long(), hi = r1->hi_as_long();
#ifdef ASSERT
    if (bt == T_INT) {
      jint lo_int = r1->is_int()->_lo, hi_int = r1->is_int()->_hi;
      assert((java_shift_right(java_shift_left(lo, shift, bt), shift, bt) == lo) == (((lo_int << shift) >> shift) == lo_int), "inconsistent");
      assert((java_shift_right(java_shift_left(hi, shift, bt), shift, bt) == hi) == (((hi_int << shift) >> shift) == hi_int), "inconsistent");
    }
#endif
    // A bound survives the shift iff shifting left then right round-trips
    // (i.e. no significant bits were pushed past the sign bit).
    if (java_shift_right(java_shift_left(lo, shift, bt), shift, bt) == lo &&
        java_shift_right(java_shift_left(hi, shift, bt), shift, bt) == hi) {
      // No overflow.  The range shifts up cleanly.
      return TypeInteger::make(java_shift_left(lo, shift, bt),
                               java_shift_left(hi, shift, bt),
                               MAX2(r1->_widen, r2->_widen), bt);
    }
    return TypeInteger::bottom(bt);
  }

  // Both inputs constant: fold to the constant result.
  return TypeInteger::make(java_shift_left(r1->get_con_as_long(bt), shift, bt), bt);
}
1241
1242 //------------------------------Value------------------------------------------
1243 const Type* LShiftINode::Value(PhaseGVN* phase) const {
1244 return ValueIL(phase, T_INT);
1245 }
1246
1247 Node* LShiftNode::IdentityIL(PhaseGVN* phase, BasicType bt) {
1248 int count = 0;
1249 if (const_shift_count(phase, this, &count) && (count & (bits_per_java_integer(bt) - 1)) == 0) {
1250 // Shift by a multiple of 32/64 does nothing
1251 return in(1);
1252 }
1253 return this;
1254 }
1255
1256 //=============================================================================
1257 //------------------------------Identity---------------------------------------
1258 Node* LShiftLNode::Identity(PhaseGVN* phase) {
1259 return IdentityIL(phase, T_LONG);
1260 }
1261
1262 //------------------------------Ideal------------------------------------------
1263 Node* LShiftLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1264 return IdealIL(phase, can_reshape, T_LONG);
1265 }
1266
1267 //------------------------------Value------------------------------------------
1268 const Type* LShiftLNode::Value(PhaseGVN* phase) const {
1269 return ValueIL(phase, T_LONG);
1270 }
1271
1272 RShiftNode* RShiftNode::make(Node* in1, Node* in2, BasicType bt) {
1273 switch (bt) {
1274 case T_INT:
1275 return new RShiftINode(in1, in2);
1276 case T_LONG:
1277 return new RShiftLNode(in1, in2);
1278 default:
1279 fatal("Not implemented for %s", type2name(bt));
1280 }
1281 return nullptr;
1282 }
1283
1284
1285 //=============================================================================
1286 //------------------------------Identity---------------------------------------
// Shared Identity() implementation for RShiftI/RShiftL: detects no-op shifts
// (count is a multiple of the bit width) and the useless sign-extension idiom
// (x << n) >> n when x already fits in the narrowed range.
Node* RShiftNode::IdentityIL(PhaseGVN* phase, BasicType bt) {
  int count = 0;
  if (const_shift_count(phase, this, &count)) {
    if ((count & (bits_per_java_integer(bt) - 1)) == 0) {
      // Shift by a multiple of 32/64 does nothing
      return in(1);
    }
    // Check for useless sign-masking: (x << n) >> n with the same count node.
    if (in(1)->Opcode() == Op_LShift(bt) &&
        in(1)->req() == 3 &&
        in(1)->in(2) == in(2)) {
      count &= bits_per_java_integer(bt) - 1; // semantics of Java shifts
      // Compute masks for which this shifting doesn't change
      // (the range of values representable in the remaining low bits).
      jlong lo = (CONST64(-1) << (bits_per_java_integer(bt) - ((uint)count)-1)); // FFFF8000
      jlong hi = ~lo;               // 00007FFF
      const TypeInteger* t11 = phase->type(in(1)->in(1))->isa_integer(bt);
      if (t11 == nullptr) {
        return this;
      }
      // Does actual value fit inside of mask?
      if (lo <= t11->lo_as_long() && t11->hi_as_long() <= hi) {
        return in(1)->in(1);      // Then shifting is a nop
      }
    }
  }
  return this;
}
1314
1315 Node* RShiftINode::Identity(PhaseGVN* phase) {
1316 return IdentityIL(phase, T_INT);
1317 }
1318
// Shared Ideal() implementation for RShiftI/RShiftL. Protocol: returns
// NodeSentinel when the subclass caller should give up entirely (and report
// no progress), nullptr when this shared part made no progress but the
// subclass may try further rewrites, or a replacement node.
Node* RShiftNode::IdealIL(PhaseGVN* phase, bool can_reshape, BasicType bt) {
  // Inputs may be TOP if they are dead.
  const TypeInteger* t1 = phase->type(in(1))->isa_integer(bt);
  if (t1 == nullptr) {
    return NodeSentinel; // Left input has no integer type (e.g. TOP): give up.
  }
  int shift = mask_and_replace_shift_amount(phase, this, bits_per_java_integer(bt));
  if (shift == 0) {
    return NodeSentinel; // Shift count is not a non-zero constant: give up.
  }

  // Check for (x & 0xFF000000) >> 24, whose mask can be made smaller.
  // and convert to (x >> 24) & (0xFF000000 >> 24) = x >> 24
  // Such expressions arise normally from shift chains like (byte)(x >> 24).
  const Node* and_node = in(1);
  if (and_node->Opcode() != Op_And(bt)) {
    return nullptr;
  }
  const TypeInteger* mask_t = phase->type(and_node->in(2))->isa_integer(bt);
  if (mask_t != nullptr && mask_t->is_con()) {
    jlong maskbits = mask_t->get_con_as_long(bt);
    // Convert to "(x >> shift) & (mask >> shift)"
    // (signed shift of the mask keeps the transformation exact).
    Node* shr_nomask = phase->transform(RShiftNode::make(and_node->in(1), in(2), bt));
    return MulNode::make_and(shr_nomask, phase->integercon(maskbits >> shift, bt), bt);
  }
  return nullptr;
}
1346
// Int-specific Ideal(): runs the shared IdealIL first, then recognizes the
// sign-extension idioms (x << 16) >> 16 over short loads and
// (x << 24) >> 24 over byte loads, where the extension is redundant.
Node* RShiftINode::Ideal(PhaseGVN* phase, bool can_reshape) {
  Node* progress = IdealIL(phase, can_reshape, T_INT);
  if (progress == NodeSentinel) {
    // Shared part says give up entirely (dead input or non-constant count).
    return nullptr;
  }
  if (progress != nullptr) {
    return progress;
  }
  int shift = mask_and_replace_shift_amount(phase, this, BitsPerJavaInteger);
  assert(shift != 0, "handled by IdealIL");

  // Check for "(short[i] <<16)>>16" which simply sign-extends
  const Node *shl = in(1);
  if (shl->Opcode() != Op_LShiftI) {
    return nullptr;
  }

  const TypeInt* left_shift_t = phase->type(shl->in(2))->isa_int();
  if (left_shift_t == nullptr) {
    return nullptr;
  }
  if (shift == 16 && left_shift_t->is_con(16)) {
    Node *ld = shl->in(1);
    if (ld->Opcode() == Op_LoadS) {
      // Sign extension is just useless here. Return a RShiftI of zero instead
      // returning 'ld' directly. We cannot return an old Node directly as
      // that is the job of 'Identity' calls and Identity calls only work on
      // direct inputs ('ld' is an extra Node removed from 'this'). The
      // combined optimization requires Identity only return direct inputs.
      set_req_X(1, ld, phase);
      set_req_X(2, phase->intcon(0), phase);
      return this;
    }
    else if (can_reshape &&
             ld->Opcode() == Op_LoadUS &&
             ld->outcnt() == 1 && ld->unique_out() == shl)
      // Replace zero-extension-load with sign-extension-load
      // (safe only when the shift is the load's unique user).
      return ld->as_Load()->convert_to_signed_load(*phase);
  }

  // Check for "(byte[i] <<24)>>24" which simply sign-extends
  if (shift == 24 && left_shift_t->is_con(24)) {
    Node *ld = shl->in(1);
    if (ld->Opcode() == Op_LoadB) {
      // Sign extension is just useless here
      set_req_X(1, ld, phase);
      set_req_X(2, phase->intcon(0), phase);
      return this;
    }
  }

  return nullptr;
}
1400
// Shared Value() implementation for RShiftI/RShiftL: computes the type
// (value range) of this signed right shift from the types of its inputs.
const Type* RShiftNode::ValueIL(PhaseGVN* phase, BasicType bt) const {
  const Type* t1 = phase->type(in(1));
  const Type* t2 = phase->type(in(2));
  // Either input is TOP ==> the result is TOP
  if (t1 == Type::TOP) {
    return Type::TOP;
  }
  if (t2 == Type::TOP) {
    return Type::TOP;
  }

  // Left input is ZERO ==> the result is ZERO.
  if (t1 == TypeInteger::zero(bt)) {
    return TypeInteger::zero(bt);
  }
  // Shift by zero does nothing
  if (t2 == TypeInt::ZERO) {
    return t1;
  }

  // Either input is BOTTOM ==> the result is BOTTOM
  if (t1 == Type::BOTTOM || t2 == Type::BOTTOM) {
    return TypeInteger::bottom(bt);
  }

  // NOTE(review): isa_* returns null for non-integer types; the dereferences
  // below presume t1/t2 are integer types at this point — confirm callers
  // cannot reach here with other non-TOP/non-BOTTOM types.
  const TypeInteger* r1 = t1->isa_integer(bt);
  const TypeInt* r2 = t2->isa_int();

  // If the shift is a constant, just shift the bounds of the type.
  // For example, if the shift is 31/63, we just propagate sign bits.
  if (!r1->is_con() && r2->is_con()) {
    uint shift = r2->get_con();
    shift &= bits_per_java_integer(bt) - 1;  // semantics of Java shifts
    // Shift by a multiple of 32/64 does nothing:
    if (shift == 0) {
      return t1;
    }
    // Calculate reasonably aggressive bounds for the result.
    // This is necessary if we are to correctly type things
    // like (x<<24>>24) == ((byte)x).
    // An arithmetic shift is monotone, so shifting both bounds is exact.
    jlong lo = r1->lo_as_long() >> (jint)shift;
    jlong hi = r1->hi_as_long() >> (jint)shift;
    assert(lo <= hi, "must have valid bounds");
#ifdef ASSERT
    if (bt == T_INT) {
      jint lo_verify = checked_cast<jint>(r1->lo_as_long()) >> (jint)shift;
      jint hi_verify = checked_cast<jint>(r1->hi_as_long()) >> (jint)shift;
      assert((checked_cast<jint>(lo) == lo_verify) && (checked_cast<jint>(hi) == hi_verify), "inconsistent");
    }
#endif
    const TypeInteger* ti = TypeInteger::make(lo, hi, MAX2(r1->_widen,r2->_widen), bt);
#ifdef ASSERT
    // Make sure we get the sign-capture idiom correct.
    if (shift == bits_per_java_integer(bt) - 1) {
      if (r1->lo_as_long() >= 0) {
        assert(ti == TypeInteger::zero(bt), ">>31/63 of + is 0");
      }
      if (r1->hi_as_long() < 0) {
        assert(ti == TypeInteger::minus_1(bt), ">>31/63 of - is -1");
      }
    }
#endif
    return ti;
  }

  if (!r1->is_con() || !r2->is_con()) {
    // If the left input is non-negative the result must also be non-negative, regardless of what the right input is.
    if (r1->lo_as_long() >= 0) {
      return TypeInteger::make(0, r1->hi_as_long(), MAX2(r1->_widen, r2->_widen), bt);
    }

    // Conversely, if the left input is negative then the result must be negative.
    if (r1->hi_as_long() <= -1) {
      return TypeInteger::make(r1->lo_as_long(), -1, MAX2(r1->_widen, r2->_widen), bt);
    }

    return TypeInteger::bottom(bt);
  }

  // Signed shift right
  // (both inputs constant: fold to the constant result).
  return TypeInteger::make(r1->get_con_as_long(bt) >> (r2->get_con() & (bits_per_java_integer(bt) - 1)), bt);
}
1483
1484 const Type* RShiftINode::Value(PhaseGVN* phase) const {
1485 return ValueIL(phase, T_INT);
1486 }
1487
1488 //=============================================================================
1489 //------------------------------Identity---------------------------------------
1490 Node* RShiftLNode::Identity(PhaseGVN* phase) {
1491 return IdentityIL(phase, T_LONG);
1492 }
1493
1494 Node* RShiftLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1495 Node* progress = IdealIL(phase, can_reshape, T_LONG);
1496 if (progress == NodeSentinel) {
1497 return nullptr;
1498 }
1499 return progress;
1500 }
1501
1502 const Type* RShiftLNode::Value(PhaseGVN* phase) const {
1503 return ValueIL(phase, T_LONG);
1504 }
1505
1506 URShiftNode* URShiftNode::make(Node* in1, Node* in2, BasicType bt) {
1507 switch (bt) {
1508 case T_INT:
1509 return new URShiftINode(in1, in2);
1510 case T_LONG:
1511 return new URShiftLNode(in1, in2);
1512 default:
1513 fatal("Not implemented for %s", type2name(bt));
1514 }
1515 return nullptr;
1516 }
1517
1518 //=============================================================================
1519 //------------------------------Identity---------------------------------------
// Returns an existing equivalent node when this unsigned right shift is a
// no-op: a shift by a multiple of 32, the word-rounding idiom from
// new-array length computation, or a shift amount known to be zero.
Node* URShiftINode::Identity(PhaseGVN* phase) {
  int count = 0;
  if (const_shift_count(phase, this, &count) && (count & (BitsPerJavaInteger - 1)) == 0) {
    // Shift by a multiple of 32 does nothing
    return in(1);
  }

  // Check for "((x << LogBytesPerWord) + (wordSize-1)) >> LogBytesPerWord" which is just "x".
  // Happens during new-array length computation.
  // Safe if 'x' is in the range [0..(max_int>>LogBytesPerWord)]
  Node *add = in(1);
  if (add->Opcode() == Op_AddI) {
    const TypeInt *t2 = phase->type(add->in(2))->isa_int();
    if (t2 && t2->is_con(wordSize - 1) &&
        add->in(1)->Opcode() == Op_LShiftI) {
      // Check that shift_counts are LogBytesPerWord.
      Node          *lshift_count   = add->in(1)->in(2);
      const TypeInt *t_lshift_count = phase->type(lshift_count)->isa_int();
      if (t_lshift_count && t_lshift_count->is_con(LogBytesPerWord) &&
          t_lshift_count == phase->type(in(2))) {
        Node          *x   = add->in(1)->in(1);
        const TypeInt *t_x = phase->type(x)->isa_int();
        // Range check guarantees neither the left shift nor the rounding
        // add can overflow, so the round-trip is exact.
        if (t_x != nullptr && 0 <= t_x->_lo && t_x->_hi <= (max_jint>>LogBytesPerWord)) {
          return x;
        }
      }
    }
  }

  // A shift amount whose type is exactly {0} is a no-op.
  return (phase->type(in(2))->higher_equal(TypeInt::ZERO)) ? in(1) : this;
}
1551
1552 //------------------------------Ideal------------------------------------------
// Rewrites URShiftI into cheaper or more canonical shapes: merges chained
// unsigned shifts, strips useless rounding adds, shortens AND masks, and
// zero-extends (x << z) >>> z directly. Returns a new node or nullptr.
Node* URShiftINode::Ideal(PhaseGVN* phase, bool can_reshape) {
  int con = mask_and_replace_shift_amount(phase, this, BitsPerJavaInteger);
  if (con == 0) {
    // Shift count is not a non-zero constant; nothing to do here.
    return nullptr;
  }

  // We'll be wanting the right-shift amount as a mask of that many bits
  const int mask = right_n_bits(BitsPerJavaInteger - con);

  int in1_op = in(1)->Opcode();

  // Check for ((x>>>a)>>>b) and replace with (x>>>(a+b)) when a+b < 32
  if( in1_op == Op_URShiftI ) {
    const TypeInt *t12 = phase->type( in(1)->in(2) )->isa_int();
    if( t12 && t12->is_con() ) { // Right input is a constant
      assert( in(1) != in(1)->in(1), "dead loop in URShiftINode::Ideal" );
      const int con2 = t12->get_con() & 31; // Shift count is always masked
      const int con3 = con+con2;
      if( con3 < 32 )           // Only merge shifts if total is < 32
        return new URShiftINode( in(1)->in(1), phase->intcon(con3) );
    }
  }

  // Check for ((x << z) + Y) >>> z.  Replace with x + con>>>z
  // The idiom for rounding to a power of 2 is "(Q+(2^z-1)) >>> z".
  // If Q is "X << z" the rounding is useless.  Look for patterns like
  // ((X<<Z) + Y) >>> Z  and replace with (X + Y>>>Z) & Z-mask.
  Node *add = in(1);
  const TypeInt *t2 = phase->type(in(2))->isa_int();
  if (in1_op == Op_AddI) {
    Node *lshl = add->in(1);
    if( lshl->Opcode() == Op_LShiftI &&
        phase->type(lshl->in(2)) == t2 ) {
      // Same shift count on both sides: the <<z bits of X are recovered
      // exactly, and Y only contributes its high bits (Y>>>Z).
      Node *y_z = phase->transform( new URShiftINode(add->in(2),in(2)) );
      Node *sum = phase->transform( new AddINode( lshl->in(1), y_z ) );
      return new AndINode( sum, phase->intcon(mask) );
    }
  }

  // Check for (x & mask) >>> z.  Replace with (x >>> z) & (mask >>> z)
  // This shortens the mask.  Also, if we are extracting a high byte and
  // storing it to a buffer, the mask will be removed completely.
  Node *andi = in(1);
  if( in1_op == Op_AndI ) {
    const TypeInt *t3 = phase->type( andi->in(2) )->isa_int();
    if( t3 && t3->is_con() ) { // Right input is a constant
      jint mask2 = t3->get_con();
      mask2 >>= con;  // *signed* shift downward (high-order zeroes do not help)
      Node *newshr = phase->transform( new URShiftINode(andi->in(1), in(2)) );
      return new AndINode(newshr, phase->intcon(mask2));
      // The negative values are easier to materialize than positive ones.
      // A typical case from address arithmetic is ((x & ~15) >> 4).
      // It's better to change that to ((x >> 4) & ~0) versus
      // ((x >> 4) & 0x0FFFFFFF).  The difference is greatest in LP64.
    }
  }

  // Check for "(X << z ) >>> z" which simply zero-extends
  Node *shl = in(1);
  if( in1_op == Op_LShiftI &&
      phase->type(shl->in(2)) == t2 )
    return new AndINode( shl->in(1), phase->intcon(mask) );

  // Check for (x >> n) >>> 31. Replace with (x >>> 31)
  // (only the sign bit of x survives either way).
  Node *shr = in(1);
  if ( in1_op == Op_RShiftI ) {
    Node *in11 = shr->in(1);
    Node *in12 = shr->in(2);
    const TypeInt *t11 = phase->type(in11)->isa_int();
    const TypeInt *t12 = phase->type(in12)->isa_int();
    if ( t11 && t2 && t2->is_con(31) && t12 && t12->is_con() ) {
      return new URShiftINode(in11, phase->intcon(31));
    }
  }

  return nullptr;
}
1630
1631 //------------------------------Value------------------------------------------
// A URShiftINode shifts its input1 right by input2 amount (unsigned).
// Compute the type lattice value of (in(1) >>> in(2)) for ints.
const Type* URShiftINode::Value(PhaseGVN* phase) const {
  // (This is a near clone of RShiftINode::Value.)
  const Type *t1 = phase->type( in(1) );
  const Type *t2 = phase->type( in(2) );
  // Either input is TOP ==> the result is TOP
  if( t1 == Type::TOP ) return Type::TOP;
  if( t2 == Type::TOP ) return Type::TOP;

  // Left input is ZERO ==> the result is ZERO.
  if( t1 == TypeInt::ZERO ) return TypeInt::ZERO;
  // Shift by zero does nothing
  if( t2 == TypeInt::ZERO ) return t1;

  // Either input is BOTTOM ==> the result is BOTTOM
  if (t1 == Type::BOTTOM || t2 == Type::BOTTOM)
    return TypeInt::INT;

  // Unknown shift count ==> cannot narrow the result.
  if (t2 == TypeInt::INT)
    return TypeInt::INT;

  const TypeInt *r1 = t1->is_int(); // Handy access
  const TypeInt *r2 = t2->is_int(); // Handy access

  if (r2->is_con()) {
    uint shift = r2->get_con();
    shift &= BitsPerJavaInteger-1; // semantics of Java shifts
    // Shift by a multiple of 32 does nothing:
    if (shift == 0) return t1;
    // Calculate reasonably aggressive bounds for the result.
    jint lo = (juint)r1->_lo >> (juint)shift;
    jint hi = (juint)r1->_hi >> (juint)shift;
    if (r1->_hi >= 0 && r1->_lo < 0) {
      // If the type has both negative and positive values,
      // there are two separate sub-domains to worry about:
      // The positive half and the negative half.
      jint neg_lo = lo;
      jint neg_hi = (juint)-1 >> (juint)shift;
      jint pos_lo = (juint) 0 >> (juint)shift;
      jint pos_hi = hi;
      lo = MIN2(neg_lo, pos_lo); // == 0
      hi = MAX2(neg_hi, pos_hi); // == -1 >>> shift;
    }
    assert(lo <= hi, "must have valid bounds");
    const TypeInt* ti = TypeInt::make(lo, hi, MAX2(r1->_widen,r2->_widen));
#ifdef ASSERT
    // Make sure we get the sign-capture idiom correct.
    if (shift == BitsPerJavaInteger-1) {
      if (r1->_lo >= 0) assert(ti == TypeInt::ZERO, ">>>31 of + is 0");
      if (r1->_hi < 0) assert(ti == TypeInt::ONE, ">>>31 of - is +1");
    }
#endif
    return ti;
  }

  //
  // Do not support shifted oops in info for GC
  //
  // else if( t1->base() == Type::InstPtr ) {
  //
  // const TypeInstPtr *o = t1->is_instptr();
  // if( t1->singleton() )
  // return TypeInt::make( ((uint32_t)o->const_oop() + o->_offset) >> shift );
  // }
  // else if( t1->base() == Type::KlassPtr ) {
  // const TypeKlassPtr *o = t1->is_klassptr();
  // if( t1->singleton() )
  // return TypeInt::make( ((uint32_t)o->const_oop() + o->_offset) >> shift );
  // }

  // Non-constant shift count: give up and return the full int range.
  return TypeInt::INT;
}
1704
1705 //=============================================================================
1706 //------------------------------Identity---------------------------------------
1707 Node* URShiftLNode::Identity(PhaseGVN* phase) {
1708 int count = 0;
1709 if (const_shift_count(phase, this, &count) && (count & (BitsPerJavaLong - 1)) == 0) {
1710 // Shift by a multiple of 64 does nothing
1711 return in(1);
1712 }
1713 return this;
1714 }
1715
1716 //------------------------------Ideal------------------------------------------
// Idealize an unsigned right shift of longs: canonicalize the shift count
// and rewrite shift/mask idioms (mirrors URShiftINode::Ideal for 64 bits).
// Returns a replacement node, or nullptr if no transformation applies.
Node* URShiftLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  // Canonicalize the shift count to 0..63 (Java shift semantics). A result
  // of 0 means the count is unknown or zero — nothing to do here.
  int con = mask_and_replace_shift_amount(phase, this, BitsPerJavaLong);
  if (con == 0) {
    return nullptr;
  }

  // We'll be wanting the right-shift amount as a mask of that many bits,
  // i.e. the set of bits that survive an unsigned shift right by 'con'.
  const jlong mask = jlong(max_julong >> con);

  // Check for ((x << z) + Y) >>> z. Replace with (x + Y>>>z) & z-mask.
  // The idiom for rounding to a power of 2 is "(Q+(2^z-1)) >>> z".
  // If Q is "X << z" the rounding is useless. Look for patterns like
  // ((X<<Z) + Y) >>> Z and replace with (X + Y>>>Z) & Z-mask.
  Node *add = in(1);
  const TypeInt *t2 = phase->type(in(2))->isa_int();
  if (add->Opcode() == Op_AddL) {
    Node *lshl = add->in(1);
    // Both shifts must use the very same count node for the rewrite to hold.
    if( lshl->Opcode() == Op_LShiftL &&
        phase->type(lshl->in(2)) == t2 ) {
      Node *y_z = phase->transform( new URShiftLNode(add->in(2),in(2)) );
      Node *sum = phase->transform( new AddLNode( lshl->in(1), y_z ) );
      return new AndLNode( sum, phase->longcon(mask) );
    }
  }

  // Check for (x & mask) >>> z. Replace with (x >>> z) & (mask >>> z)
  // This shortens the mask. Also, if we are extracting a high byte and
  // storing it to a buffer, the mask will be removed completely.
  Node *andi = in(1);
  if( andi->Opcode() == Op_AndL ) {
    const TypeLong *t3 = phase->type( andi->in(2) )->isa_long();
    if( t3 && t3->is_con() ) { // Right input is a constant
      jlong mask2 = t3->get_con();
      mask2 >>= con; // *signed* shift downward (high-order zeroes do not help)
      Node *newshr = phase->transform( new URShiftLNode(andi->in(1), in(2)) );
      return new AndLNode(newshr, phase->longcon(mask2));
    }
  }

  // Check for "(X << z ) >>> z" which simply zero-extends
  Node *shl = in(1);
  if( shl->Opcode() == Op_LShiftL &&
      phase->type(shl->in(2)) == t2 )
    return new AndLNode( shl->in(1), phase->longcon(mask) );

  // Check for (x >> n) >>> 63. Replace with (x >>> 63): only the sign bit
  // survives, so the intermediate signed shift is irrelevant.
  Node *shr = in(1);
  if ( shr->Opcode() == Op_RShiftL ) {
    Node *in11 = shr->in(1);
    Node *in12 = shr->in(2);
    const TypeLong *t11 = phase->type(in11)->isa_long();
    const TypeInt *t12 = phase->type(in12)->isa_int();
    if ( t11 && t2 && t2->is_con(63) && t12 && t12->is_con() ) {
      return new URShiftLNode(in11, phase->intcon(63));
    }
  }
  return nullptr;
}
1775
1776 //------------------------------Value------------------------------------------
// A URShiftLNode shifts its input1 right by input2 amount (unsigned).
// Compute the type lattice value of (in(1) >>> in(2)) for longs.
const Type* URShiftLNode::Value(PhaseGVN* phase) const {
  // (This is a near clone of RShiftLNode::Value.)
  const Type *t1 = phase->type( in(1) );
  const Type *t2 = phase->type( in(2) );
  // Either input is TOP ==> the result is TOP
  if( t1 == Type::TOP ) return Type::TOP;
  if( t2 == Type::TOP ) return Type::TOP;

  // Left input is ZERO ==> the result is ZERO.
  if( t1 == TypeLong::ZERO ) return TypeLong::ZERO;
  // Shift by zero does nothing
  if( t2 == TypeInt::ZERO ) return t1;

  // Either input is BOTTOM ==> the result is BOTTOM
  if (t1 == Type::BOTTOM || t2 == Type::BOTTOM)
    return TypeLong::LONG;

  // Unknown shift count ==> cannot narrow the result.
  if (t2 == TypeInt::INT)
    return TypeLong::LONG;

  const TypeLong *r1 = t1->is_long(); // Handy access
  const TypeInt *r2 = t2->is_int (); // Handy access

  if (r2->is_con()) {
    uint shift = r2->get_con();
    shift &= BitsPerJavaLong - 1; // semantics of Java shifts
    // Shift by a multiple of 64 does nothing:
    if (shift == 0) return t1;
    // Calculate reasonably aggressive bounds for the result.
    jlong lo = (julong)r1->_lo >> (juint)shift;
    jlong hi = (julong)r1->_hi >> (juint)shift;
    if (r1->_hi >= 0 && r1->_lo < 0) {
      // If the type has both negative and positive values,
      // there are two separate sub-domains to worry about:
      // The positive half and the negative half.
      jlong neg_lo = lo;
      jlong neg_hi = (julong)-1 >> (juint)shift;
      jlong pos_lo = (julong) 0 >> (juint)shift;
      jlong pos_hi = hi;
      //lo = MIN2(neg_lo, pos_lo); // == 0
      lo = neg_lo < pos_lo ? neg_lo : pos_lo;
      //hi = MAX2(neg_hi, pos_hi); // == -1 >>> shift;
      hi = neg_hi > pos_hi ? neg_hi : pos_hi;
    }
    assert(lo <= hi, "must have valid bounds");
    const TypeLong* tl = TypeLong::make(lo, hi, MAX2(r1->_widen,r2->_widen));
#ifdef ASSERT
    // Make sure we get the sign-capture idiom correct.
    if (shift == BitsPerJavaLong - 1) {
      if (r1->_lo >= 0) assert(tl == TypeLong::ZERO, ">>>63 of + is 0");
      if (r1->_hi < 0) assert(tl == TypeLong::ONE, ">>>63 of - is +1");
    }
#endif
    return tl;
  }

  return TypeLong::LONG; // Give up
}
1836
1837 //=============================================================================
1838 //------------------------------Ideal------------------------------------------
1839 Node* FmaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1840 // We canonicalize the node by converting "(-a)*b+c" into "b*(-a)+c"
1841 // This reduces the number of rules in the matcher, as we only need to check
1842 // for negations on the second argument, and not the symmetric case where
1843 // the first argument is negated.
1844 if (in(1)->is_Neg() && !in(2)->is_Neg()) {
1845 swap_edges(1, 2);
1846 return this;
1847 }
1848 return nullptr;
1849 }
1850
1851 //=============================================================================
1852 //------------------------------Value------------------------------------------
1853 const Type* FmaDNode::Value(PhaseGVN* phase) const {
1854 const Type *t1 = phase->type(in(1));
1855 if (t1 == Type::TOP) return Type::TOP;
1856 if (t1->base() != Type::DoubleCon) return Type::DOUBLE;
1857 const Type *t2 = phase->type(in(2));
1858 if (t2 == Type::TOP) return Type::TOP;
1859 if (t2->base() != Type::DoubleCon) return Type::DOUBLE;
1860 const Type *t3 = phase->type(in(3));
1861 if (t3 == Type::TOP) return Type::TOP;
1862 if (t3->base() != Type::DoubleCon) return Type::DOUBLE;
1863 #ifndef __STDC_IEC_559__
1864 return Type::DOUBLE;
1865 #else
1866 double d1 = t1->getd();
1867 double d2 = t2->getd();
1868 double d3 = t3->getd();
1869 return TypeD::make(fma(d1, d2, d3));
1870 #endif
1871 }
1872
1873 //=============================================================================
1874 //------------------------------Value------------------------------------------
1875 const Type* FmaFNode::Value(PhaseGVN* phase) const {
1876 const Type *t1 = phase->type(in(1));
1877 if (t1 == Type::TOP) return Type::TOP;
1878 if (t1->base() != Type::FloatCon) return Type::FLOAT;
1879 const Type *t2 = phase->type(in(2));
1880 if (t2 == Type::TOP) return Type::TOP;
1881 if (t2->base() != Type::FloatCon) return Type::FLOAT;
1882 const Type *t3 = phase->type(in(3));
1883 if (t3 == Type::TOP) return Type::TOP;
1884 if (t3->base() != Type::FloatCon) return Type::FLOAT;
1885 #ifndef __STDC_IEC_559__
1886 return Type::FLOAT;
1887 #else
1888 float f1 = t1->getf();
1889 float f2 = t2->getf();
1890 float f3 = t3->getf();
1891 return TypeF::make(fma(f1, f2, f3));
1892 #endif
1893 }
1894
1895 //=============================================================================
1896 //------------------------------Value------------------------------------------
1897 const Type* FmaHFNode::Value(PhaseGVN* phase) const {
1898 const Type* t1 = phase->type(in(1));
1899 if (t1 == Type::TOP) { return Type::TOP; }
1900 if (t1->base() != Type::HalfFloatCon) { return Type::HALF_FLOAT; }
1901 const Type* t2 = phase->type(in(2));
1902 if (t2 == Type::TOP) { return Type::TOP; }
1903 if (t2->base() != Type::HalfFloatCon) { return Type::HALF_FLOAT; }
1904 const Type* t3 = phase->type(in(3));
1905 if (t3 == Type::TOP) { return Type::TOP; }
1906 if (t3->base() != Type::HalfFloatCon) { return Type::HALF_FLOAT; }
1907 #ifndef __STDC_IEC_559__
1908 return Type::HALF_FLOAT;
1909 #else
1910 float f1 = t1->getf();
1911 float f2 = t2->getf();
1912 float f3 = t3->getf();
1913 return TypeH::make(fma(f1, f2, f3));
1914 #endif
1915 }
1916
1917 //=============================================================================
1918 //------------------------------hash-------------------------------------------
1919 // Hash function for MulAddS2INode. Operation is commutative with commutative pairs.
1920 // The hash function must return the same value when edge swapping is performed.
1921 uint MulAddS2INode::hash() const {
1922 return (uintptr_t)in(1) + (uintptr_t)in(2) + (uintptr_t)in(3) + (uintptr_t)in(4) + Opcode();
1923 }
1924
1925 //------------------------------Rotate Operations ------------------------------
1926
1927 Node* RotateLeftNode::Identity(PhaseGVN* phase) {
1928 const Type* t1 = phase->type(in(1));
1929 if (t1 == Type::TOP) {
1930 return this;
1931 }
1932 int count = 0;
1933 assert(t1->isa_int() || t1->isa_long(), "Unexpected type");
1934 int mask = (t1->isa_int() ? BitsPerJavaInteger : BitsPerJavaLong) - 1;
1935 if (const_shift_count(phase, this, &count) && (count & mask) == 0) {
1936 // Rotate by a multiple of 32/64 does nothing
1937 return in(1);
1938 }
1939 return this;
1940 }
1941
// Compute the type lattice value of a left rotation; folds the rotation when
// both the value and the count are constants. Handles both int and long
// operands (the count is always an int).
const Type* RotateLeftNode::Value(PhaseGVN* phase) const {
  const Type* t1 = phase->type(in(1));
  const Type* t2 = phase->type(in(2));
  // Either input is TOP ==> the result is TOP
  if (t1 == Type::TOP || t2 == Type::TOP) {
    return Type::TOP;
  }

  if (t1->isa_int()) {
    const TypeInt* r1 = t1->is_int();
    const TypeInt* r2 = t2->is_int();

    // Left input is ZERO ==> the result is ZERO.
    if (r1 == TypeInt::ZERO) {
      return TypeInt::ZERO;
    }
    // Rotate by zero does nothing
    if (r2 == TypeInt::ZERO) {
      return r1;
    }
    if (r1->is_con() && r2->is_con()) {
      juint r1_con = (juint)r1->get_con();
      juint shift = (juint)(r2->get_con()) & (juint)(BitsPerJavaInteger - 1); // semantics of Java shifts
      // NOTE(review): if the masked shift is 0 here (count a non-zero
      // multiple of 32, so the r2 == ZERO guard above did not fire),
      // "r1_con >> (32 - shift)" shifts by the full bit-width, which is
      // undefined behavior in C++ — presumably such nodes are removed by
      // Identity before Value sees them; confirm.
      return TypeInt::make((r1_con << shift) | (r1_con >> (32 - shift)));
    }
    return TypeInt::INT;
  } else {
    assert(t1->isa_long(), "Type must be a long");
    const TypeLong* r1 = t1->is_long();
    const TypeInt* r2 = t2->is_int();

    // Left input is ZERO ==> the result is ZERO.
    if (r1 == TypeLong::ZERO) {
      return TypeLong::ZERO;
    }
    // Rotate by zero does nothing
    if (r2 == TypeInt::ZERO) {
      return r1;
    }
    if (r1->is_con() && r2->is_con()) {
      julong r1_con = (julong)r1->get_con();
      julong shift = (julong)(r2->get_con()) & (julong)(BitsPerJavaLong - 1); // semantics of Java shifts
      // NOTE(review): same full-bit-width shift concern as the int case
      // above when the masked shift is 0 ("r1_con >> (64 - shift)").
      return TypeLong::make((r1_con << shift) | (r1_con >> (64 - shift)));
    }
    return TypeLong::LONG;
  }
}
1989
// Canonicalize a rotate-left with a constant count into the equivalent
// rotate-right by (width - count), so the backend only needs to match one
// rotation direction for constant counts.
Node* RotateLeftNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  const Type* t1 = phase->type(in(1));
  const Type* t2 = phase->type(in(2));
  if (t2->isa_int() && t2->is_int()->is_con()) {
    if (t1->isa_int()) {
      // rol(x, c) == ror(x, 32 - c) for int operands.
      int lshift = t2->is_int()->get_con() & 31;
      return new RotateRightNode(in(1), phase->intcon(32 - (lshift & 31)), TypeInt::INT);
    } else if (t1 != Type::TOP) {
      assert(t1->isa_long(), "Type must be a long");
      // rol(x, c) == ror(x, 64 - c) for long operands.
      int lshift = t2->is_int()->get_con() & 63;
      return new RotateRightNode(in(1), phase->intcon(64 - (lshift & 63)), TypeLong::LONG);
    }
  }
  return nullptr;
}
2005
2006 Node* RotateRightNode::Identity(PhaseGVN* phase) {
2007 const Type* t1 = phase->type(in(1));
2008 if (t1 == Type::TOP) {
2009 return this;
2010 }
2011 int count = 0;
2012 assert(t1->isa_int() || t1->isa_long(), "Unexpected type");
2013 int mask = (t1->isa_int() ? BitsPerJavaInteger : BitsPerJavaLong) - 1;
2014 if (const_shift_count(phase, this, &count) && (count & mask) == 0) {
2015 // Rotate by a multiple of 32/64 does nothing
2016 return in(1);
2017 }
2018 return this;
2019 }
2020
// Compute the type lattice value of a right rotation; folds the rotation
// when both the value and the count are constants. Handles both int and
// long operands (the count is always an int).
const Type* RotateRightNode::Value(PhaseGVN* phase) const {
  const Type* t1 = phase->type(in(1));
  const Type* t2 = phase->type(in(2));
  // Either input is TOP ==> the result is TOP
  if (t1 == Type::TOP || t2 == Type::TOP) {
    return Type::TOP;
  }

  if (t1->isa_int()) {
    const TypeInt* r1 = t1->is_int();
    const TypeInt* r2 = t2->is_int();

    // Left input is ZERO ==> the result is ZERO.
    if (r1 == TypeInt::ZERO) {
      return TypeInt::ZERO;
    }
    // Rotate by zero does nothing
    if (r2 == TypeInt::ZERO) {
      return r1;
    }
    if (r1->is_con() && r2->is_con()) {
      juint r1_con = (juint)r1->get_con();
      juint shift = (juint)(r2->get_con()) & (juint)(BitsPerJavaInteger - 1); // semantics of Java shifts
      // NOTE(review): if the masked shift is 0 here (count a non-zero
      // multiple of 32), "r1_con << (32 - shift)" shifts by the full
      // bit-width, which is undefined behavior in C++ — presumably such
      // nodes are removed by Identity before Value sees them; confirm.
      return TypeInt::make((r1_con >> shift) | (r1_con << (32 - shift)));
    }
    return TypeInt::INT;
  } else {
    assert(t1->isa_long(), "Type must be a long");
    const TypeLong* r1 = t1->is_long();
    const TypeInt* r2 = t2->is_int();
    // Left input is ZERO ==> the result is ZERO.
    if (r1 == TypeLong::ZERO) {
      return TypeLong::ZERO;
    }
    // Rotate by zero does nothing
    if (r2 == TypeInt::ZERO) {
      return r1;
    }
    if (r1->is_con() && r2->is_con()) {
      julong r1_con = (julong)r1->get_con();
      julong shift = (julong)(r2->get_con()) & (julong)(BitsPerJavaLong - 1); // semantics of Java shifts
      // NOTE(review): same full-bit-width shift concern as the int case
      // above when the masked shift is 0 ("r1_con << (64 - shift)").
      return TypeLong::make((r1_con >> shift) | (r1_con << (64 - shift)));
    }
    return TypeLong::LONG;
  }
}
2067
2068 //------------------------------ Sum & Mask ------------------------------
2069
2070 // Returns a lower bound on the number of trailing zeros in expr.
// Returns a lower bound on the number of trailing zeros in expr.
// Recognizes two cases: a constant (exact count, or full bit-width for 0)
// and a left shift by a constant (at least 'shift' trailing zeros). Looks
// through casts and through ConvI2L. Returns 0 when nothing can be proven.
static jint AndIL_min_trailing_zeros(const PhaseGVN* phase, const Node* expr, BasicType bt) {
  const TypeInteger* type = phase->type(expr)->isa_integer(bt);
  if (type == nullptr) {
    return 0;
  }

  // Strip casts (CastII/CastLL etc.) and re-query the type of the bare node.
  expr = expr->uncast();
  type = phase->type(expr)->isa_integer(bt);
  if (type == nullptr) {
    return 0;
  }

  if (type->is_con()) {
    jlong con = type->get_con_as_long(bt);
    // A zero constant has all bits clear: report the full bit-width.
    return con == 0L ? (type2aelembytes(bt) * BitsPerByte) : count_trailing_zeros(con);
  }

  // Look through an int-to-long conversion and analyze the narrower input;
  // sign/zero extension does not add trailing ones.
  if (expr->Opcode() == Op_ConvI2L) {
    expr = expr->in(1)->uncast();
    bt = T_INT;
    type = phase->type(expr)->isa_int();
  }

  // Pattern: expr = (x << shift)
  if (expr->Opcode() == Op_LShift(bt)) {
    const TypeInt* shift_t = phase->type(expr->in(2))->isa_int();
    if (shift_t == nullptr || !shift_t->is_con()) {
      return 0;
    }
    // We need to truncate the shift, as it may not have been canonicalized yet.
    // T_INT: 0..31 -> shift_mask = 4 * 8 - 1 = 31
    // T_LONG: 0..63 -> shift_mask = 8 * 8 - 1 = 63
    // (JLS: "Shift Operators")
    jint shift_mask = type2aelembytes(bt) * BitsPerByte - 1;
    return shift_t->get_con() & shift_mask;
  }

  // No usable pattern: cannot prove any trailing zeros.
  return 0;
}
2110
2111 // Checks whether expr is neutral additive element (zero) under mask,
2112 // i.e. whether an expression of the form:
2113 // (AndX (AddX (expr addend) mask)
2114 // (expr + addend) & mask
2115 // is equivalent to
2116 // (AndX addend mask)
2117 // addend & mask
2118 // for any addend.
2119 // (The X in AndX must be I or L, depending on bt).
2120 //
2121 // We check for the sufficient condition when the lowest set bit in expr is higher than
2122 // the highest set bit in mask, i.e.:
2123 // expr: eeeeee0000000000000
2124 // mask: 000000mmmmmmmmmmmmm
2125 // <--w bits--->
2126 // We do not test for other cases.
2127 //
2128 // Correctness:
2129 // Given "expr" with at least "w" trailing zeros,
2130 // let "mod = 2^w", "suffix_mask = mod - 1"
2131 //
2132 // Since "mask" only has bits set where "suffix_mask" does, we have:
2133 // mask = suffix_mask & mask (SUFFIX_MASK)
2134 //
2135 // And since expr only has bits set above w, and suffix_mask only below:
2136 // expr & suffix_mask == 0 (NO_BIT_OVERLAP)
2137 //
2138 // From unsigned modular arithmetic (with unsigned modulo %), and since mod is
2139 // a power of 2, and we are computing in a ring of powers of 2, we know that
2140 // (x + y) % mod = (x % mod + y) % mod
2141 // (x + y) & suffix_mask = (x & suffix_mask + y) & suffix_mask (MOD_ARITH)
2142 //
2143 // We can now prove the equality:
2144 // (expr + addend) & mask
2145 // = (expr + addend) & suffix_mask & mask (SUFFIX_MASK)
2146 // = (expr & suffix_mask + addend) & suffix_mask & mask (MOD_ARITH)
2147 // = (0 + addend) & suffix_mask & mask (NO_BIT_OVERLAP)
2148 // = addend & mask (SUFFIX_MASK)
2149 //
2150 // Hence, an expr with at least w trailing zeros is a neutral additive element under any mask with bit width w.
// Returns true when expr provably has at least as many trailing zeros as the
// mask has significant bits, making expr a neutral additive element under the
// mask (see the correctness proof in the comment above).
static bool AndIL_is_zero_element_under_mask(const PhaseGVN* phase, const Node* expr, const Node* mask, BasicType bt) {
  // When the mask is negative, it has the most significant bit set.
  const TypeInteger* mask_t = phase->type(mask)->isa_integer(bt);
  if (mask_t == nullptr || mask_t->lo_as_long() < 0) {
    return false;
  }

  // When the mask is constant zero, we defer to MulNode::Value to eliminate the entire AndX operation.
  if (mask_t->hi_as_long() == 0) {
    assert(mask_t->lo_as_long() == 0, "checked earlier");
    return false;
  }

  // Bit width of the mask's largest possible value; every set bit of any
  // value of mask_t lies below this position.
  jint mask_bit_width = BitsPerLong - count_leading_zeros(mask_t->hi_as_long());
  jint expr_trailing_zeros = AndIL_min_trailing_zeros(phase, expr, bt);
  return expr_trailing_zeros >= mask_bit_width;
}
2168
2169 // Reduces the pattern:
2170 // (AndX (AddX add1 add2) mask)
2171 // to
2172 // (AndX add1 mask), if add2 is neutral wrt mask (see above), and vice versa.
// Reduces the pattern:
// (AndX (AddX add1 add2) mask)
// to
// (AndX add1 mask), if add2 is neutral wrt mask (see above), and vice versa.
// Returns this node (with the edge updated in place) on success, else nullptr.
Node* MulNode::AndIL_sum_and_mask(PhaseGVN* phase, BasicType bt) {
  Node* add = in(1);
  Node* mask = in(2);
  // AndX is commutative: the AddX may sit on either input. addidx records
  // which of our inputs holds the add (0 means neither — no transformation).
  int addidx = 0;
  if (add->Opcode() == Op_Add(bt)) {
    addidx = 1;
  } else if (mask->Opcode() == Op_Add(bt)) {
    mask = add;
    addidx = 2;
    add = in(addidx);
  }
  if (addidx > 0) {
    Node* add1 = add->in(1);
    Node* add2 = add->in(2);
    // Drop whichever addend is provably invisible under the mask, replacing
    // the whole AddX input with the surviving addend.
    if (AndIL_is_zero_element_under_mask(phase, add1, mask, bt)) {
      set_req_X(addidx, add2, phase);
      return this;
    } else if (AndIL_is_zero_element_under_mask(phase, add2, mask, bt)) {
      set_req_X(addidx, add1, phase);
      return this;
    }
  }
  return nullptr;
}