1 dnl Copyright (c) 2014, Red Hat Inc. All rights reserved.
   2 dnl DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   3 dnl
   4 dnl This code is free software; you can redistribute it and/or modify it
   5 dnl under the terms of the GNU General Public License version 2 only, as
   6 dnl published by the Free Software Foundation.
   7 dnl
   8 dnl This code is distributed in the hope that it will be useful, but WITHOUT
   9 dnl ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10 dnl FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11 dnl version 2 for more details (a copy is included in the LICENSE file that
  12 dnl accompanied this code).
  13 dnl
  14 dnl You should have received a copy of the GNU General Public License version
  15 dnl 2 along with this work; if not, write to the Free Software Foundation,
  16 dnl Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17 dnl
  18 dnl Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19 dnl or visit www.oracle.com if you need additional information or have any
  20 dnl questions.
  21 dnl
  22 dnl 
dnl Process this file with m4 aarch64_ad.m4 to generate the arithmetic
dnl and shift patterns used in aarch64.ad.
  25 dnl
  26 // BEGIN This section of the file is automatically generated. Do not edit --------------
  27 dnl
dnl ORL2I(mode): expands to orL2I when the mode argument is I, and to
dnl nothing for L.  Used after iReg$1 in operand lists so that int rules
dnl take the iRegIorL2I operand while long rules take plain iRegL.
define(`ORL2I', `ifelse($1,I,orL2I)')
  29 dnl
dnl BASE_SHIFT_INSN(mode, op, insn, shift node, shift kind)
dnl   $1 = I or L operand size
dnl   $2 = ideal op node prefix: And, Or, Xor, Add, Sub
dnl   $3 = assembler mnemonic to emit
dnl   $4 = ideal shift node prefix: URShift, RShift, LShift
dnl   $5 = Assembler shift kind: LSR, ASR, LSL
dnl Emits one rule that folds an immediate shift of src2 into the ALU
dnl instruction as a shifted register operand.
define(`BASE_SHIFT_INSN',
`
instruct $2$1_reg_$4_reg(iReg$1NoSp dst,
                         iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst ($2$1 src1 ($4$1 src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "$3  $dst, $src1, $src2, $5 $src3" %}

  ins_encode %{
    __ $3(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::$5,
dnl the shift constant is masked to the operand width: 5 bits for I, 6 for L
              $src3$$constant & ifelse($1,I,0x1f,0x3f));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}')dnl
dnl BASE_INVERTED_INSN(mode, op, insn)
dnl   $1 = I or L, $2 = And, Or or Xor, $3 = bic/orn/eon mnemonic.
dnl Matches op(src1, xor(src2, -1)) -- src2 inverted by xor with the
dnl all-ones immediate imm$1_M1 -- and emits the inverted-operand
dnl instruction with a zero LSL shift.
define(`BASE_INVERTED_INSN',
`
instruct $2$1_reg_not_reg(iReg$1NoSp dst,
                         iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_M1 m1,
                         rFlagsReg cr) %{
dnl This ifelse is because hotspot reassociates (xor (xor ..)..)
dnl into this canonical form.
  ifelse($2,Xor,
    match(Set dst (Xor$1 m1 (Xor$1 src2 src1)));,
    match(Set dst ($2$1 src1 (Xor$1 src2 m1)));)
  ins_cost(INSN_COST);
  format %{ "$3  $dst, $src1, $src2" %}

  ins_encode %{
    __ $3(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}')dnl
dnl INVERTED_SHIFT_INSN(mode, op, insn, shift node, shift kind, ...)
dnl Combination of BASE_SHIFT_INSN and BASE_INVERTED_INSN: src2 is
dnl shifted by the immediate src3 and then inverted (xor with imm$1_M1),
dnl all folded into one instruction.
dnl NOTE(review): callers pass two extra trailing arguments (a mask and
dnl a type name) that this macro never references.
define(`INVERTED_SHIFT_INSN',
`
instruct $2$1_reg_$4_not_reg(iReg$1NoSp dst,
                         iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2,
                         immI src3, imm$1_M1 src4, rFlagsReg cr) %{
dnl This ifelse is because hotspot reassociates (xor (xor ..)..)
dnl into this canonical form.
  ifelse($2,Xor,
    match(Set dst ($2$1 src4 (Xor$1($4$1 src2 src3) src1)));,
    match(Set dst ($2$1 src1 (Xor$1($4$1 src2 src3) src4)));)
  ins_cost(1.9 * INSN_COST);
  format %{ "$3  $dst, $src1, $src2, $5 $src3" %}

  ins_encode %{
    __ $3(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::$5,
dnl the shift constant is masked to the operand width: 5 bits for I, 6 for L
              $src3$$constant & ifelse($1,I,0x1f,0x3f));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}')dnl
dnl NOT_INSN(mode, insn)
dnl Bitwise NOT: xor of src1 with the all-ones immediate imm$1_M1 is
dnl emitted as eon/eonw of src1 with the zero register.
define(`NOT_INSN',
`instruct reg$1_not_reg(iReg$1NoSp dst,
                         iReg$1`'ORL2I($1) src1, imm$1_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (Xor$1 src1 m1));
  ins_cost(INSN_COST);
  format %{ "$2  $dst, $src1, zr" %}

  ins_encode %{
    __ $2(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}')dnl
 112 dnl
dnl BOTH_SHIFT_INSNS(op, insn, shift node, shift kind)
dnl Instantiates BASE_SHIFT_INSN for both I and L.  The 32-bit mnemonic
dnl is the 64-bit one with a w suffix, except andr whose 32-bit form is
dnl andw rather than andrw.
define(`BOTH_SHIFT_INSNS',
`BASE_SHIFT_INSN(I, $1, ifelse($2,andr,andw,$2w), $3, $4)
BASE_SHIFT_INSN(L, $1, $2, $3, $4)')dnl
 116 dnl
dnl BOTH_INVERTED_INSNS(op, insn)
dnl Instantiates BASE_INVERTED_INSN for I (w-suffixed mnemonic) and L.
define(`BOTH_INVERTED_INSNS',
`BASE_INVERTED_INSN(I, $1, $2w, $3, $4)
BASE_INVERTED_INSN(L, $1, $2, $3, $4)')dnl
 120 dnl
dnl BOTH_INVERTED_SHIFT_INSNS(op, insn, shift node, shift kind)
dnl Instantiates INVERTED_SHIFT_INSN for I and L.  The trailing mask and
dnl type arguments are accepted but unused by INVERTED_SHIFT_INSN.
define(`BOTH_INVERTED_SHIFT_INSNS',
`INVERTED_SHIFT_INSN(I, $1, $2w, $3, $4, ~0, int)
INVERTED_SHIFT_INSN(L, $1, $2, $3, $4, ~0l, long)')dnl
 124 dnl
dnl ALL_SHIFT_KINDS(op, insn)
dnl One BOTH_SHIFT_INSNS instantiation per shift kind (LSR, ASR, LSL).
define(`ALL_SHIFT_KINDS',
`BOTH_SHIFT_INSNS($1, $2, URShift, LSR)
BOTH_SHIFT_INSNS($1, $2, RShift, ASR)
BOTH_SHIFT_INSNS($1, $2, LShift, LSL)')dnl
 129 dnl
dnl ALL_INVERTED_SHIFT_KINDS(op, insn)
dnl Inverted-operand variants for each shift kind (LSR, ASR, LSL).
define(`ALL_INVERTED_SHIFT_KINDS',
`BOTH_INVERTED_SHIFT_INSNS($1, $2, URShift, LSR)
BOTH_INVERTED_SHIFT_INSNS($1, $2, RShift, ASR)
BOTH_INVERTED_SHIFT_INSNS($1, $2, LShift, LSL)')dnl
 134 dnl
dnl Instantiate the NOT rules and every shifted and inverted ALU rule
dnl for both 32-bit (I) and 64-bit (L) operand sizes.
NOT_INSN(L, eon)
NOT_INSN(I, eonw)
BOTH_INVERTED_INSNS(And, bic)
BOTH_INVERTED_INSNS(Or, orn)
BOTH_INVERTED_INSNS(Xor, eon)
ALL_INVERTED_SHIFT_KINDS(And, bic)
ALL_INVERTED_SHIFT_KINDS(Xor, eon)
ALL_INVERTED_SHIFT_KINDS(Or, orn)
ALL_SHIFT_KINDS(And, andr)
ALL_SHIFT_KINDS(Xor, eor)
ALL_SHIFT_KINDS(Or, orr)
ALL_SHIFT_KINDS(Add, add)
ALL_SHIFT_KINDS(Sub, sub)
 148 dnl
 149 dnl EXTEND mode, rshift_op, src, lshift_count, rshift_count
dnl Expands to the ideal subtree for a left shift of $3 by $4 followed
dnl by the $2 right-shift node with count $5, all in mode $1.
define(`EXTEND', `($2$1 (LShift$1 $3 $4) $5)')
dnl BFM_INSN(mode, max shift, rshift node, insn)
dnl   $2 = 63 for L, 31 for I; also bounds both shift counts in the
dnl   predicate and supplies the modulus for the r and s fields.
dnl A left shift followed by a right shift collapses into one bitfield
dnl move: sbfm/sbfmw for an arithmetic right shift, ubfm/ubfmw for a
dnl logical one.
define(`BFM_INSN',`
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct $4$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI lshift_count, immI rshift_count)
%{
  match(Set dst EXTEND($1, $3, src, lshift_count, rshift_count));
  // Make sure we are not going to exceed what $4 can do.
  predicate((unsigned int)n->in(2)->get_int() <= $2
            && (unsigned int)n->in(1)->in(2)->get_int() <= $2);

  ins_cost(INSN_COST * 2);
  format %{ "$4  $dst, $src, $rshift_count - $lshift_count, #$2 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = $2 - lshift;
    int r = (rshift - lshift) & $2;
    __ $4(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}')
dnl Signed and unsigned bitfield moves for long and int.
BFM_INSN(L, 63, RShift, sbfm)
BFM_INSN(I, 31, RShift, sbfmw)
BFM_INSN(L, 63, URShift, ubfm)
BFM_INSN(I, 31, URShift, ubfmw)
 178 dnl
 179 // Bitfield extract with shift & mask
dnl BFX_INSN(mode, rshift node, insn)
dnl A right shift followed by And with a contiguous bitmask is a single
dnl unsigned bitfield extract; the field width is exact_log2(mask+1).
define(`BFX_INSN',
`instruct $3$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI rshift, imm$1_bitmask mask)
%{
  match(Set dst (And$1 ($2$1 src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "$3 $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ $3(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}')
dnl Unsigned bitfield extract for int and long.
BFX_INSN(I,URShift,ubfxw)
BFX_INSN(L,URShift,ubfx)
 198 
// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
dnl Hand-written (not macro-generated): the same shift-and-mask idiom as
dnl BFX_INSN but wrapped in ConvI2L, so dst is a long register while src
dnl is an int (or L2I) register.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
 216 
dnl UBFIZ_INSN(mode, insn, max shift, getter type, log2 suffix)
dnl   $3 = 31 for I, 63 for L; $4 = int or long selects get_int/get_long;
dnl   $5 = empty or _long so the predicate calls exact_log2 or
dnl   exact_log2_long on the mask.
dnl A contiguous mask followed by a left shift becomes ubfiz/ubfizw when
dnl the shifted field still fits in the register.
define(`UBFIZ_INSN',
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because imm$1_bitmask guarantees it.
`instruct $2$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI lshift, imm$1_bitmask mask)
%{
  match(Set dst (LShift$1 (And$1 src mask) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= $3 &&
    (exact_log2$5(n->in(1)->in(2)->get_$4()+1) + (unsigned int)n->in(2)->get_int()) <= ($3+1));

  ins_cost(INSN_COST);
  format %{ "$2 $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ $2(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}')
dnl The I form omits argument 5, so the predicate uses plain exact_log2.
UBFIZ_INSN(I, ubfizw, 31, int)
UBFIZ_INSN(L, ubfiz, 63, long, _long)
 239 
// If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
dnl Hand-written counterpart of UBFIZ_INSN that looks through ConvI2L:
dnl the predicate digs one level deeper (in(1)->in(1)->in(2)) to reach
dnl the mask, and the combined field may occupy the full low 32 bits of
dnl the long destination.
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L(AndI src mask)) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
    (exact_log2((unsigned int)n->in(1)->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= 32);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
 258 
 259 // Rotations
 260 
dnl EXTRACT_INSN(mode, width mask, op node, insn)
dnl Or/Add of a left shift of src1 and a logical right shift of src2 is
dnl a double-register extract when the two shift counts sum to the
dnl register width (the predicate checks the sum masked by $2 is zero).
define(`EXTRACT_INSN',
`instruct extr$3$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst ($3$1 (LShift$1 src1 lshift) (URShift$1 src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & $2));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ $4(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & $2);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
')dnl
dnl extr rules for the Or and Add combining nodes, long and int.
EXTRACT_INSN(L, 63, Or, extr)
EXTRACT_INSN(I, 31, Or, extrw)
EXTRACT_INSN(L, 63, Add, extr)
EXTRACT_INSN(I, 31, Add, extrw)
dnl ROL_EXPAND(mode, name, insn)
dnl Rotate-left expander: negates the shift count into rscratch1 and
dnl then performs a variable rotate-right (rorv/rorvw) by that amount.
define(`ROL_EXPAND', `
// $2 expander

instruct $2$1_rReg(iReg$1NoSp dst, iReg$1 src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "$2    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ $3(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}')dnl
dnl ROR_EXPAND(mode, name, insn)
dnl Rotate-right expander: passes the shift register directly to the
dnl variable rotate-right instruction (rorv/rorvw), no negation needed.
define(`ROR_EXPAND', `
// $2 expander

instruct $2$1_rReg(iReg$1NoSp dst, iReg$1 src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "$2    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ $3(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}')dnl
dnl ROL_INSN(mode, constant suffix, name)
dnl Matches (src << shift) | (src >>> (c - shift)) with c constrained by
dnl immI$2 (immI0, immI_32 or immI_64) and expands to the matching
dnl rol expander instruct defined above.
dnl NOTE(review): the first define argument is unquoted here, unlike the
dnl other macros in this file; harmless while ROL_INSN is undefined.
define(ROL_INSN, `
instruct $3$1_rReg_Var_C$2(iReg$1NoSp dst, iReg$1 src, iRegI shift, immI$2 c$2, rFlagsReg cr)
%{
  match(Set dst (Or$1 (LShift$1 src shift) (URShift$1 src (SubI c$2 shift))));

  expand %{
    $3$1_rReg(dst, src, shift, cr);
  %}
%}')dnl
dnl ROR_INSN(mode, constant suffix, name)
dnl Mirror image of ROL_INSN: matches (src >>> shift) | (src << (c -
dnl shift)) and expands to the matching ror expander instruct.
dnl NOTE(review): the first define argument is unquoted, as in ROL_INSN.
define(ROR_INSN, `
instruct $3$1_rReg_Var_C$2(iReg$1NoSp dst, iReg$1 src, iRegI shift, immI$2 c$2, rFlagsReg cr)
%{
  match(Set dst (Or$1 (URShift$1 src shift) (LShift$1 src (SubI c$2 shift))));

  expand %{
    $3$1_rReg(dst, src, shift, cr);
  %}
%}')dnl
dnl Instantiate rotate-left and rotate-right for both widths; each gets
dnl a match variant with the constant c equal to zero and one with c
dnl equal to the register width.
ROL_EXPAND(L, rol, rorv)
ROL_EXPAND(I, rol, rorvw)
ROL_INSN(L, _64, rol)
ROL_INSN(L, 0, rol)
ROL_INSN(I, _32, rol)
ROL_INSN(I, 0, rol)
ROR_EXPAND(L, ror, rorv)
ROR_EXPAND(I, ror, rorvw)
ROR_INSN(L, _64, ror)
ROR_INSN(L, 0, ror)
ROR_INSN(I, _32, ror)
ROR_INSN(I, 0, ror)
 342 
 343 // Add/subtract (extended)
dnl ADD_SUB_EXTENDED(mode, size, add node, shift node, insn, ext type, wordsize)
dnl ADD_SUB_CONV(src mode, dst mode, op, insn, ext kind)
dnl Add/Sub with a ConvI2L second operand is emitted as a single add/sub
dnl using a sign-extending (sxtw) extended-register operand.
define(`ADD_SUB_CONV', `
instruct $3Ext$1(iReg$2NoSp dst, iReg$2`'ORL2I($2) src1, iReg$1`'ORL2I($1) src2, rFlagsReg cr)
%{
  match(Set dst ($3$2 src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "$4  $dst, $src1, $src2, $5" %}

   ins_encode %{
     __ $4(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::$5);
   %}
  ins_pipe(ialu_reg_reg);
%}')dnl
dnl NOTE(review): the trailing semicolons after these calls are copied
dnl verbatim into the generated file -- confirm that is intended.
ADD_SUB_CONV(I,L,Add,add,sxtw);
ADD_SUB_CONV(I,L,Sub,sub,sxtw);
 360 dnl
dnl ADD_SUB_EXTENDED(mode, size, op, shift node, insn, ext kind, wordsize)
dnl A left shift then right shift by wordsize minus size extends the low
dnl size bits of src2; fold it into add/sub as an extended-register
dnl operand (RShift gives sign extension, URShift zero extension).
dnl The shift-count operand type is immI_ followed by eval of $7-$2,
dnl e.g. immI_24 for size 8 in a 32-bit word.
define(`ADD_SUB_EXTENDED', `
instruct $3Ext$1_$6(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immI_`'eval($7-$2) lshift, immI_`'eval($7-$2) rshift, rFlagsReg cr)
%{
  match(Set dst ($3$1 src1 EXTEND($1, $4, src2, lshift, rshift)));
  ins_cost(INSN_COST);
  format %{ "$5  $dst, $src1, $src2, $6" %}

   ins_encode %{
     __ $5(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::$6);
   %}
  ins_pipe(ialu_reg_reg);
%}')
dnl Extended-operand add rules for sub-word sources (Add only; shifted
dnl Sub forms are generated further below by ADD_SUB_EXTENDED_SHIFT).
ADD_SUB_EXTENDED(I,16,Add,RShift,add,sxth,32)
ADD_SUB_EXTENDED(I,8,Add,RShift,add,sxtb,32)
ADD_SUB_EXTENDED(I,8,Add,URShift,add,uxtb,32)
ADD_SUB_EXTENDED(L,16,Add,RShift,add,sxth,64)
ADD_SUB_EXTENDED(L,32,Add,RShift,add,sxtw,64)
ADD_SUB_EXTENDED(L,8,Add,RShift,add,sxtb,64)
ADD_SUB_EXTENDED(L,8,Add,URShift,add,uxtb,64)
 381 dnl
dnl ADD_SUB_ZERO_EXTEND(mode, mask, add node, insn, ext type)
dnl ADD_SUB_ZERO_EXTEND(mode, mask, op, insn, ext kind)
dnl And of src2 with 255, 65535 or 4294967295 zero-extends it; fold the
dnl extension into add/sub as a uxtb/uxth/uxtw extended-register operand.
define(`ADD_SUB_ZERO_EXTEND', `
instruct $3Ext$1_$5_and(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_$2 mask, rFlagsReg cr)
%{
  match(Set dst ($3$1 src1 (And$1 src2 mask)));
  ins_cost(INSN_COST);
  format %{ "$4  $dst, $src1, $src2, $5" %}

   ins_encode %{
     __ $4(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::$5);
   %}
  ins_pipe(ialu_reg_reg);
%}')
 396 dnl
dnl Zero-extended add and sub rules for byte, halfword and word masks.
ADD_SUB_ZERO_EXTEND(I,255,Add,addw,uxtb)
ADD_SUB_ZERO_EXTEND(I,65535,Add,addw,uxth)
ADD_SUB_ZERO_EXTEND(L,255,Add,add,uxtb)
ADD_SUB_ZERO_EXTEND(L,65535,Add,add,uxth)
ADD_SUB_ZERO_EXTEND(L,4294967295,Add,add,uxtw)
dnl
ADD_SUB_ZERO_EXTEND(I,255,Sub,subw,uxtb)
ADD_SUB_ZERO_EXTEND(I,65535,Sub,subw,uxth)
ADD_SUB_ZERO_EXTEND(L,255,Sub,sub,uxtb)
ADD_SUB_ZERO_EXTEND(L,65535,Sub,sub,uxth)
ADD_SUB_ZERO_EXTEND(L,4294967295,Sub,sub,uxtw)
 408 dnl
dnl ADD_SUB_EXTENDED_SHIFT(mode, size, add node, shift node, insn, ext type, wordsize)
dnl ADD_SUB_EXTENDED_SHIFT(mode, size, op, shift node, insn, ext kind, wordsize)
dnl Like ADD_SUB_EXTENDED, but the extended value is additionally left
dnl shifted by the small immediate lshift2 (immIExt), using the shifted
dnl extended-register form of add/sub.
define(`ADD_SUB_EXTENDED_SHIFT', `
instruct $3Ext$1_$6_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immIExt lshift2, immI_`'eval($7-$2) lshift1, immI_`'eval($7-$2) rshift1, rFlagsReg cr)
%{
  match(Set dst ($3$1 src1 (LShift$1 EXTEND($1, $4, src2, lshift1, rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "$5  $dst, $src1, $src2, $6 #lshift2" %}

   ins_encode %{
     __ $5(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::$6, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}')
 423 dnl                   $1 $2 $3   $4   $5   $6  $7
dnl Sign-extended-and-shifted add/sub rules (no unsigned variants here).
ADD_SUB_EXTENDED_SHIFT(L,8,Add,RShift,add,sxtb,64)
ADD_SUB_EXTENDED_SHIFT(L,16,Add,RShift,add,sxth,64)
ADD_SUB_EXTENDED_SHIFT(L,32,Add,RShift,add,sxtw,64)
dnl
ADD_SUB_EXTENDED_SHIFT(L,8,Sub,RShift,sub,sxtb,64)
ADD_SUB_EXTENDED_SHIFT(L,16,Sub,RShift,sub,sxth,64)
ADD_SUB_EXTENDED_SHIFT(L,32,Sub,RShift,sub,sxtw,64)
dnl
ADD_SUB_EXTENDED_SHIFT(I,8,Add,RShift,addw,sxtb,32)
ADD_SUB_EXTENDED_SHIFT(I,16,Add,RShift,addw,sxth,32)
dnl
ADD_SUB_EXTENDED_SHIFT(I,8,Sub,RShift,subw,sxtb,32)
ADD_SUB_EXTENDED_SHIFT(I,16,Sub,RShift,subw,sxth,32)
 437 dnl
 438 dnl ADD_SUB_CONV_SHIFT(mode, add node, insn, ext type)
dnl ADD_SUB_CONV_SHIFT(mode, op, insn, ext kind)
dnl Add/Sub of a left-shifted ConvI2L operand: sign extend src2 with
dnl sxtw and apply the immIExt shift in the same instruction.
define(`ADD_SUB_CONV_SHIFT', `
instruct $2ExtI_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst ($2$1 src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "$3  $dst, $src1, $src2, $4 #lshift" %}

   ins_encode %{
     __ $3(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::$4, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}')
 452 dnl
dnl NOTE(review): the trailing semicolons after these calls are copied
dnl verbatim into the generated file -- confirm that is intended.
ADD_SUB_CONV_SHIFT(L,Add,add,sxtw);
ADD_SUB_CONV_SHIFT(L,Sub,sub,sxtw);
 455 dnl
dnl ADD_SUB_ZERO_EXTEND_SHIFT(mode, mask, add node, insn, ext type)
dnl ADD_SUB_ZERO_EXTEND_SHIFT(mode, mask, op, insn, ext kind)
dnl Zero extension via And with a byte/halfword/word mask combined with
dnl an immIExt left shift, folded into the shifted extended-register
dnl form of add/sub.
define(`ADD_SUB_ZERO_EXTEND_SHIFT', `
instruct $3Ext$1_$5_and_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_$2 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst ($3$1 src1 (LShift$1 (And$1 src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "$4  $dst, $src1, $src2, $5 #lshift" %}

   ins_encode %{
     __ $4(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::$5, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}')
 470 dnl
 471 dnl                       $1 $2  $3  $4  $5
dnl Zero-extended-and-shifted add/sub rules; the uxtw form only exists
dnl for the long (L) destination.
ADD_SUB_ZERO_EXTEND_SHIFT(L,255,Add,add,uxtb)
ADD_SUB_ZERO_EXTEND_SHIFT(L,65535,Add,add,uxth)
ADD_SUB_ZERO_EXTEND_SHIFT(L,4294967295,Add,add,uxtw)
dnl
ADD_SUB_ZERO_EXTEND_SHIFT(L,255,Sub,sub,uxtb)
ADD_SUB_ZERO_EXTEND_SHIFT(L,65535,Sub,sub,uxth)
ADD_SUB_ZERO_EXTEND_SHIFT(L,4294967295,Sub,sub,uxtw)
dnl
ADD_SUB_ZERO_EXTEND_SHIFT(I,255,Add,addw,uxtb)
ADD_SUB_ZERO_EXTEND_SHIFT(I,65535,Add,addw,uxth)
dnl
ADD_SUB_ZERO_EXTEND_SHIFT(I,255,Sub,subw,uxtb)
ADD_SUB_ZERO_EXTEND_SHIFT(I,65535,Sub,subw,uxth)
 485 dnl
 486 // END This section of the file is automatically generated. Do not edit --------------