1 //
    2 // Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
    3 // Copyright (c) 2014, 2021, Red Hat, Inc. All rights reserved.
    4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    5 //
    6 // This code is free software; you can redistribute it and/or modify it
    7 // under the terms of the GNU General Public License version 2 only, as
    8 // published by the Free Software Foundation.
    9 //
   10 // This code is distributed in the hope that it will be useful, but WITHOUT
   11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   13 // version 2 for more details (a copy is included in the LICENSE file that
   14 // accompanied this code).
   15 //
   16 // You should have received a copy of the GNU General Public License version
   17 // 2 along with this work; if not, write to the Free Software Foundation,
   18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   19 //
   20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   21 // or visit www.oracle.com if you need additional information or have any
   22 // questions.
   23 //
   24 //
   25 
   26 // AArch64 Architecture Description File
   27 
   28 //----------REGISTER DEFINITION BLOCK------------------------------------------
   29 // This information is used by the matcher and the register allocator to
   30 // describe individual registers and classes of registers within the target
   31 // architecture.
   32 
   33 register %{
   34 //----------Architecture Description Register Definitions----------------------
   35 // General Registers
   36 // "reg_def"  name ( register save type, C convention save type,
   37 //                   ideal register type, encoding );
   38 // Register Save Types:
   39 //
   40 // NS  = No-Save:       The register allocator assumes that these registers
   41 //                      can be used without saving upon entry to the method, &
   42 //                      that they do not need to be saved at call sites.
   43 //
   44 // SOC = Save-On-Call:  The register allocator assumes that these registers
   45 //                      can be used without saving upon entry to the method,
   46 //                      but that they must be saved at call sites.
   47 //
   48 // SOE = Save-On-Entry: The register allocator assumes that these registers
   49 //                      must be saved before using them upon entry to the
   50 //                      method, but they do not need to be saved at call
   51 //                      sites.
   52 //
   53 // AS  = Always-Save:   The register allocator assumes that these registers
   54 //                      must be saved before using them upon entry to the
   55 //                      method, & that they must be saved at call sites.
   56 //
   57 // Ideal Register Type is used to determine how to save & restore a
   58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
   59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
   60 //
   61 // The encoding number is the actual bit-pattern placed into the opcodes.
   62 
   63 // We must define the 64 bit int registers in two 32 bit halves, the
   64 // real lower register and a virtual upper half register. upper halves
   65 // are used by the register allocator but are not actually supplied as
   66 // operands to memory ops.
   67 //
   68 // follow the C1 compiler in making registers
   69 //
//   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
//   r8-r9 non-allocatable (so we can use them as scratch regs)
//
// as regards Java usage, we don't use any callee save registers
// because this makes it difficult to de-optimise a frame (see comment
// in x86 implementation of Deoptimization::unwind_callee_save_values)
   77 //
   78 
   79 // General Registers
   80 
   81 reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
   82 reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
   83 reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
   84 reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
   85 reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
   86 reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
   87 reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
   88 reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
   89 reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
   90 reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
   91 reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
   92 reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
   93 reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
   94 reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
   95 reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
   96 reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
   97 reg_def R8      ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()         ); // rscratch1, non-allocatable
   98 reg_def R8_H    ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()->next() );
   99 reg_def R9      ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()         ); // rscratch2, non-allocatable
  100 reg_def R9_H    ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()->next() );
  101 reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
  102 reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
  103 reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
  104 reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
  105 reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
  106 reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
  107 reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
  108 reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
  109 reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
  110 reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
  111 reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
  112 reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
  113 reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
  114 reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
  115 reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
  116 reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
  117 reg_def R18     ( SOC, SOC, Op_RegI, 18, r18_tls->as_VMReg()        );
  118 reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18_tls->as_VMReg()->next());
  119 reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
  120 reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
  121 reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
  122 reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
  123 reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
  124 reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
  125 reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
  126 reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
  127 reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
  128 reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
  129 reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
  130 reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
  131 reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
  132 reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
  133 reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
  134 reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
  135 reg_def R27     ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
  136 reg_def R27_H   ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()->next());
  137 reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
  138 reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
  139 reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
  140 reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
  141 reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
  142 reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
  143 reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
  144 reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
  145 
  146 // ----------------------------
  147 // Float/Double/Vector Registers
  148 // ----------------------------
  149 
  150 // Double Registers
  151 
  152 // The rules of ADL require that double registers be defined in pairs.
  153 // Each pair must be two 32-bit values, but not necessarily a pair of
  154 // single float registers. In each pair, ADLC-assigned register numbers
  155 // must be adjacent, with the lower number even. Finally, when the
  156 // CPU stores such a register pair to memory, the word associated with
  157 // the lower ADLC-assigned number must be stored to the lower address.
  158 
  159 // AArch64 has 32 floating-point registers. Each can store a vector of
  160 // single or double precision floating-point values up to 8 * 32
  161 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
  162 // use the first float or double element of the vector.
  163 
// For Java use, float registers v0-v15 are always save-on-call, whereas
// the platform ABI treats v8-v15 as callee-save. Float registers
// v16-v31 are SOC as per the platform spec.
  167 
  168 // For SVE vector registers, we simply extend vector register size to 8
  169 // 'logical' slots. This is nominally 256 bits but it actually covers
  170 // all possible 'physical' SVE vector register lengths from 128 ~ 2048
  171 // bits. The 'physical' SVE vector register length is detected during
  172 // startup, so the register allocator is able to identify the correct
  173 // number of bytes needed for an SVE spill/unspill.
  174 // Note that a vector register with 4 slots denotes a 128-bit NEON
  175 // register allowing it to be distinguished from the corresponding SVE
  176 // vector register when the SVE vector length is 128 bits.
  177 
  178   reg_def V0   ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()          );
  179   reg_def V0_H ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next()  );
  180   reg_def V0_J ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(2) );
  181   reg_def V0_K ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(3) );
  182 
  183   reg_def V1   ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()          );
  184   reg_def V1_H ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next()  );
  185   reg_def V1_J ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(2) );
  186   reg_def V1_K ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(3) );
  187 
  188   reg_def V2   ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()          );
  189   reg_def V2_H ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next()  );
  190   reg_def V2_J ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(2) );
  191   reg_def V2_K ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(3) );
  192 
  193   reg_def V3   ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()          );
  194   reg_def V3_H ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next()  );
  195   reg_def V3_J ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(2) );
  196   reg_def V3_K ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(3) );
  197 
  198   reg_def V4   ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()          );
  199   reg_def V4_H ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next()  );
  200   reg_def V4_J ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(2) );
  201   reg_def V4_K ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(3) );
  202 
  203   reg_def V5   ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()          );
  204   reg_def V5_H ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next()  );
  205   reg_def V5_J ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(2) );
  206   reg_def V5_K ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(3) );
  207 
  208   reg_def V6   ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()          );
  209   reg_def V6_H ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next()  );
  210   reg_def V6_J ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(2) );
  211   reg_def V6_K ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(3) );
  212 
  213   reg_def V7   ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()          );
  214   reg_def V7_H ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next()  );
  215   reg_def V7_J ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(2) );
  216   reg_def V7_K ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(3) );
  217 
  218   reg_def V8   ( SOC, SOE, Op_RegF, 8, v8->as_VMReg()          );
  219   reg_def V8_H ( SOC, SOE, Op_RegF, 8, v8->as_VMReg()->next()  );
  220   reg_def V8_J ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(2) );
  221   reg_def V8_K ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(3) );
  222 
  223   reg_def V9   ( SOC, SOE, Op_RegF, 9, v9->as_VMReg()          );
  224   reg_def V9_H ( SOC, SOE, Op_RegF, 9, v9->as_VMReg()->next()  );
  225   reg_def V9_J ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(2) );
  226   reg_def V9_K ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(3) );
  227 
  228   reg_def V10   ( SOC, SOE, Op_RegF, 10, v10->as_VMReg()          );
  229   reg_def V10_H ( SOC, SOE, Op_RegF, 10, v10->as_VMReg()->next()  );
  230   reg_def V10_J ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2) );
  231   reg_def V10_K ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3) );
  232 
  233   reg_def V11   ( SOC, SOE, Op_RegF, 11, v11->as_VMReg()          );
  234   reg_def V11_H ( SOC, SOE, Op_RegF, 11, v11->as_VMReg()->next()  );
  235   reg_def V11_J ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2) );
  236   reg_def V11_K ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3) );
  237 
  238   reg_def V12   ( SOC, SOE, Op_RegF, 12, v12->as_VMReg()          );
  239   reg_def V12_H ( SOC, SOE, Op_RegF, 12, v12->as_VMReg()->next()  );
  240   reg_def V12_J ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2) );
  241   reg_def V12_K ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3) );
  242 
  243   reg_def V13   ( SOC, SOE, Op_RegF, 13, v13->as_VMReg()          );
  244   reg_def V13_H ( SOC, SOE, Op_RegF, 13, v13->as_VMReg()->next()  );
  245   reg_def V13_J ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2) );
  246   reg_def V13_K ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3) );
  247 
  248   reg_def V14   ( SOC, SOE, Op_RegF, 14, v14->as_VMReg()          );
  249   reg_def V14_H ( SOC, SOE, Op_RegF, 14, v14->as_VMReg()->next()  );
  250   reg_def V14_J ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2) );
  251   reg_def V14_K ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3) );
  252 
  253   reg_def V15   ( SOC, SOE, Op_RegF, 15, v15->as_VMReg()          );
  254   reg_def V15_H ( SOC, SOE, Op_RegF, 15, v15->as_VMReg()->next()  );
  255   reg_def V15_J ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2) );
  256   reg_def V15_K ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3) );
  257 
  258   reg_def V16   ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()          );
  259   reg_def V16_H ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next()  );
  260   reg_def V16_J ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2) );
  261   reg_def V16_K ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3) );
  262 
  263   reg_def V17   ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()          );
  264   reg_def V17_H ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next()  );
  265   reg_def V17_J ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2) );
  266   reg_def V17_K ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3) );
  267 
  268   reg_def V18   ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()          );
  269   reg_def V18_H ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next()  );
  270   reg_def V18_J ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2) );
  271   reg_def V18_K ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3) );
  272 
  273   reg_def V19   ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()          );
  274   reg_def V19_H ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next()  );
  275   reg_def V19_J ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2) );
  276   reg_def V19_K ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3) );
  277 
  278   reg_def V20   ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()          );
  279   reg_def V20_H ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next()  );
  280   reg_def V20_J ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2) );
  281   reg_def V20_K ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3) );
  282 
  283   reg_def V21   ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()          );
  284   reg_def V21_H ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next()  );
  285   reg_def V21_J ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2) );
  286   reg_def V21_K ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3) );
  287 
  288   reg_def V22   ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()          );
  289   reg_def V22_H ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next()  );
  290   reg_def V22_J ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2) );
  291   reg_def V22_K ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3) );
  292 
  293   reg_def V23   ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()          );
  294   reg_def V23_H ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next()  );
  295   reg_def V23_J ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2) );
  296   reg_def V23_K ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3) );
  297 
  298   reg_def V24   ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()          );
  299   reg_def V24_H ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next()  );
  300   reg_def V24_J ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2) );
  301   reg_def V24_K ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3) );
  302 
  303   reg_def V25   ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()          );
  304   reg_def V25_H ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next()  );
  305   reg_def V25_J ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2) );
  306   reg_def V25_K ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3) );
  307 
  308   reg_def V26   ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()          );
  309   reg_def V26_H ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next()  );
  310   reg_def V26_J ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2) );
  311   reg_def V26_K ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3) );
  312 
  313   reg_def V27   ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()          );
  314   reg_def V27_H ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next()  );
  315   reg_def V27_J ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2) );
  316   reg_def V27_K ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3) );
  317 
  318   reg_def V28   ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()          );
  319   reg_def V28_H ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next()  );
  320   reg_def V28_J ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2) );
  321   reg_def V28_K ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3) );
  322 
  323   reg_def V29   ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()          );
  324   reg_def V29_H ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next()  );
  325   reg_def V29_J ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2) );
  326   reg_def V29_K ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3) );
  327 
  328   reg_def V30   ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()          );
  329   reg_def V30_H ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next()  );
  330   reg_def V30_J ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2) );
  331   reg_def V30_K ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3) );
  332 
  333   reg_def V31   ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()          );
  334   reg_def V31_H ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next()  );
  335   reg_def V31_J ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2) );
  336   reg_def V31_K ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3) );
  337 
  338 // ----------------------------
  339 // SVE Predicate Registers
  340 // ----------------------------
  341   reg_def P0 (SOC, SOC, Op_RegVectMask, 0, p0->as_VMReg());
  342   reg_def P1 (SOC, SOC, Op_RegVectMask, 1, p1->as_VMReg());
  343   reg_def P2 (SOC, SOC, Op_RegVectMask, 2, p2->as_VMReg());
  344   reg_def P3 (SOC, SOC, Op_RegVectMask, 3, p3->as_VMReg());
  345   reg_def P4 (SOC, SOC, Op_RegVectMask, 4, p4->as_VMReg());
  346   reg_def P5 (SOC, SOC, Op_RegVectMask, 5, p5->as_VMReg());
  347   reg_def P6 (SOC, SOC, Op_RegVectMask, 6, p6->as_VMReg());
  348   reg_def P7 (SOC, SOC, Op_RegVectMask, 7, p7->as_VMReg());
  349   reg_def P8 (SOC, SOC, Op_RegVectMask, 8, p8->as_VMReg());
  350   reg_def P9 (SOC, SOC, Op_RegVectMask, 9, p9->as_VMReg());
  351   reg_def P10 (SOC, SOC, Op_RegVectMask, 10, p10->as_VMReg());
  352   reg_def P11 (SOC, SOC, Op_RegVectMask, 11, p11->as_VMReg());
  353   reg_def P12 (SOC, SOC, Op_RegVectMask, 12, p12->as_VMReg());
  354   reg_def P13 (SOC, SOC, Op_RegVectMask, 13, p13->as_VMReg());
  355   reg_def P14 (SOC, SOC, Op_RegVectMask, 14, p14->as_VMReg());
  356   reg_def P15 (SOC, SOC, Op_RegVectMask, 15, p15->as_VMReg());
  357 
  358 // ----------------------------
  359 // Special Registers
  360 // ----------------------------
  361 
// The AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. The FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
  367 
  368 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
  369 
  370 // Specify priority of register selection within phases of register
  371 // allocation.  Highest priority is first.  A useful heuristic is to
  372 // give registers a low priority when they are required by machine
  373 // instructions, like EAX and EDX on I486, and choose no-save registers
  374 // before save-on-call, & save-on-call before save-on-entry.  Registers
  375 // which participate in fixed calling sequences should come last.
  376 // Registers which are used as pairs must fall on an even boundary.
  377 
  378 alloc_class chunk0(
  379     // volatiles
  380     R10, R10_H,
  381     R11, R11_H,
  382     R12, R12_H,
  383     R13, R13_H,
  384     R14, R14_H,
  385     R15, R15_H,
  386     R16, R16_H,
  387     R17, R17_H,
  388     R18, R18_H,
  389 
  390     // arg registers
  391     R0, R0_H,
  392     R1, R1_H,
  393     R2, R2_H,
  394     R3, R3_H,
  395     R4, R4_H,
  396     R5, R5_H,
  397     R6, R6_H,
  398     R7, R7_H,
  399 
  400     // non-volatiles
  401     R19, R19_H,
  402     R20, R20_H,
  403     R21, R21_H,
  404     R22, R22_H,
  405     R23, R23_H,
  406     R24, R24_H,
  407     R25, R25_H,
  408     R26, R26_H,
  409 
  410     // non-allocatable registers
  411 
  412     R27, R27_H, // heapbase
  413     R28, R28_H, // thread
  414     R29, R29_H, // fp
  415     R30, R30_H, // lr
  416     R31, R31_H, // sp
  417     R8, R8_H,   // rscratch1
  418     R9, R9_H,   // rscratch2
  419 );
  420 
  421 alloc_class chunk1(
  422 
  423     // no save
  424     V16, V16_H, V16_J, V16_K,
  425     V17, V17_H, V17_J, V17_K,
  426     V18, V18_H, V18_J, V18_K,
  427     V19, V19_H, V19_J, V19_K,
  428     V20, V20_H, V20_J, V20_K,
  429     V21, V21_H, V21_J, V21_K,
  430     V22, V22_H, V22_J, V22_K,
  431     V23, V23_H, V23_J, V23_K,
  432     V24, V24_H, V24_J, V24_K,
  433     V25, V25_H, V25_J, V25_K,
  434     V26, V26_H, V26_J, V26_K,
  435     V27, V27_H, V27_J, V27_K,
  436     V28, V28_H, V28_J, V28_K,
  437     V29, V29_H, V29_J, V29_K,
  438     V30, V30_H, V30_J, V30_K,
  439     V31, V31_H, V31_J, V31_K,
  440 
  441     // arg registers
  442     V0, V0_H, V0_J, V0_K,
  443     V1, V1_H, V1_J, V1_K,
  444     V2, V2_H, V2_J, V2_K,
  445     V3, V3_H, V3_J, V3_K,
  446     V4, V4_H, V4_J, V4_K,
  447     V5, V5_H, V5_J, V5_K,
  448     V6, V6_H, V6_J, V6_K,
  449     V7, V7_H, V7_J, V7_K,
  450 
  451     // non-volatiles
  452     V8, V8_H, V8_J, V8_K,
  453     V9, V9_H, V9_J, V9_K,
  454     V10, V10_H, V10_J, V10_K,
  455     V11, V11_H, V11_J, V11_K,
  456     V12, V12_H, V12_J, V12_K,
  457     V13, V13_H, V13_J, V13_K,
  458     V14, V14_H, V14_J, V14_K,
  459     V15, V15_H, V15_J, V15_K,
  460 );
  461 
  462 alloc_class chunk2 (
  463     // Governing predicates for load/store and arithmetic
  464     P0,
  465     P1,
  466     P2,
  467     P3,
  468     P4,
  469     P5,
  470     P6,
  471 
  472     // Extra predicates
  473     P8,
  474     P9,
  475     P10,
  476     P11,
  477     P12,
  478     P13,
  479     P14,
  480     P15,
  481 
  482     // Preserved for all-true predicate
  483     P7,
  484 );
  485 
  486 alloc_class chunk3(RFLAGS);
  487 
  488 //----------Architecture Description Register Classes--------------------------
  489 // Several register classes are automatically defined based upon information in
  490 // this architecture description.
  491 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
  492 // 2) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
  493 //
  494 
  495 // Class for all 32 bit general purpose registers
  496 reg_class all_reg32(
  497     R0,
  498     R1,
  499     R2,
  500     R3,
  501     R4,
  502     R5,
  503     R6,
  504     R7,
  505     R10,
  506     R11,
  507     R12,
  508     R13,
  509     R14,
  510     R15,
  511     R16,
  512     R17,
  513     R18,
  514     R19,
  515     R20,
  516     R21,
  517     R22,
  518     R23,
  519     R24,
  520     R25,
  521     R26,
  522     R27,
  523     R28,
  524     R29,
  525     R30,
  526     R31
  527 );
  528 
  529 
  530 // Class for all 32 bit integer registers (excluding SP which
  531 // will never be used as an integer register)
  532 reg_class any_reg32 %{
  533   return _ANY_REG32_mask;
  534 %}
  535 
  536 // Singleton class for R0 int register
  537 reg_class int_r0_reg(R0);
  538 
  539 // Singleton class for R2 int register
  540 reg_class int_r2_reg(R2);
  541 
  542 // Singleton class for R3 int register
  543 reg_class int_r3_reg(R3);
  544 
  545 // Singleton class for R4 int register
  546 reg_class int_r4_reg(R4);
  547 
  548 // Singleton class for R31 int register
  549 reg_class int_r31_reg(R31);
  550 
  551 // Class for all 64 bit general purpose registers
  552 reg_class all_reg(
  553     R0, R0_H,
  554     R1, R1_H,
  555     R2, R2_H,
  556     R3, R3_H,
  557     R4, R4_H,
  558     R5, R5_H,
  559     R6, R6_H,
  560     R7, R7_H,
  561     R10, R10_H,
  562     R11, R11_H,
  563     R12, R12_H,
  564     R13, R13_H,
  565     R14, R14_H,
  566     R15, R15_H,
  567     R16, R16_H,
  568     R17, R17_H,
  569     R18, R18_H,
  570     R19, R19_H,
  571     R20, R20_H,
  572     R21, R21_H,
  573     R22, R22_H,
  574     R23, R23_H,
  575     R24, R24_H,
  576     R25, R25_H,
  577     R26, R26_H,
  578     R27, R27_H,
  579     R28, R28_H,
  580     R29, R29_H,
  581     R30, R30_H,
  582     R31, R31_H
  583 );
  584 
  585 // Class for all long integer registers (including SP)
  586 reg_class any_reg %{
  587   return _ANY_REG_mask;
  588 %}
  589 
  590 // Class for non-allocatable 32 bit registers
  591 reg_class non_allocatable_reg32(
  592 #ifdef R18_RESERVED
  593     // See comment in register_aarch64.hpp
  594     R18,                        // tls on Windows
  595 #endif
  596     R28,                        // thread
  597     R30,                        // lr
  598     R31                         // sp
  599 );
  600 
  601 // Class for non-allocatable 64 bit registers
  602 reg_class non_allocatable_reg(
  603 #ifdef R18_RESERVED
  604     // See comment in register_aarch64.hpp
  605     R18, R18_H,                 // tls on Windows, platform register on macOS
  606 #endif
  607     R28, R28_H,                 // thread
  608     R30, R30_H,                 // lr
  609     R31, R31_H                  // sp
  610 );
  611 
  612 // Class for all non-special integer registers
  613 reg_class no_special_reg32 %{
  614   return _NO_SPECIAL_REG32_mask;
  615 %}
  616 
  617 // Class for all non-special long integer registers
  618 reg_class no_special_reg %{
  619   return _NO_SPECIAL_REG_mask;
  620 %}
  621 
  622 // Class for 64 bit register r0
  623 reg_class r0_reg(
  624     R0, R0_H
  625 );
  626 
  627 // Class for 64 bit register r1
  628 reg_class r1_reg(
  629     R1, R1_H
  630 );
  631 
  632 // Class for 64 bit register r2
  633 reg_class r2_reg(
  634     R2, R2_H
  635 );
  636 
  637 // Class for 64 bit register r3
  638 reg_class r3_reg(
  639     R3, R3_H
  640 );
  641 
  642 // Class for 64 bit register r4
  643 reg_class r4_reg(
  644     R4, R4_H
  645 );
  646 
  647 // Class for 64 bit register r5
  648 reg_class r5_reg(
  649     R5, R5_H
  650 );
  651 
  652 // Class for 64 bit register r10
  653 reg_class r10_reg(
  654     R10, R10_H
  655 );
  656 
  657 // Class for 64 bit register r11
  658 reg_class r11_reg(
  659     R11, R11_H
  660 );
  661 
  662 // Class for method register
  663 reg_class method_reg(
  664     R12, R12_H
  665 );
  666 
  667 // Class for thread register
  668 reg_class thread_reg(
  669     R28, R28_H
  670 );
  671 
  672 // Class for frame pointer register
  673 reg_class fp_reg(
  674     R29, R29_H
  675 );
  676 
  677 // Class for link register
  678 reg_class lr_reg(
  679     R30, R30_H
  680 );
  681 
  682 // Class for long sp register
  683 reg_class sp_reg(
  684   R31, R31_H
  685 );
  686 
  687 // Class for all pointer registers
  688 reg_class ptr_reg %{
  689   return _PTR_REG_mask;
  690 %}
  691 
  692 // Class for all non_special pointer registers
  693 reg_class no_special_ptr_reg %{
  694   return _NO_SPECIAL_PTR_REG_mask;
  695 %}
  696 
  697 // Class for all non_special pointer registers (excluding rfp)
  698 reg_class no_special_no_rfp_ptr_reg %{
  699   return _NO_SPECIAL_NO_RFP_PTR_REG_mask;
  700 %}
  701 
  702 // Class for all float registers
  703 reg_class float_reg(
  704     V0,
  705     V1,
  706     V2,
  707     V3,
  708     V4,
  709     V5,
  710     V6,
  711     V7,
  712     V8,
  713     V9,
  714     V10,
  715     V11,
  716     V12,
  717     V13,
  718     V14,
  719     V15,
  720     V16,
  721     V17,
  722     V18,
  723     V19,
  724     V20,
  725     V21,
  726     V22,
  727     V23,
  728     V24,
  729     V25,
  730     V26,
  731     V27,
  732     V28,
  733     V29,
  734     V30,
  735     V31
  736 );
  737 
  738 // Double precision float registers have virtual `high halves' that
  739 // are needed by the allocator.
  740 // Class for all double registers
  741 reg_class double_reg(
  742     V0, V0_H,
  743     V1, V1_H,
  744     V2, V2_H,
  745     V3, V3_H,
  746     V4, V4_H,
  747     V5, V5_H,
  748     V6, V6_H,
  749     V7, V7_H,
  750     V8, V8_H,
  751     V9, V9_H,
  752     V10, V10_H,
  753     V11, V11_H,
  754     V12, V12_H,
  755     V13, V13_H,
  756     V14, V14_H,
  757     V15, V15_H,
  758     V16, V16_H,
  759     V17, V17_H,
  760     V18, V18_H,
  761     V19, V19_H,
  762     V20, V20_H,
  763     V21, V21_H,
  764     V22, V22_H,
  765     V23, V23_H,
  766     V24, V24_H,
  767     V25, V25_H,
  768     V26, V26_H,
  769     V27, V27_H,
  770     V28, V28_H,
  771     V29, V29_H,
  772     V30, V30_H,
  773     V31, V31_H
  774 );
  775 
  776 // Class for all SVE vector registers.
  777 reg_class vectora_reg (
  778     V0, V0_H, V0_J, V0_K,
  779     V1, V1_H, V1_J, V1_K,
  780     V2, V2_H, V2_J, V2_K,
  781     V3, V3_H, V3_J, V3_K,
  782     V4, V4_H, V4_J, V4_K,
  783     V5, V5_H, V5_J, V5_K,
  784     V6, V6_H, V6_J, V6_K,
  785     V7, V7_H, V7_J, V7_K,
  786     V8, V8_H, V8_J, V8_K,
  787     V9, V9_H, V9_J, V9_K,
  788     V10, V10_H, V10_J, V10_K,
  789     V11, V11_H, V11_J, V11_K,
  790     V12, V12_H, V12_J, V12_K,
  791     V13, V13_H, V13_J, V13_K,
  792     V14, V14_H, V14_J, V14_K,
  793     V15, V15_H, V15_J, V15_K,
  794     V16, V16_H, V16_J, V16_K,
  795     V17, V17_H, V17_J, V17_K,
  796     V18, V18_H, V18_J, V18_K,
  797     V19, V19_H, V19_J, V19_K,
  798     V20, V20_H, V20_J, V20_K,
  799     V21, V21_H, V21_J, V21_K,
  800     V22, V22_H, V22_J, V22_K,
  801     V23, V23_H, V23_J, V23_K,
  802     V24, V24_H, V24_J, V24_K,
  803     V25, V25_H, V25_J, V25_K,
  804     V26, V26_H, V26_J, V26_K,
  805     V27, V27_H, V27_J, V27_K,
  806     V28, V28_H, V28_J, V28_K,
  807     V29, V29_H, V29_J, V29_K,
  808     V30, V30_H, V30_J, V30_K,
  809     V31, V31_H, V31_J, V31_K,
  810 );
  811 
// Class for all 64bit vector registers.
// Two 32-bit mask slots per register (Vn, Vn_H) cover a 64-bit value.
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
  847 
// Class for all 128bit vector registers.
// Four 32-bit mask slots per register (Vn, Vn_H, Vn_J, Vn_K) cover a
// full 128-bit NEON value.
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
  883 
// Singleton register classes for the individual vector registers
// v0..v31. Note that only the lower two mask slots (Vn, Vn_H) are
// listed in each class. Presumably these exist so match rules can pin
// an operand to one specific V register — confirm against the instruct
// definitions that use them.

// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Class for 128 bit register v4
reg_class v4_reg(
    V4, V4_H
);

// Class for 128 bit register v5
reg_class v5_reg(
    V5, V5_H
);

// Class for 128 bit register v6
reg_class v6_reg(
    V6, V6_H
);

// Class for 128 bit register v7
reg_class v7_reg(
    V7, V7_H
);

// Class for 128 bit register v8
reg_class v8_reg(
    V8, V8_H
);

// Class for 128 bit register v9
reg_class v9_reg(
    V9, V9_H
);

// Class for 128 bit register v10
reg_class v10_reg(
    V10, V10_H
);

// Class for 128 bit register v11
reg_class v11_reg(
    V11, V11_H
);

// Class for 128 bit register v12
reg_class v12_reg(
    V12, V12_H
);

// Class for 128 bit register v13
reg_class v13_reg(
    V13, V13_H
);

// Class for 128 bit register v14
reg_class v14_reg(
    V14, V14_H
);

// Class for 128 bit register v15
reg_class v15_reg(
    V15, V15_H
);

// Class for 128 bit register v16
reg_class v16_reg(
    V16, V16_H
);

// Class for 128 bit register v17
reg_class v17_reg(
    V17, V17_H
);

// Class for 128 bit register v18
reg_class v18_reg(
    V18, V18_H
);

// Class for 128 bit register v19
reg_class v19_reg(
    V19, V19_H
);

// Class for 128 bit register v20
reg_class v20_reg(
    V20, V20_H
);

// Class for 128 bit register v21
reg_class v21_reg(
    V21, V21_H
);

// Class for 128 bit register v22
reg_class v22_reg(
    V22, V22_H
);

// Class for 128 bit register v23
reg_class v23_reg(
    V23, V23_H
);

// Class for 128 bit register v24
reg_class v24_reg(
    V24, V24_H
);

// Class for 128 bit register v25
reg_class v25_reg(
    V25, V25_H
);

// Class for 128 bit register v26
reg_class v26_reg(
    V26, V26_H
);

// Class for 128 bit register v27
reg_class v27_reg(
    V27, V27_H
);

// Class for 128 bit register v28
reg_class v28_reg(
    V28, V28_H
);

// Class for 128 bit register v29
reg_class v29_reg(
    V29, V29_H
);

// Class for 128 bit register v30
reg_class v30_reg(
    V30, V30_H
);

// Class for 128 bit register v31
reg_class v31_reg(
    V31, V31_H
);
 1043 
// Class for all SVE predicate registers.
// P7 is deliberately excluded: it is kept with all elements set to TRUE
// so it can serve as an always-true governing predicate.
reg_class pr_reg (
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    // P7, non-allocatable, preserved with all elements preset to TRUE.
    P8,
    P9,
    P10,
    P11,
    P12,
    P13,
    P14,
    P15
);
 1063 
// Class for SVE governing predicate registers, which are used
// to determine the active elements of a predicated instruction.
// Only p0-p6 qualify; p7 is reserved (all elements TRUE), and p8-p15
// cannot govern (they are not in the governing-predicate encoding range).
reg_class gov_pr (
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    // P7, non-allocatable, preserved with all elements preset to TRUE.
);

// Singleton classes pinning an operand to predicate register p0 or p1.
reg_class p0_reg(P0);
reg_class p1_reg(P1);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);
 1082 
 1083 %}
 1084 
 1085 //----------DEFINITION BLOCK---------------------------------------------------
 1086 // Define name --> value mappings to inform the ADLC of an integer valued name
 1087 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 1088 // Format:
 1089 //        int_def  <name>         ( <int_value>, <expression>);
 1090 // Generated Code in ad_<arch>.hpp
 1091 //        #define  <name>   (<expression>)
 1092 //        // value == <int_value>
 1093 // Generated code in ad_<arch>.cpp adlc_verification()
 1094 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 1095 //
 1096 
 1097 // we follow the ppc-aix port in using a simple cost model which ranks
 1098 // register operations as cheap, memory ops as more expensive and
 1099 // branches as most expensive. the first two have a low as well as a
 1100 // normal cost. huge cost appears to be a way of saying don't do
 1101 // something
 1102 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls are ranked twice as expensive as a plain insn.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile memory references are the most expensive ordinary op.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 1110 
 1111 
 1112 //----------SOURCE BLOCK-------------------------------------------------------
 1113 // This is a block of C++ code which provides values, functions, and
 1114 // definitions necessary in the rest of the architecture description
 1115 
 1116 source_hpp %{
 1117 
 1118 #include "asm/macroAssembler.hpp"
 1119 #include "gc/shared/barrierSetAssembler.hpp"
 1120 #include "gc/shared/cardTable.hpp"
 1121 #include "gc/shared/cardTableBarrierSet.hpp"
 1122 #include "gc/shared/collectedHeap.hpp"
 1123 #include "opto/addnode.hpp"
 1124 #include "opto/convertnode.hpp"
 1125 #include "runtime/objectMonitor.hpp"
 1126 
 1127 extern RegMask _ANY_REG32_mask;
 1128 extern RegMask _ANY_REG_mask;
 1129 extern RegMask _PTR_REG_mask;
 1130 extern RegMask _NO_SPECIAL_REG32_mask;
 1131 extern RegMask _NO_SPECIAL_REG_mask;
 1132 extern RegMask _NO_SPECIAL_PTR_REG_mask;
 1133 extern RegMask _NO_SPECIAL_NO_RFP_PTR_REG_mask;
 1134 
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  // This platform reports zero: far calls are handled elsewhere,
  // so shorten_branches never needs to budget for a trampoline here.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
 1152 
class HandlerImpl {

 public:

  // Emitters for the exception and deopt handler stubs; implementations
  // live in the source block of this ad file.
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // The exception handler is sized as a single far code-stub branch.
  static uint size_exception_handler() {
    return MacroAssembler::far_codestub_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    return NativeInstruction::instruction_size + MacroAssembler::far_codestub_branch_size();
  }
};
 1169 
// Platform-dependent node flags: AArch64 defines no extra flags beyond
// the shared set, so _last_flag simply aliases Node::_last_flag.
class Node::PD {
public:
  enum NodeFlags {
    _last_flag = Node::_last_flag
  };
};
 1176 
  // Classify an opcode as a compare-and-swap style atomic; see the
  // definition in the source block for the exact opcode sets and the
  // meaning of maybe_volatile.
  bool is_CAS(int opcode, bool maybe_volatile);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);

  // Convert BoolTest condition to Assembler condition.
  // Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
  Assembler::Condition to_assembler_cond(BoolTest::mask cond);
 1199 %}
 1200 
 1201 source %{
 1202 
  // Derived RegMask with conditionally allocatable registers

  // No platform-specific mach-node analysis pass is needed on AArch64.
  void PhaseOutput::pd_perform_mach_node_analysis() {
  }

  // Machine nodes require no special code alignment (1-byte, i.e. none).
  int MachNode::pd_alignment_required() const {
    return 1;
  }

  // No padding is ever inserted before a machine node.
  int MachNode::compute_padding(int current_offset) const {
    return 0;
  }
 1215 
  // Definitions of the runtime-derived register masks declared extern in
  // the source_hpp block above.
  RegMask _ANY_REG32_mask;
  RegMask _ANY_REG_mask;
  RegMask _PTR_REG_mask;
  RegMask _NO_SPECIAL_REG32_mask;
  RegMask _NO_SPECIAL_REG_mask;
  RegMask _NO_SPECIAL_PTR_REG_mask;
  RegMask _NO_SPECIAL_NO_RFP_PTR_REG_mask;

  // Populate the derived masks above from the adlc-generated ones.
  void reg_mask_init() {
    // We derive below RegMask(s) from the ones which are auto-generated from
    // adlc register classes to make AArch64 rheapbase (r27) and rfp (r29)
    // registers conditionally reserved.

    // sp (r31) is never allocatable as a 32-bit register
    _ANY_REG32_mask = _ALL_REG32_mask;
    _ANY_REG32_mask.Remove(OptoReg::as_OptoReg(r31_sp->as_VMReg()));

    _ANY_REG_mask = _ALL_REG_mask;

    _PTR_REG_mask = _ALL_REG_mask;

    _NO_SPECIAL_REG32_mask = _ALL_REG32_mask;
    _NO_SPECIAL_REG32_mask.SUBTRACT(_NON_ALLOCATABLE_REG32_mask);

    _NO_SPECIAL_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    _NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    // r27 is not allocatable when compressed oops is on and heapbase is not
    // zero; compressed klass pointers don't use r27 after JDK-8234794
    if (UseCompressedOops && (CompressedOops::ptrs_base() != NULL)) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
      _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
      _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
    }

    // r29 is not allocatable when PreserveFramePointer is on
    if (PreserveFramePointer) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
      _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
      _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
    }

    // the no-rfp variant additionally removes r29 unconditionally
    _NO_SPECIAL_NO_RFP_PTR_REG_mask = _NO_SPECIAL_PTR_REG_mask;
    _NO_SPECIAL_NO_RFP_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
  }
 1263 
  // Optimization of volatile gets and puts
 1265   // -------------------------------------
 1266   //
 1267   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
 1268   // use to implement volatile reads and writes. For a volatile read
 1269   // we simply need
 1270   //
 1271   //   ldar<x>
 1272   //
 1273   // and for a volatile write we need
 1274   //
 1275   //   stlr<x>
 1276   //
 1277   // Alternatively, we can implement them by pairing a normal
 1278   // load/store with a memory barrier. For a volatile read we need
 1279   //
 1280   //   ldr<x>
 1281   //   dmb ishld
 1282   //
 1283   // for a volatile write
 1284   //
 1285   //   dmb ish
 1286   //   str<x>
 1287   //   dmb ish
 1288   //
 1289   // We can also use ldaxr and stlxr to implement compare and swap CAS
 1290   // sequences. These are normally translated to an instruction
 1291   // sequence like the following
 1292   //
 1293   //   dmb      ish
 1294   // retry:
 1295   //   ldxr<x>   rval raddr
 1296   //   cmp       rval rold
 1297   //   b.ne done
 1298   //   stlxr<x>  rval, rnew, rold
 1299   //   cbnz      rval retry
 1300   // done:
 1301   //   cset      r0, eq
 1302   //   dmb ishld
 1303   //
 1304   // Note that the exclusive store is already using an stlxr
 1305   // instruction. That is required to ensure visibility to other
 1306   // threads of the exclusive write (assuming it succeeds) before that
 1307   // of any subsequent writes.
 1308   //
 1309   // The following instruction sequence is an improvement on the above
 1310   //
 1311   // retry:
 1312   //   ldaxr<x>  rval raddr
 1313   //   cmp       rval rold
 1314   //   b.ne done
 1315   //   stlxr<x>  rval, rnew, rold
 1316   //   cbnz      rval retry
 1317   // done:
 1318   //   cset      r0, eq
 1319   //
 1320   // We don't need the leading dmb ish since the stlxr guarantees
 1321   // visibility of prior writes in the case that the swap is
 1322   // successful. Crucially we don't have to worry about the case where
 1323   // the swap is not successful since no valid program should be
 1324   // relying on visibility of prior changes by the attempting thread
 1325   // in the case where the CAS fails.
 1326   //
 1327   // Similarly, we don't need the trailing dmb ishld if we substitute
 1328   // an ldaxr instruction since that will provide all the guarantees we
 1329   // require regarding observation of changes made by other threads
 1330   // before any change to the CAS address observed by the load.
 1331   //
 1332   // In order to generate the desired instruction sequence we need to
 1333   // be able to identify specific 'signature' ideal graph node
  // sequences which i) occur as a translation of volatile reads or
 1335   // writes or CAS operations and ii) do not occur through any other
 1336   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
 1338   // sequences to the desired machine code sequences. Selection of the
 1339   // alternative rules can be implemented by predicates which identify
 1340   // the relevant node sequences.
 1341   //
 1342   // The ideal graph generator translates a volatile read to the node
 1343   // sequence
 1344   //
 1345   //   LoadX[mo_acquire]
 1346   //   MemBarAcquire
 1347   //
 1348   // As a special case when using the compressed oops optimization we
 1349   // may also see this variant
 1350   //
 1351   //   LoadN[mo_acquire]
 1352   //   DecodeN
 1353   //   MemBarAcquire
 1354   //
 1355   // A volatile write is translated to the node sequence
 1356   //
 1357   //   MemBarRelease
 1358   //   StoreX[mo_release] {CardMark}-optional
 1359   //   MemBarVolatile
 1360   //
 1361   // n.b. the above node patterns are generated with a strict
 1362   // 'signature' configuration of input and output dependencies (see
 1363   // the predicates below for exact details). The card mark may be as
 1364   // simple as a few extra nodes or, in a few GC configurations, may
 1365   // include more complex control flow between the leading and
 1366   // trailing memory barriers. However, whatever the card mark
 1367   // configuration these signatures are unique to translated volatile
 1368   // reads/stores -- they will not appear as a result of any other
 1369   // bytecode translation or inlining nor as a consequence of
 1370   // optimizing transforms.
 1371   //
 1372   // We also want to catch inlined unsafe volatile gets and puts and
 1373   // be able to implement them using either ldar<x>/stlr<x> or some
 1374   // combination of ldr<x>/stlr<x> and dmb instructions.
 1375   //
 1376   // Inlined unsafe volatiles puts manifest as a minor variant of the
 1377   // normal volatile put node sequence containing an extra cpuorder
 1378   // membar
 1379   //
 1380   //   MemBarRelease
 1381   //   MemBarCPUOrder
 1382   //   StoreX[mo_release] {CardMark}-optional
 1383   //   MemBarCPUOrder
 1384   //   MemBarVolatile
 1385   //
 1386   // n.b. as an aside, a cpuorder membar is not itself subject to
 1387   // matching and translation by adlc rules.  However, the rule
 1388   // predicates need to detect its presence in order to correctly
 1389   // select the desired adlc rules.
 1390   //
 1391   // Inlined unsafe volatile gets manifest as a slightly different
 1392   // node sequence to a normal volatile get because of the
 1393   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
 1395   // MemBarAcquire, possibly through an optional DecodeN, is still
 1396   // present
 1397   //
 1398   //   MemBarCPUOrder
 1399   //        ||       \\
 1400   //   MemBarCPUOrder LoadX[mo_acquire]
 1401   //        ||            |
 1402   //        ||       {DecodeN} optional
 1403   //        ||       /
 1404   //     MemBarAcquire
 1405   //
 1406   // In this case the acquire membar does not directly depend on the
 1407   // load. However, we can be sure that the load is generated from an
 1408   // inlined unsafe volatile get if we see it dependent on this unique
 1409   // sequence of membar nodes. Similarly, given an acquire membar we
 1410   // can know that it was added because of an inlined unsafe volatile
 1411   // get if it is fed and feeds a cpuorder membar and if its feed
 1412   // membar also feeds an acquiring load.
 1413   //
 1414   // Finally an inlined (Unsafe) CAS operation is translated to the
 1415   // following ideal graph
 1416   //
 1417   //   MemBarRelease
 1418   //   MemBarCPUOrder
 1419   //   CompareAndSwapX {CardMark}-optional
 1420   //   MemBarCPUOrder
 1421   //   MemBarAcquire
 1422   //
 1423   // So, where we can identify these volatile read and write
 1424   // signatures we can choose to plant either of the above two code
 1425   // sequences. For a volatile read we can simply plant a normal
 1426   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
 1427   // also choose to inhibit translation of the MemBarAcquire and
 1428   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
 1429   //
 1430   // When we recognise a volatile store signature we can choose to
  // plant a dmb ish as a translation for the MemBarRelease, a
 1432   // normal str<x> and then a dmb ish for the MemBarVolatile.
 1433   // Alternatively, we can inhibit translation of the MemBarRelease
 1434   // and MemBarVolatile and instead plant a simple stlr<x>
 1435   // instruction.
 1436   //
 1437   // when we recognise a CAS signature we can choose to plant a dmb
 1438   // ish as a translation for the MemBarRelease, the conventional
 1439   // macro-instruction sequence for the CompareAndSwap node (which
 1440   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
 1441   // Alternatively, we can elide generation of the dmb instructions
 1442   // and plant the alternative CompareAndSwap macro-instruction
 1443   // sequence (which uses ldaxr<x>).
 1444   //
 1445   // Of course, the above only applies when we see these signature
 1446   // configurations. We still want to plant dmb instructions in any
 1447   // other cases where we may see a MemBarAcquire, MemBarRelease or
 1448   // MemBarVolatile. For example, at the end of a constructor which
 1449   // writes final/volatile fields we will see a MemBarRelease
 1450   // instruction and this needs a 'dmb ish' lest we risk the
 1451   // constructed object being visible without making the
 1452   // final/volatile field writes visible.
 1453   //
 1454   // n.b. the translation rules below which rely on detection of the
 1455   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
 1456   // If we see anything other than the signature configurations we
 1457   // always just translate the loads and stores to ldr<x> and str<x>
 1458   // and translate acquire, release and volatile membars to the
 1459   // relevant dmb instructions.
 1460   //
 1461 
  // is_CAS(int opcode, bool maybe_volatile)
  //
  // return true if opcode is one of the possible CompareAndSwapX
  // values otherwise false. When maybe_volatile is true the weak and
  // exchange forms below also count as CAS; when false only the first
  // (unconditional) group does.

  bool is_CAS(int opcode, bool maybe_volatile)
  {
    switch(opcode) {
      // We handle these: always classified as CAS
    case Op_CompareAndSwapI:
    case Op_CompareAndSwapL:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_CompareAndSwapB:
    case Op_CompareAndSwapS:
    case Op_GetAndSetI:
    case Op_GetAndSetL:
    case Op_GetAndSetP:
    case Op_GetAndSetN:
    case Op_GetAndAddI:
    case Op_GetAndAddL:
      return true;
      // Exchange and weak forms: classified as CAS only when the caller
      // allows maybe-volatile treatment
    case Op_CompareAndExchangeI:
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeB:
    case Op_CompareAndExchangeS:
    case Op_CompareAndExchangeL:
    case Op_CompareAndExchangeP:
    case Op_WeakCompareAndSwapB:
    case Op_WeakCompareAndSwapS:
    case Op_WeakCompareAndSwapI:
    case Op_WeakCompareAndSwapL:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      return maybe_volatile;
    default:
      return false;
    }
  }
 1507 
 1508   // helper to determine the maximum number of Phi nodes we may need to
 1509   // traverse when searching from a card mark membar for the merge mem
 1510   // feeding a trailing membar or vice versa
 1511 
 1512 // predicates controlling emit of ldr<x>/ldar<x>
 1513 
 1514 bool unnecessary_acquire(const Node *barrier)
 1515 {
 1516   assert(barrier->is_MemBar(), "expecting a membar");
 1517 
 1518   MemBarNode* mb = barrier->as_MemBar();
 1519 
 1520   if (mb->trailing_load()) {
 1521     return true;
 1522   }
 1523 
 1524   if (mb->trailing_load_store()) {
 1525     Node* load_store = mb->in(MemBarNode::Precedent);
 1526     assert(load_store->is_LoadStore(), "unexpected graph shape");
 1527     return is_CAS(load_store->Opcode(), true);
 1528   }
 1529 
 1530   return false;
 1531 }
 1532 
 1533 bool needs_acquiring_load(const Node *n)
 1534 {
 1535   assert(n->is_Load(), "expecting a load");
 1536   LoadNode *ld = n->as_Load();
 1537   return ld->is_acquire();
 1538 }
 1539 
 1540 bool unnecessary_release(const Node *n)
 1541 {
 1542   assert((n->is_MemBar() &&
 1543           n->Opcode() == Op_MemBarRelease),
 1544          "expecting a release membar");
 1545 
 1546   MemBarNode *barrier = n->as_MemBar();
 1547   if (!barrier->leading()) {
 1548     return false;
 1549   } else {
 1550     Node* trailing = barrier->trailing_membar();
 1551     MemBarNode* trailing_mb = trailing->as_MemBar();
 1552     assert(trailing_mb->trailing(), "Not a trailing membar?");
 1553     assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
 1554 
 1555     Node* mem = trailing_mb->in(MemBarNode::Precedent);
 1556     if (mem->is_Store()) {
 1557       assert(mem->as_Store()->is_release(), "");
 1558       assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
 1559       return true;
 1560     } else {
 1561       assert(mem->is_LoadStore(), "");
 1562       assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
 1563       return is_CAS(mem->Opcode(), true);
 1564     }
 1565   }
 1566   return false;
 1567 }
 1568 
bool unnecessary_volatile(const Node *n)
{
  // assert n->is_MemBar();
  MemBarNode *mbvol = n->as_MemBar();

  // The volatile membar is redundant iff it trails a releasing store
  // (the stlr<x> planted for that store supplies the ordering).
  bool release = mbvol->trailing_store();
  assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
#ifdef ASSERT
  if (release) {
    // sanity-check the leading/trailing membar pairing
    Node* leading = mbvol->leading_membar();
    assert(leading->Opcode() == Op_MemBarRelease, "");
    assert(leading->as_MemBar()->leading_store(), "");
    assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
  }
#endif

  return release;
}
 1587 
 1588 // predicates controlling emit of str<x>/stlr<x>
 1589 
 1590 bool needs_releasing_store(const Node *n)
 1591 {
 1592   // assert n->is_Store();
 1593   StoreNode *st = n->as_Store();
 1594   return st->trailing_membar() != NULL;
 1595 }
 1596 
 1597 // predicate controlling translation of CAS
 1598 //
 1599 // returns true if CAS needs to use an acquiring load otherwise false
 1600 
 1601 bool needs_acquiring_load_exclusive(const Node *n)
 1602 {
 1603   assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
 1604   LoadStoreNode* ldst = n->as_LoadStore();
 1605   if (is_CAS(n->Opcode(), false)) {
 1606     assert(ldst->trailing_membar() != NULL, "expected trailing membar");
 1607   } else {
 1608     return ldst->trailing_membar() != NULL;
 1609   }
 1610 
 1611   // so we can just return true here
 1612   return true;
 1613 }
 1614 
 1615 #define __ _masm.
 1616 
 1617 // advance declarations for helper functions to convert register
 1618 // indices to register objects
 1619 
 1620 // the ad file has to provide implementations of certain methods
 1621 // expected by the generic code
 1622 //
 1623 // REQUIRED FUNCTIONALITY
 1624 
 1625 //=============================================================================
 1626 
 1627 // !!!!! Special hack to get all types of calls to specify the byte offset
 1628 //       from the start of the call to the point where the return address
 1629 //       will point.
 1630 
 1631 int MachCallStaticJavaNode::ret_addr_offset()
 1632 {
 1633   // call should be a simple bl
 1634   int off = 4;
 1635   return off;
 1636 }
 1637 
// Offset from the call site to the return address: four 4-byte
// instructions (the inline-cache load plus the call itself).
int MachCallDynamicJavaNode::ret_addr_offset()
{
  return 16; // movz, movk, movk, bl
}
 1642 
 1643 int MachCallRuntimeNode::ret_addr_offset() {
 1644   // for generated stubs the call will be
 1645   //   bl(addr)
 1646   // or with far branches
 1647   //   bl(trampoline_stub)
 1648   // for real runtime callouts it will be six instructions
 1649   // see aarch64_enc_java_to_runtime
 1650   //   adr(rscratch2, retaddr)
 1651   //   lea(rscratch1, RuntimeAddress(addr)
 1652   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
 1653   //   blr(rscratch1)
 1654   CodeBlob *cb = CodeCache::find_blob(_entry_point);
 1655   if (cb) {
 1656     return 1 * NativeInstruction::instruction_size;
 1657   } else {
 1658     return 6 * NativeInstruction::instruction_size;
 1659   }
 1660 }
 1661 
 1662 //=============================================================================
 1663 
#ifndef PRODUCT
// Debug-only listing form of the breakpoint node.
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif

// Emit a single brk #0 instruction, which traps to the debugger.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  C2_MacroAssembler _masm(&cbuf);
  __ brk(0);
}

uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 1678 
 1679 //=============================================================================
 1680 
#ifndef PRODUCT
  // Debug-only listing form: report how many padding bytes the nops span.
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif

  // Emit _count nop instructions to realize the requested padding.
  void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
    C2_MacroAssembler _masm(&cbuf);
    for (int i = 0; i < _count; i++) {
      __ nop();
    }
  }

  // Size in bytes: one 4-byte instruction per nop.
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
 1697 
 1698 //=============================================================================
// The constant base produces no value in a register on this platform.
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;

int ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}

// The node never requires post-allocation expansion, so the expand
// method must be unreachable.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}

void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}

uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
 1723 
#ifndef PRODUCT
// Pseudo-assembly listing of the method prolog for debug output.
// Mirrors the code sequence produced by MachPrologNode::emit below.
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  if (C->output()->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // ROP protection: sign the return address on entry (PAC).
  if (VM_Version::use_rop_protection()) {
    st->print("ldr  zr, [lr]\n\t");
    st->print("paciaz\n\t");
  }
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    // Small frame: the whole frame size fits a sub immediate; store
    // rfp/lr at the top of the new frame.
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    // Large frame: push lr/rfp first, then drop sp by the remainder held
    // in a scratch register.
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
  // nmethod entry barrier: compare the embedded guard value against the
  // thread's disarmed value; slow path goes through the barrier stub.
  // Only emitted for normal method compilations (not stubs).
  if (C->stub_function() == NULL && BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) {
    st->print("\n\t");
    st->print("ldr  rscratch1, [guard]\n\t");
    st->print("dmb ishld\n\t");
    st->print("ldr  rscratch2, [rthread, #thread_disarmed_guard_value_offset]\n\t");
    st->print("cmp  rscratch1, rscratch2\n\t");
    st->print("b.eq skip");
    st->print("\n\t");
    st->print("blr #nmethod_entry_barrier_stub\n\t");
    st->print("b skip\n\t");
    st->print("guard: int\n\t");
    st->print("\n\t");
    st->print("skip:\n\t");
  }
}
#endif
 1763 
// Emit the method prolog: optional clinit barrier, stack bang, frame
// build, and (for normal methods) the nmethod entry barrier.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const int framesize = C->output()->frame_size_in_bytes();

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  // Fast class-initialization barrier: if the holder class is being
  // initialized by another thread, tail-call the wrong-method stub.
  if (C->clinit_barrier_on_entry()) {
    assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");

    Label L_skip_barrier;

    __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
    __ bind(L_skip_barrier);
  }

  // Reset the all-true SVE predicate register used by vector code
  // (see MacroAssembler::reinitialize_ptrue).
  if (C->max_vector_size() > 0) {
    __ reinitialize_ptrue();
  }

  int bangsize = C->output()->bang_size_in_bytes();
  if (C->output()->need_stack_bang(bangsize))
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (C->stub_function() == NULL) {
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
    if (BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) {
      // Dummy labels for just measuring the code size
      Label dummy_slow_path;
      Label dummy_continuation;
      Label dummy_guard;
      Label* slow_path = &dummy_slow_path;
      Label* continuation = &dummy_continuation;
      Label* guard = &dummy_guard;
      if (!Compile::current()->output()->in_scratch_emit_size()) {
        // Use real labels from actual stub when not emitting code for the purpose of measuring its size
        C2EntryBarrierStub* stub = new (Compile::current()->comp_arena()) C2EntryBarrierStub();
        Compile::current()->output()->add_stub(stub);
        slow_path = &stub->entry();
        continuation = &stub->continuation();
        guard = &stub->guard();
      }
      // In the C2 code, we move the non-hot part of nmethod entry barriers out-of-line to a stub.
      bs->nmethod_entry_barrier(&_masm, slow_path, continuation, guard);
    }
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->output()->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    ConstantTable& constant_table = C->output()->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
 1832 
// Prolog size varies (frame size, barriers, flags), so compute it from a
// scratch emit via the generic MachNode::size.
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

// Number of relocatable values in the prolog.
int MachPrologNode::reloc() const
{
  return 0;
}
 1843 
 1844 //=============================================================================
 1845 
#ifndef PRODUCT
// Pseudo-assembly listing of the method epilog for debug output.
// Mirrors the code produced by MachEpilogNode::emit below.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    // Nothing to tear down beyond the saved lr/rfp pair.
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    // Small frame: reload the pair from its slot, then one add.
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    // Large frame: move the remainder through a scratch register.
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }
  // ROP protection: authenticate the return address before returning.
  if (VM_Version::use_rop_protection()) {
    st->print("autiaz\n\t");
    st->print("ldr  zr, [lr]\n\t");
  }

  // Return-point safepoint poll for normal method compilations.
  if (do_polling() && C->is_method_compilation()) {
    st->print("# test polling word\n\t");
    st->print("ldr  rscratch1, [rthread],#%d\n\t", in_bytes(JavaThread::polling_word_offset()));
    st->print("cmp  sp, rscratch1\n\t");
    st->print("bhi #slow_path");
  }
}
#endif
 1876 
// Emit the method epilog: frame teardown, reserved-stack check, and the
// return-point safepoint poll (with out-of-line slow path stub).
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    // Use a dummy label when only measuring code size; otherwise create a
    // real out-of-line safepoint-poll stub and branch to its entry.
    Label dummy_label;
    Label* code_stub = &dummy_label;
    if (!C->output()->in_scratch_emit_size()) {
      C2SafepointPollStub* stub = new (C->comp_arena()) C2SafepointPollStub(__ offset());
      C->output()->add_stub(stub);
      code_stub = &stub->entry();
    }
    __ relocate(relocInfo::poll_return_type);
    __ safepoint_poll(*code_stub, true /* at_return */, false /* acquire */, true /* in_nmethod */);
  }
}
 1900 
// Epilog size varies with frame size and polling; measure it generically.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

// Use the default pipeline description for the epilog.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
 1914 
 1915 //=============================================================================
 1916 
 1917 // Figure out which register class each belongs in: rc_int, rc_float or
 1918 // rc_stack.
 1919 enum RC { rc_bad, rc_int, rc_float, rc_predicate, rc_stack };
 1920 
 1921 static enum RC rc_class(OptoReg::Name reg) {
 1922 
 1923   if (reg == OptoReg::Bad) {
 1924     return rc_bad;
 1925   }
 1926 
 1927   // we have 32 int registers * 2 halves
 1928   int slots_of_int_registers = Register::number_of_registers * Register::max_slots_per_register;
 1929 
 1930   if (reg < slots_of_int_registers) {
 1931     return rc_int;
 1932   }
 1933 
 1934   // we have 32 float register * 8 halves
 1935   int slots_of_float_registers = FloatRegister::number_of_registers * FloatRegister::max_slots_per_register;
 1936   if (reg < slots_of_int_registers + slots_of_float_registers) {
 1937     return rc_float;
 1938   }
 1939 
 1940   int slots_of_predicate_registers = PRegister::number_of_registers * PRegister::max_slots_per_register;
 1941   if (reg < slots_of_int_registers + slots_of_float_registers + slots_of_predicate_registers) {
 1942     return rc_predicate;
 1943   }
 1944 
 1945   // Between predicate regs & stack is the flags.
 1946   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
 1947 
 1948   return rc_stack;
 1949 }
 1950 
 1951 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
 1952   Compile* C = ra_->C;
 1953 
 1954   // Get registers to move.
 1955   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
 1956   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
 1957   OptoReg::Name dst_hi = ra_->get_reg_second(this);
 1958   OptoReg::Name dst_lo = ra_->get_reg_first(this);
 1959 
 1960   enum RC src_hi_rc = rc_class(src_hi);
 1961   enum RC src_lo_rc = rc_class(src_lo);
 1962   enum RC dst_hi_rc = rc_class(dst_hi);
 1963   enum RC dst_lo_rc = rc_class(dst_lo);
 1964 
 1965   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
 1966 
 1967   if (src_hi != OptoReg::Bad && !bottom_type()->isa_vectmask()) {
 1968     assert((src_lo&1)==0 && src_lo+1==src_hi &&
 1969            (dst_lo&1)==0 && dst_lo+1==dst_hi,
 1970            "expected aligned-adjacent pairs");
 1971   }
 1972 
 1973   if (src_lo == dst_lo && src_hi == dst_hi) {
 1974     return 0;            // Self copy, no move.
 1975   }
 1976 
 1977   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
 1978               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
 1979   int src_offset = ra_->reg2offset(src_lo);
 1980   int dst_offset = ra_->reg2offset(dst_lo);
 1981 
 1982   if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
 1983     uint ireg = ideal_reg();
 1984     if (ireg == Op_VecA && cbuf) {
 1985       C2_MacroAssembler _masm(cbuf);
 1986       int sve_vector_reg_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
 1987       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
 1988         // stack->stack
 1989         __ spill_copy_sve_vector_stack_to_stack(src_offset, dst_offset,
 1990                                                 sve_vector_reg_size_in_bytes);
 1991       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
 1992         __ spill_sve_vector(as_FloatRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo),
 1993                             sve_vector_reg_size_in_bytes);
 1994       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
 1995         __ unspill_sve_vector(as_FloatRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo),
 1996                               sve_vector_reg_size_in_bytes);
 1997       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
 1998         __ sve_orr(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 1999                    as_FloatRegister(Matcher::_regEncode[src_lo]),
 2000                    as_FloatRegister(Matcher::_regEncode[src_lo]));
 2001       } else {
 2002         ShouldNotReachHere();
 2003       }
 2004     } else if (cbuf) {
 2005       assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
 2006       C2_MacroAssembler _masm(cbuf);
 2007       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
 2008       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
 2009         // stack->stack
 2010         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
 2011         if (ireg == Op_VecD) {
 2012           __ unspill(rscratch1, true, src_offset);
 2013           __ spill(rscratch1, true, dst_offset);
 2014         } else {
 2015           __ spill_copy128(src_offset, dst_offset);
 2016         }
 2017       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
 2018         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2019                ireg == Op_VecD ? __ T8B : __ T16B,
 2020                as_FloatRegister(Matcher::_regEncode[src_lo]));
 2021       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
 2022         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
 2023                  ireg == Op_VecD ? __ D : __ Q,
 2024                  ra_->reg2offset(dst_lo));
 2025       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
 2026         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2027                    ireg == Op_VecD ? __ D : __ Q,
 2028                    ra_->reg2offset(src_lo));
 2029       } else {
 2030         ShouldNotReachHere();
 2031       }
 2032     }
 2033   } else if (cbuf) {
 2034     C2_MacroAssembler _masm(cbuf);
 2035     switch (src_lo_rc) {
 2036     case rc_int:
 2037       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
 2038         if (is64) {
 2039             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
 2040                    as_Register(Matcher::_regEncode[src_lo]));
 2041         } else {
 2042             C2_MacroAssembler _masm(cbuf);
 2043             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
 2044                     as_Register(Matcher::_regEncode[src_lo]));
 2045         }
 2046       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
 2047         if (is64) {
 2048             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2049                      as_Register(Matcher::_regEncode[src_lo]));
 2050         } else {
 2051             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2052                      as_Register(Matcher::_regEncode[src_lo]));
 2053         }
 2054       } else {                    // gpr --> stack spill
 2055         assert(dst_lo_rc == rc_stack, "spill to bad register class");
 2056         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
 2057       }
 2058       break;
 2059     case rc_float:
 2060       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
 2061         if (is64) {
 2062             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
 2063                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2064         } else {
 2065             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
 2066                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2067         }
 2068       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
 2069         if (is64) {
 2070             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2071                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2072         } else {
 2073             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2074                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2075         }
 2076       } else {                    // fpr --> stack spill
 2077         assert(dst_lo_rc == rc_stack, "spill to bad register class");
 2078         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
 2079                  is64 ? __ D : __ S, dst_offset);
 2080       }
 2081       break;
 2082     case rc_stack:
 2083       if (dst_lo_rc == rc_int) {  // stack --> gpr load
 2084         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
 2085       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
 2086         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2087                    is64 ? __ D : __ S, src_offset);
 2088       } else if (dst_lo_rc == rc_predicate) {
 2089         __ unspill_sve_predicate(as_PRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo),
 2090                                  Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
 2091       } else {                    // stack --> stack copy
 2092         assert(dst_lo_rc == rc_stack, "spill to bad register class");
 2093         if (ideal_reg() == Op_RegVectMask) {
 2094           __ spill_copy_sve_predicate_stack_to_stack(src_offset, dst_offset,
 2095                                                      Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
 2096         } else {
 2097           __ unspill(rscratch1, is64, src_offset);
 2098           __ spill(rscratch1, is64, dst_offset);
 2099         }
 2100       }
 2101       break;
 2102     case rc_predicate:
 2103       if (dst_lo_rc == rc_predicate) {
 2104         __ sve_mov(as_PRegister(Matcher::_regEncode[dst_lo]), as_PRegister(Matcher::_regEncode[src_lo]));
 2105       } else if (dst_lo_rc == rc_stack) {
 2106         __ spill_sve_predicate(as_PRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo),
 2107                                Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
 2108       } else {
 2109         assert(false, "bad src and dst rc_class combination.");
 2110         ShouldNotReachHere();
 2111       }
 2112       break;
 2113     default:
 2114       assert(false, "bad rc_class for spill");
 2115       ShouldNotReachHere();
 2116     }
 2117   }
 2118 
 2119   if (st) {
 2120     st->print("spill ");
 2121     if (src_lo_rc == rc_stack) {
 2122       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
 2123     } else {
 2124       st->print("%s -> ", Matcher::regName[src_lo]);
 2125     }
 2126     if (dst_lo_rc == rc_stack) {
 2127       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
 2128     } else {
 2129       st->print("%s", Matcher::regName[dst_lo]);
 2130     }
 2131     if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
 2132       int vsize = 0;
 2133       switch (ideal_reg()) {
 2134       case Op_VecD:
 2135         vsize = 64;
 2136         break;
 2137       case Op_VecX:
 2138         vsize = 128;
 2139         break;
 2140       case Op_VecA:
 2141         vsize = Matcher::scalable_vector_reg_size(T_BYTE) * 8;
 2142         break;
 2143       default:
 2144         assert(false, "bad register type for spill");
 2145         ShouldNotReachHere();
 2146       }
 2147       st->print("\t# vector spill size = %d", vsize);
 2148     } else if (ideal_reg() == Op_RegVectMask) {
 2149       assert(Matcher::supports_scalable_vector(), "bad register type for spill");
 2150       int vsize = Matcher::scalable_predicate_reg_slots() * 32;
 2151       st->print("\t# predicate spill size = %d", vsize);
 2152     } else {
 2153       st->print("\t# spill size = %d", is64 ? 64 : 32);
 2154     }
 2155   }
 2156 
 2157   return 0;
 2158 
 2159 }
 2160 
 2161 #ifndef PRODUCT
 2162 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 2163   if (!ra_)
 2164     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
 2165   else
 2166     implementation(NULL, ra_, false, st);
 2167 }
 2168 #endif
 2169 
// Emit the spill copy into the code buffer (no textual output).
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}

// Size varies with the source/destination classes; measure it generically.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 2177 
 2178 //=============================================================================
 2179 
 2180 #ifndef PRODUCT
 2181 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 2182   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 2183   int reg = ra_->get_reg_first(this);
 2184   st->print("add %s, rsp, #%d]\t# box lock",
 2185             Matcher::regName[reg], offset);
 2186 }
 2187 #endif
 2188 
// Emit the box-lock address computation: dst = sp + offset of the monitor
// slot within the frame.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  C2_MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  // This add will handle any 24-bit signed offset. 24 bits allows an
  // 8 megabyte stack frame.
  __ add(as_Register(reg), sp, offset);
}
 2199 
 2200 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
 2201   // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
 2202   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 2203 
 2204   if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
 2205     return NativeInstruction::instruction_size;
 2206   } else {
 2207     return 2 * NativeInstruction::instruction_size;
 2208   }
 2209 }
 2210 
 2211 //=============================================================================
 2212 
#ifndef PRODUCT
// Pseudo-assembly listing of the unverified entry point: load the
// receiver's klass, compare against the inline-cache klass, and branch to
// the IC-miss stub on mismatch.
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  st->print_cr("# MachUEPNode");
  if (UseCompressedClassPointers) {
    st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
    if (CompressedKlassPointers::shift() != 0) {
      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
    }
  } else {
   st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
  }
  st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
  st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
}
#endif
 2229 
// Emit the unverified entry point: inline cache check that jumps to the
// IC-miss stub when the receiver klass does not match.
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  C2_MacroAssembler _masm(&cbuf);

  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}

// Size varies (compressed vs. uncompressed klass); measure generically.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
 2248 
 2249 // REQUIRED EMIT CODE
 2250 
 2251 //=============================================================================
 2252 
 2253 // Emit exception handler code.
// Emit exception handler code.
// Emits a far jump to the exception blob into a fresh stub and returns the
// handler's offset within the code buffer (0 on code-cache-full failure).
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  C2_MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
 2272 
 2273 // Emit deopt handler code.
// Emit deopt handler code.
// Saves the handler's own pc in lr, then far-jumps to the deopt blob's
// unpack entry. Returns the handler's offset (0 on code-cache-full failure).
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  C2_MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  // Size must match exactly: callers rely on size_deopt_handler().
  assert(__ offset() - offset == (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
 2293 
 2294 // REQUIRED MATCHER CODE
 2295 
 2296 //=============================================================================
 2297 
 2298 const bool Matcher::match_rule_supported(int opcode) {
 2299   if (!has_match_rule(opcode))
 2300     return false;
 2301 
 2302   bool ret_value = true;
 2303   switch (opcode) {
 2304     case Op_OnSpinWait:
 2305       return VM_Version::supports_on_spin_wait();
 2306     case Op_CacheWB:
 2307     case Op_CacheWBPreSync:
 2308     case Op_CacheWBPostSync:
 2309       if (!VM_Version::supports_data_cache_line_flush()) {
 2310         ret_value = false;
 2311       }
 2312       break;
 2313     case Op_ExpandBits:
 2314     case Op_CompressBits:
 2315       if (!(UseSVE > 1 && VM_Version::supports_svebitperm())) {
 2316         ret_value = false;
 2317       }
 2318       break;
 2319   }
 2320 
 2321   return ret_value; // Per default match rules are supported.
 2322 }
 2323 
// Register mask for SVE predicate registers.
const RegMask* Matcher::predicate_reg_mask(void) {
  return &_PR_REG_mask;
}

// Type used for vector mask (predicate) values of the given element
// type and length.
const TypeVectMask* Matcher::predicate_reg_type(const Type* elemTy, int length) {
  return new TypeVectMask(elemTy, length);
}

// Vector calling convention not yet implemented.
const bool Matcher::supports_vector_calling_convention(void) {
  return false;
}

// Unreachable while supports_vector_calling_convention() is false.
OptoRegPair Matcher::vector_return_value(uint ideal_reg) {
  Unimplemented();
  return OptoRegPair(0, 0);
}
 2341 
 2342 // Is this branch offset short enough that a short branch can be used?
 2343 //
 2344 // NOTE: If the platform does not provide any short branch variants, then
 2345 //       this method should return false for offset 0.
 2346 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
 2347   // The passed offset is relative to address of the branch.
 2348 
 2349   return (-32768 <= offset && offset < 32768);
 2350 }
 2351 
 2352 // Vector width in bytes.
// Vector width in bytes.
// Returns 0 when the element type cannot be vectorized at the current
// MaxVectorSize (fewer than two elements, or total width below 4 bytes).
const int Matcher::vector_width_in_bytes(BasicType bt) {
  // The MaxVectorSize should have been set by detecting SVE max vector register size.
  int size = MIN2((UseSVE > 0) ? 256 : 16, (int)MaxVectorSize);
  // Minimum 2 values in vector
  if (size < 2*type2aelembytes(bt)) size = 0;
  // But never < 4
  if (size < 4) size = 0;
  return size;
}
 2362 
 2363 // Limits on vector size (number of elements) loaded into vector.
// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
 2367 
 2368 const int Matcher::min_vector_size(const BasicType bt) {
 2369   int max_size = max_vector_size(bt);
 2370   // Limit the min vector size to 8 bytes.
 2371   int size = 8 / type2aelembytes(bt);
 2372   if (bt == T_BYTE) {
 2373     // To support vector api shuffle/rearrange.
 2374     size = 4;
 2375   } else if (bt == T_BOOLEAN) {
 2376     // To support vector api load/store mask.
 2377     size = 2;
 2378   }
 2379   if (size < 2) size = 2;
 2380   return MIN2(size, max_size);
 2381 }
 2382 
// SuperWord auto-vectorization uses the full vector width.
const int Matcher::superword_max_vector_size(const BasicType bt) {
  return Matcher::max_vector_size(bt);
}

// Actual max scalable vector register length.
const int Matcher::scalable_vector_reg_size(const BasicType bt) {
  return Matcher::max_vector_size(bt);
}
 2391 
 2392 // Vector ideal reg.
// Vector ideal reg.
// Maps a vector length in bytes to the ideal register kind: VecA for
// scalable SVE lengths above 16 bytes, otherwise VecD/VecX for NEON.
const uint Matcher::vector_ideal_reg(int len) {
  if (UseSVE > 0 && 16 < len && len <= 256) {
    return Op_VecA;
  }
  switch(len) {
    // For 16-bit/32-bit mask vector, reuse VecD.
    case  2:
    case  4:
    case  8: return Op_VecD;
    case 16: return Op_VecX;
  }
  ShouldNotReachHere();
  return 0;
}
 2407 
// Replace a generic vector operand with the concrete operand class that
// matches the ideal register kind chosen for it.
MachOper* Matcher::pd_specialize_generic_vector_operand(MachOper* generic_opnd, uint ideal_reg, bool is_temp) {
  assert(Matcher::is_generic_vector(generic_opnd), "not generic");
  switch (ideal_reg) {
    case Op_VecA: return new vecAOper();
    case Op_VecD: return new vecDOper();
    case Op_VecX: return new vecXOper();
  }
  ShouldNotReachHere();
  return NULL;
}
 2418 
// No mach nodes are treated as pure register-to-register moves on AArch64.
bool Matcher::is_reg2reg_move(MachNode* m) {
  return false;
}

// A generic vector operand is the placeholder VREG operand class.
bool Matcher::is_generic_vector(MachOper* opnd)  {
  return opnd->opcode() == VREG;
}
 2426 
 2427 // Return whether or not this register is ever used as an argument.
 2428 // This function is used on startup to build the trampoline stubs in
 2429 // generateOptoStub.  Registers not mentioned will be killed by the VM
 2430 // call in the trampoline, and arguments in those registers not be
 2431 // available to the callee.
// Return whether or not this register is ever used as an argument.
// This function is used on startup to build the trampoline stubs in
// generateOptoStub.  Registers not mentioned will be killed by the VM
// call in the trampoline, and arguments in those registers not be
// available to the callee.
bool Matcher::can_be_java_arg(int reg)
{
  // Java arguments are passed in r0-r7 and v0-v7 (both halves of each).
  return
    reg ==  R0_num || reg == R0_H_num ||
    reg ==  R1_num || reg == R1_H_num ||
    reg ==  R2_num || reg == R2_H_num ||
    reg ==  R3_num || reg == R3_H_num ||
    reg ==  R4_num || reg == R4_H_num ||
    reg ==  R5_num || reg == R5_H_num ||
    reg ==  R6_num || reg == R6_H_num ||
    reg ==  R7_num || reg == R7_H_num ||
    reg ==  V0_num || reg == V0_H_num ||
    reg ==  V1_num || reg == V1_H_num ||
    reg ==  V2_num || reg == V2_H_num ||
    reg ==  V3_num || reg == V3_H_num ||
    reg ==  V4_num || reg == V4_H_num ||
    reg ==  V5_num || reg == V5_H_num ||
    reg ==  V6_num || reg == V6_H_num ||
    reg ==  V7_num || reg == V7_H_num;
}
 2452 
// All Java argument registers are spillable.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
 2457 
// Integer register-pressure threshold used by the register allocator;
// overridable with -XX:INTPRESSURE.
uint Matcher::int_pressure_limit()
{
  // JDK-8183543: When taking the number of available registers as int
  // register pressure threshold, the jtreg test:
  // test/hotspot/jtreg/compiler/regalloc/TestC2IntPressure.java
  // failed due to C2 compilation failure with
  // "COMPILE SKIPPED: failed spill-split-recycle sanity check".
  //
  // A derived pointer is live at CallNode and then is flagged by RA
  // as a spilled LRG. Spilling heuristics(Spill-USE) explicitly skip
  // derived pointers and lastly fail to spill after reaching maximum
  // number of iterations. Lowering the default pressure threshold to
  // (_NO_SPECIAL_REG32_mask.Size() minus 1) forces CallNode to become
  // a high register pressure area of the code so that split_DEF can
  // generate DefinitionSpillCopy for the derived pointer.
  uint default_int_pressure_threshold = _NO_SPECIAL_REG32_mask.Size() - 1;
  if (!PreserveFramePointer) {
    // When PreserveFramePointer is off, frame pointer is allocatable,
    // but different from other SOC registers, it is excluded from
    // fatproj's mask because its save type is No-Save. Decrease 1 to
    // ensure high pressure at fatproj when PreserveFramePointer is off.
    // See check_pressure_at_fatproj().
    default_int_pressure_threshold--;
  }
  return (INTPRESSURE == -1) ? default_int_pressure_threshold : INTPRESSURE;
}
 2484 
// Float register-pressure threshold; overridable with -XX:FLOATPRESSURE.
uint Matcher::float_pressure_limit()
{
  // _FLOAT_REG_mask is generated by adlc from the float_reg register class.
  return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.Size() : FLOATPRESSURE;
}

// Never replace long division by a constant with inline assembly.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
 2494 
// Register for DIVI projection of divmodI.
// AArch64 has no combined divmod instruction, so none of these
// projection masks are ever requested.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
 2517 
// Register mask naming where SP is preserved across a method-handle
// invoke: AArch64 uses the frame pointer register for this.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
 2521 
 2522 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
 2523   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
 2524     Node* u = addp->fast_out(i);
 2525     if (u->is_LoadStore()) {
 2526       // On AArch64, LoadStoreNodes (i.e. compare and swap
 2527       // instructions) only take register indirect as an operand, so
 2528       // any attempt to use an AddPNode as an input to a LoadStoreNode
 2529       // must fail.
 2530       return false;
 2531     }
 2532     if (u->is_Mem()) {
 2533       int opsize = u->as_Mem()->memory_size();
 2534       assert(opsize > 0, "unexpected memory operand size");
 2535       if (u->as_Mem()->memory_size() != (1<<shift)) {
 2536         return false;
 2537       }
 2538     }
 2539   }
 2540   return true;
 2541 }
 2542 
 2543 // Convert BootTest condition to Assembler condition.
 2544 // Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
 2545 Assembler::Condition to_assembler_cond(BoolTest::mask cond) {
 2546   Assembler::Condition result;
 2547   switch(cond) {
 2548     case BoolTest::eq:
 2549       result = Assembler::EQ; break;
 2550     case BoolTest::ne:
 2551       result = Assembler::NE; break;
 2552     case BoolTest::le:
 2553       result = Assembler::LE; break;
 2554     case BoolTest::ge:
 2555       result = Assembler::GE; break;
 2556     case BoolTest::lt:
 2557       result = Assembler::LT; break;
 2558     case BoolTest::gt:
 2559       result = Assembler::GT; break;
 2560     case BoolTest::ule:
 2561       result = Assembler::LS; break;
 2562     case BoolTest::uge:
 2563       result = Assembler::HS; break;
 2564     case BoolTest::ult:
 2565       result = Assembler::LO; break;
 2566     case BoolTest::ugt:
 2567       result = Assembler::HI; break;
 2568     case BoolTest::overflow:
 2569       result = Assembler::VS; break;
 2570     case BoolTest::no_overflow:
 2571       result = Assembler::VC; break;
 2572     default:
 2573       ShouldNotReachHere();
 2574       return Assembler::Condition(-1);
 2575   }
 2576 
 2577   // Check conversion
 2578   if (cond & BoolTest::unsigned_compare) {
 2579     assert(cmpOpUOper((BoolTest::mask)((int)cond & ~(BoolTest::unsigned_compare))).ccode() == result, "Invalid conversion");
 2580   } else {
 2581     assert(cmpOpOper(cond).ccode() == result, "Invalid conversion");
 2582   }
 2583 
 2584   return result;
 2585 }
 2586 
 2587 // Binary src (Replicate con)
 2588 bool is_valid_sve_arith_imm_pattern(Node* n, Node* m) {
 2589   if (n == NULL || m == NULL) {
 2590     return false;
 2591   }
 2592 
 2593   if (UseSVE == 0 || !VectorNode::is_invariant_vector(m)) {
 2594     return false;
 2595   }
 2596 
 2597   Node* imm_node = m->in(1);
 2598   if (!imm_node->is_Con()) {
 2599     return false;
 2600   }
 2601 
 2602   const Type* t = imm_node->bottom_type();
 2603   if (!(t->isa_int() || t->isa_long())) {
 2604     return false;
 2605   }
 2606 
 2607   switch (n->Opcode()) {
 2608   case Op_AndV:
 2609   case Op_OrV:
 2610   case Op_XorV: {
 2611     Assembler::SIMD_RegVariant T = Assembler::elemType_to_regVariant(Matcher::vector_element_basic_type(n));
 2612     uint64_t value = t->isa_long() ? (uint64_t)imm_node->get_long() : (uint64_t)imm_node->get_int();
 2613     return Assembler::operand_valid_for_sve_logical_immediate(Assembler::regVariant_to_elemBits(T), value);
 2614   }
 2615   case Op_AddVB:
 2616     return (imm_node->get_int() <= 255 && imm_node->get_int() >= -255);
 2617   case Op_AddVS:
 2618   case Op_AddVI:
 2619     return Assembler::operand_valid_for_sve_add_sub_immediate((int64_t)imm_node->get_int());
 2620   case Op_AddVL:
 2621     return Assembler::operand_valid_for_sve_add_sub_immediate(imm_node->get_long());
 2622   default:
 2623     return false;
 2624   }
 2625 }
 2626 
 2627 // (XorV src (Replicate m1))
 2628 // (XorVMask src (MaskAll m1))
 2629 bool is_vector_bitwise_not_pattern(Node* n, Node* m) {
 2630   if (n != NULL && m != NULL) {
 2631     return (n->Opcode() == Op_XorV || n->Opcode() == Op_XorVMask) &&
 2632            VectorNode::is_all_ones_vector(m);
 2633   }
 2634   return false;
 2635 }
 2636 
 2637 // Should the matcher clone input 'm' of node 'n'?
 2638 bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
 2639   if (is_vshift_con_pattern(n, m) ||
 2640       is_vector_bitwise_not_pattern(n, m) ||
 2641       is_valid_sve_arith_imm_pattern(n, m)) {
 2642     mstack.push(m, Visit);
 2643     return true;
 2644   }
 2645   return false;
 2646 }
 2647 
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
//
// Returns true when the AddP's inputs were pushed onto the matcher
// stack here (flagged address_visited so the operands fold into the
// addressing mode); returns false to let the default handling run.
bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  // First try the simple (AddP base (AddP base offset) imm) shape.
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  // (AddP base addr (LShiftL (ConvI2L idx) scale)): fold the shifted
  // (possibly int) index into a scaled-register addressing mode, but
  // only if every memory use accepts this scale (size_fits_all_mem_uses).
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    // (AddP base addr (ConvI2L idx)): fold the sign-extended int index
    // directly into the addressing mode (sxtw extend, no shift).
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
 2688 
// Emit a volatile access of REG at [BASE] using assembler routine
// INSN (an acquire/release form such as ldar/stlr).  Those
// instructions take only a plain register-indirect address, so a
// non-trivial INDEX, SCALE or DISP in the matched memory operand is a
// matcher bug and is rejected by guarantee().  Note the macro
// declares a C2_MacroAssembler named _masm in the enclosing scope,
// which the trailing __ statements of some enc_classes rely on.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  C2_MacroAssembler _masm(&cbuf);                                       \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
 2697 
 2698 
 2699 static Address mem2address(int opcode, Register base, int index, int size, int disp)
 2700   {
 2701     Address::extend scale;
 2702 
 2703     // Hooboy, this is fugly.  We need a way to communicate to the
 2704     // encoder that the index needs to be sign extended, so we have to
 2705     // enumerate all the cases.
 2706     switch (opcode) {
 2707     case INDINDEXSCALEDI2L:
 2708     case INDINDEXSCALEDI2LN:
 2709     case INDINDEXI2L:
 2710     case INDINDEXI2LN:
 2711       scale = Address::sxtw(size);
 2712       break;
 2713     default:
 2714       scale = Address::lsl(size);
 2715     }
 2716 
 2717     if (index == -1) {
 2718       return Address(base, disp);
 2719     } else {
 2720       assert(disp == 0, "unsupported address mode: disp = %d", disp);
 2721       return Address(base, as_Register(index), scale);
 2722     }
 2723   }
 2724 
 2725 
// Member-function-pointer types used by the loadStore() helpers below
// to select the assembler routine to emit.
// Integer load/store taking a full Address.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
// Integer load/store taking a bare base register.
typedef void (MacroAssembler::* mem_insn2)(Register Rt, Register adr);
// FP/SIMD scalar load/store.
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
// SIMD vector load/store parameterized by register variant (width).
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
 2731 
 2732   // Used for all non-volatile memory accesses.  The use of
 2733   // $mem->opcode() to discover whether this pattern uses sign-extended
 2734   // offsets is something of a kludge.
 2735   static void loadStore(C2_MacroAssembler masm, mem_insn insn,
 2736                         Register reg, int opcode,
 2737                         Register base, int index, int scale, int disp,
 2738                         int size_in_memory)
 2739   {
 2740     Address addr = mem2address(opcode, base, index, scale, disp);
 2741     if (addr.getMode() == Address::base_plus_offset) {
 2742       /* If we get an out-of-range offset it is a bug in the compiler,
 2743          so we assert here. */
 2744       assert(Address::offset_ok_for_immed(addr.offset(), exact_log2(size_in_memory)),
 2745              "c2 compiler bug");
 2746       /* Fix up any out-of-range offsets. */
 2747       assert_different_registers(rscratch1, base);
 2748       assert_different_registers(rscratch1, reg);
 2749       addr = masm.legitimize_address(addr, size_in_memory, rscratch1);
 2750     }
 2751     (masm.*insn)(reg, addr);
 2752   }
 2753 
 2754   static void loadStore(C2_MacroAssembler masm, mem_float_insn insn,
 2755                         FloatRegister reg, int opcode,
 2756                         Register base, int index, int size, int disp,
 2757                         int size_in_memory)
 2758   {
 2759     Address::extend scale;
 2760 
 2761     switch (opcode) {
 2762     case INDINDEXSCALEDI2L:
 2763     case INDINDEXSCALEDI2LN:
 2764       scale = Address::sxtw(size);
 2765       break;
 2766     default:
 2767       scale = Address::lsl(size);
 2768     }
 2769 
 2770     if (index == -1) {
 2771       /* If we get an out-of-range offset it is a bug in the compiler,
 2772          so we assert here. */
 2773       assert(Address::offset_ok_for_immed(disp, exact_log2(size_in_memory)), "c2 compiler bug");
 2774       /* Fix up any out-of-range offsets. */
 2775       assert_different_registers(rscratch1, base);
 2776       Address addr = Address(base, disp);
 2777       addr = masm.legitimize_address(addr, size_in_memory, rscratch1);
 2778       (masm.*insn)(reg, addr);
 2779     } else {
 2780       assert(disp == 0, "unsupported address mode: disp = %d", disp);
 2781       (masm.*insn)(reg, Address(base, as_Register(index), scale));
 2782     }
 2783   }
 2784 
 2785   static void loadStore(C2_MacroAssembler masm, mem_vector_insn insn,
 2786                         FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
 2787                         int opcode, Register base, int index, int size, int disp)
 2788   {
 2789     if (index == -1) {
 2790       (masm.*insn)(reg, T, Address(base, disp));
 2791     } else {
 2792       assert(disp == 0, "unsupported address mode");
 2793       (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
 2794     }
 2795   }
 2796 
 2797 %}
 2798 
 2799 
 2800 
 2801 //----------ENCODING BLOCK-----------------------------------------------------
 2802 // This block specifies the encoding classes used by the compiler to
 2803 // output byte streams.  Encoding classes are parameterized macros
 2804 // used by Machine Instruction Nodes in order to generate the bit
 2805 // encoding of the instruction.  Operands specify their base encoding
 2806 // interface with the interface keyword.  There are currently
 2807 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
 2808 // COND_INTER.  REG_INTER causes an operand to generate a function
 2809 // which returns its register number when queried.  CONST_INTER causes
 2810 // an operand to generate a function which returns the value of the
 2811 // constant when queried.  MEMORY_INTER causes an operand to generate
 2812 // four functions which return the Base Register, the Index Register,
 2813 // the Scale Value, and the Offset Value of the operand when queried.
 2814 // COND_INTER causes an operand to generate six functions which return
 2815 // the encoding code (ie - encoding bits for the instruction)
 2816 // associated with each basic boolean condition for a conditional
 2817 // instruction.
 2818 //
 2819 // Instructions specify two basic values for encoding.  Again, a
 2820 // function is available to check if the constant displacement is an
 2821 // oop. They use the ins_encode keyword to specify their encoding
 2822 // classes (which must be a sequence of enc_class names, and their
 2823 // parameters, specified in the encoding block), and they use the
 2824 // opcode keyword to specify, in order, their primary, secondary, and
 2825 // tertiary opcode.  Only the opcode sections which a particular
 2826 // instruction needs for encoding need to be specified.
 2827 encode %{
 2828   // Build emit functions for each basic byte or larger field in the
 2829   // intel encoding scheme (opcode, rm, sib, immediate), and call them
 2830   // from C++ code in the enc_class source block.  Emit functions will
 2831   // live in the main source block for now.  In future, we can
 2832   // generalize this by adding a syntax that specifies the sizes of
 2833   // fields in an order, so that the adlc can build the emit functions
 2834   // automagically
 2835 
  // catch all for unimplemented encodings: emits a trap-on-execute
  // marker so a rule wired to this encoding fails loudly at runtime
  // rather than emitting garbage.
  enc_class enc_unimplemented %{
    C2_MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
 2841 
  // BEGIN Non-volatile memory access
  //
  // The enc_classes from here to "END Non-volatile memory access" are
  // generated from ad_encode.m4: regenerate from the m4 source rather
  // than editing individual classes.  Each class forwards to one of
  // the loadStore() helpers above, passing the matched memory
  // operand's components and the access size in bytes.

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsb(iRegI dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrb(iRegI dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrb(iRegL dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrshw(iRegI dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsh(iRegI dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrh(iRegI dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrh(iRegL dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrw(iRegI dst, memory4 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrw(iRegL dst, memory4 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsw(iRegL dst, memory4 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldr(iRegL dst, memory8 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrs(vRegF dst, memory4 mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrd(vRegD dst, memory8 mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strb(iRegI src, memory1 mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strb0(memory1 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strh(iRegI src, memory2 mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strh0(memory2 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strw(iRegI src, memory4 mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strw0(memory4 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_str(iRegL src, memory8 mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      C2_MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_str0(memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strs(vRegF src, memory4 mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strd(vRegD src, memory8 mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strb0_ordered(memory4 mem) %{
      C2_MacroAssembler _masm(&cbuf);
      __ membar(Assembler::StoreStore);
      loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // END Non-volatile memory access
 3054 
  // Vector loads and stores.  Each class forwards to the vector
  // loadStore() helper; the SIMD register variant (H/S/D/Q) selects
  // the access width (2/4/8/16 bytes respectively).
  enc_class aarch64_enc_ldrvH(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvS(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvH(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvS(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
 3103 
 3104   // volatile loads and stores
 3105 
 3106   enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
 3107     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3108                  rscratch1, stlrb);
 3109   %}
 3110 
 3111   enc_class aarch64_enc_stlrb0(memory mem) %{
 3112     MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3113                  rscratch1, stlrb);
 3114   %}
 3115 
 3116   enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
 3117     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3118                  rscratch1, stlrh);
 3119   %}
 3120 
 3121   enc_class aarch64_enc_stlrh0(memory mem) %{
 3122     MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3123                  rscratch1, stlrh);
 3124   %}
 3125 
 3126   enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
 3127     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3128                  rscratch1, stlrw);
 3129   %}
 3130 
 3131   enc_class aarch64_enc_stlrw0(memory mem) %{
 3132     MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3133                  rscratch1, stlrw);
 3134   %}
 3135 
 3136   enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
 3137     Register dst_reg = as_Register($dst$$reg);
 3138     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3139              rscratch1, ldarb);
 3140     __ sxtbw(dst_reg, dst_reg);
 3141   %}
 3142 
 3143   enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
 3144     Register dst_reg = as_Register($dst$$reg);
 3145     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3146              rscratch1, ldarb);
 3147     __ sxtb(dst_reg, dst_reg);
 3148   %}
 3149 
 3150   enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
 3151     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3152              rscratch1, ldarb);
 3153   %}
 3154 
 3155   enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
 3156     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3157              rscratch1, ldarb);
 3158   %}
 3159 
 3160   enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
 3161     Register dst_reg = as_Register($dst$$reg);
 3162     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3163              rscratch1, ldarh);
 3164     __ sxthw(dst_reg, dst_reg);
 3165   %}
 3166 
 3167   enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
 3168     Register dst_reg = as_Register($dst$$reg);
 3169     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3170              rscratch1, ldarh);
 3171     __ sxth(dst_reg, dst_reg);
 3172   %}
 3173 
 3174   enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
 3175     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3176              rscratch1, ldarh);
 3177   %}
 3178 
  // Volatile load of a halfword with acquire semantics (ldarh),
  // zero-extended into a long register.
  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}
 3183 
  // Volatile 32-bit load with acquire semantics (ldarw) into an int register.
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}
 3188 
  // Volatile 32-bit load with acquire semantics (ldarw) into a long
  // register (upper 32 bits are zeroed by the w-form load).
  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}
 3193 
  // Volatile 64-bit load with acquire semantics (ldar).
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}
 3198 
  // Volatile float load: load-acquire 32 bits into rscratch1, then move
  // the bit pattern into the FP register with fmovs.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}
 3204 
  // Volatile double load: load-acquire 64 bits into rscratch1, then move
  // the bit pattern into the FP register with fmovd.
  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
 3210 
  // Volatile 64-bit store with release semantics (stlr).
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    // (sp cannot be the data operand of stlr), so copy sp into
    // rscratch2 first and store that instead.
    if (src_reg == r31_sp) {
      C2_MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
 3224 
  // Volatile 64-bit store of zero (stlr of the zero register).
  enc_class aarch64_enc_stlr0(memory mem) %{
    MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
 3229 
  // Volatile float store: move the 32-bit FP bit pattern into rscratch2,
  // then store-release it with stlrw.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      // Inner scope so this _masm does not clash with the one
      // instantiated inside the MOV_VOLATILE macro below.
      C2_MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}
 3239 
  // Volatile double store: move the 64-bit FP bit pattern into rscratch2,
  // then store-release it with stlr.
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      // Inner scope so this _masm does not clash with the one
      // instantiated inside the MOV_VOLATILE macro below.
      C2_MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
 3249 
 3250   // synchronized read/update encodings
 3251 
  // Load-acquire exclusive (ldaxr). ldaxr only accepts a base register,
  // so any index/displacement must first be folded into rscratch1 with
  // lea before issuing the exclusive load.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       // No index register: base (+ optional displacement).
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      // Indexed form: compute the effective address in rscratch1.
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // Displacement and scaled index: fold in two lea steps.
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
 3280 
  // Store-release exclusive (stlxr). The status result lands in
  // rscratch1 (0 = success); the trailing cmpw sets flags so the
  // matching instruct can branch on success/failure. As with ldaxr,
  // non-trivial addresses are folded into rscratch2 with lea first.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       // No index register: base (+ optional displacement).
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      // Indexed form: compute the effective address in rscratch2.
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // Displacement and scaled index: fold in two lea steps.
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    // Set condition flags from the exclusive-store status word.
    __ cmpw(rscratch1, zr);
  %}
 3310 
  // 64-bit (xword) compare-and-swap: release ordering only (no acquire),
  // strong (non-weak), no result register (flags carry the outcome).
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3318 
  // 32-bit (word) compare-and-swap: release ordering only, strong.
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3326 
  // 16-bit (halfword) compare-and-swap: release ordering only, strong.
  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3334 
  // 8-bit (byte) compare-and-swap: release ordering only, strong.
  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3342 
 3343 
 3344   // The only difference between aarch64_enc_cmpxchg and
 3345   // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
 3346   // CompareAndSwap sequence to serve as a barrier on acquiring a
 3347   // lock.
  // 64-bit compare-and-swap with BOTH acquire and release ordering.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3355 
  // 32-bit compare-and-swap with BOTH acquire and release ordering.
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3363 
  // 16-bit compare-and-swap with BOTH acquire and release ordering.
  enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3371 
  // 8-bit compare-and-swap with BOTH acquire and release ordering.
  enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3379 
 3380   // auxiliary used for CompareAndSwapX to set result register
 3381   enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
 3382     C2_MacroAssembler _masm(&cbuf);
 3383     Register res_reg = as_Register($res$$reg);
 3384     __ cset(res_reg, Assembler::EQ);
 3385   %}
 3386 
 3387   // prefetch encodings
 3388 
 3389   enc_class aarch64_enc_prefetchw(memory mem) %{
 3390     C2_MacroAssembler _masm(&cbuf);
 3391     Register base = as_Register($mem$$base);
 3392     int index = $mem$$index;
 3393     int scale = $mem$$scale;
 3394     int disp = $mem$$disp;
 3395     if (index == -1) {
 3396       __ prfm(Address(base, disp), PSTL1KEEP);
 3397     } else {
 3398       Register index_reg = as_Register(index);
 3399       if (disp == 0) {
 3400         __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
 3401       } else {
 3402         __ lea(rscratch1, Address(base, disp));
 3403 	__ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
 3404       }
 3405     }
 3406   %}
 3407 
  // mov encodings
 3409 
  // Load a 32-bit immediate into an int register; zero is materialized
  // from the zero register instead of an immediate move.
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    C2_MacroAssembler _masm(&cbuf);
    uint32_t con = (uint32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}
 3420 
  // Load a 64-bit immediate into a long register; zero is materialized
  // from the zero register instead of an immediate move.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    uint64_t con = (uint64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}
 3431 
  // Load a pointer constant, choosing the relocation/materialization
  // strategy from the constant's reloc type: movoop for oops,
  // mov_metadata for metadata, and for plain addresses either a full
  // immediate mov or an adrp+add pair when the address is reachable.
  // NULL and the sentinel value 1 are handled by dedicated encodings
  // (mov_p0 / mov_p1), so they must not arrive here.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        // Addresses below the first page or outside the valid AArch64
        // address range cannot use adrp; fall back to an immediate mov.
        if (! __ is_valid_AArch64_address(con) ||
            con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          uint64_t offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
 3457 
 3458   enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
 3459     C2_MacroAssembler _masm(&cbuf);
 3460     Register dst_reg = as_Register($dst$$reg);
 3461     __ mov(dst_reg, zr);
 3462   %}
 3463 
 3464   enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
 3465     C2_MacroAssembler _masm(&cbuf);
 3466     Register dst_reg = as_Register($dst$$reg);
 3467     __ mov(dst_reg, (uint64_t)1);
 3468   %}
 3469 
  // Load the card-table byte map base via the macro-assembler helper.
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    C2_MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}
 3474 
  // Load a narrow (compressed) oop constant; NULL is handled by the
  // dedicated mov_n0 encoding and must not arrive here.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}
 3487 
 3488   enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
 3489     C2_MacroAssembler _masm(&cbuf);
 3490     Register dst_reg = as_Register($dst$$reg);
 3491     __ mov(dst_reg, zr);
 3492   %}
 3493 
  // Load a narrow (compressed) klass constant; NULL must not arrive here.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
 3506 
 3507   // arithmetic encodings
 3508 
  // 32-bit add/subtract of an immediate. The same encoding serves both
  // AddI and SubI: $primary selects subtract by negating the constant,
  // and a negative effective constant is emitted as the opposite
  // operation on its absolute value (imm fields are unsigned).
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}
 3522 
  // 64-bit add/subtract of an immediate; same $primary add/sub dispatch
  // as the 32-bit form above. NOTE(review): the constant is narrowed to
  // int32_t here — presumably immLAddSub guarantees it fits; confirm
  // against the operand definition.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
 3536 
  // 32-bit signed division with Java min-int/-1 correction
  // (want_remainder == false -> quotient).
  enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
    C2_MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}
 3544 
  // 64-bit signed division (corrected_idivq). NOTE(review): the
  // parameters are declared iRegI although this emits the 64-bit
  // divide; the enc_class parameter types appear to be informational
  // only — verify against the matching instruct.
  enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
    C2_MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}
 3552 
  // 32-bit signed remainder (want_remainder == true).
  enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
    C2_MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}
 3560 
  // 64-bit signed remainder (corrected_idivq, want_remainder == true).
  // NOTE(review): parameters declared iRegI as in aarch64_enc_div above.
  enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
    C2_MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}
 3568 
 3569   // compare instruction encodings
 3570 
 3571   enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
 3572     C2_MacroAssembler _masm(&cbuf);
 3573     Register reg1 = as_Register($src1$$reg);
 3574     Register reg2 = as_Register($src2$$reg);
 3575     __ cmpw(reg1, reg2);
 3576   %}
 3577 
  // 32-bit compare against an add/sub-encodable immediate: a compare is
  // a flag-setting subtract into zr; negative constants flip to addsw.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}
 3588 
  // 32-bit compare against an arbitrary immediate: materialize the
  // constant in rscratch1, then compare registers.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    uint32_t val = (uint32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}
 3596 
 3597   enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
 3598     C2_MacroAssembler _masm(&cbuf);
 3599     Register reg1 = as_Register($src1$$reg);
 3600     Register reg2 = as_Register($src2$$reg);
 3601     __ cmp(reg1, reg2);
 3602   %}
 3603 
  // 64-bit compare against a 12-bit add/sub immediate. Negative values
  // flip to adds on the absolute value; Long.MIN_VALUE (the one value
  // equal to its own negation) cannot be negated, so it is materialized
  // into rscratch1 via orr and compared register-to-register.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      __ orr(rscratch1, zr, (uint64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}
 3618 
  // 64-bit compare against an arbitrary immediate: materialize the
  // constant in rscratch1, then compare registers.
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    uint64_t val = (uint64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}
 3626 
 3627   enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
 3628     C2_MacroAssembler _masm(&cbuf);
 3629     Register reg1 = as_Register($src1$$reg);
 3630     Register reg2 = as_Register($src2$$reg);
 3631     __ cmp(reg1, reg2);
 3632   %}
 3633 
 3634   enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
 3635     C2_MacroAssembler _masm(&cbuf);
 3636     Register reg1 = as_Register($src1$$reg);
 3637     Register reg2 = as_Register($src2$$reg);
 3638     __ cmpw(reg1, reg2);
 3639   %}
 3640 
 3641   enc_class aarch64_enc_testp(iRegP src) %{
 3642     C2_MacroAssembler _masm(&cbuf);
 3643     Register reg = as_Register($src$$reg);
 3644     __ cmp(reg, zr);
 3645   %}
 3646 
 3647   enc_class aarch64_enc_testn(iRegN src) %{
 3648     C2_MacroAssembler _masm(&cbuf);
 3649     Register reg = as_Register($src$$reg);
 3650     __ cmpw(reg, zr);
 3651   %}
 3652 
 3653   enc_class aarch64_enc_b(label lbl) %{
 3654     C2_MacroAssembler _masm(&cbuf);
 3655     Label *L = $lbl$$label;
 3656     __ b(*L);
 3657   %}
 3658 
 3659   enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
 3660     C2_MacroAssembler _masm(&cbuf);
 3661     Label *L = $lbl$$label;
 3662     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
 3663   %}
 3664 
 3665   enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
 3666     C2_MacroAssembler _masm(&cbuf);
 3667     Label *L = $lbl$$label;
 3668     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
 3669   %}
 3670 
  // Slow-path subtype check via check_klass_subtype_slow_path; falls
  // through on a hit, branches to `miss` on failure. When $primary is
  // set, the result register is zeroed on the hit path before the miss
  // label is bound.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     C2_MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
 3688 
  // Emit a Java static (or optimized-virtual) call. Three cases:
  //  1. no _method: a runtime-wrapper call via trampoline;
  //  2. the _ensureMaterializedForStackWalk intrinsic: the call is
  //     elided and replaced by a nop of equal size;
  //  3. a real Java method: trampoline call with the proper static or
  //     opt-virtual relocation, plus a to-interpreter stub (shared when
  //     the method is statically bound and sharing is supported).
  // Any trampoline failure means the code cache is full and bails out.
  enc_class aarch64_enc_java_static_call(method meth) %{
    C2_MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else if (_method->intrinsic_id() == vmIntrinsicID::_ensureMaterializedForStackWalk) {
      // The NOP here is purely to ensure that eliding a call to
      // JVM_EnsureMaterializedForStackWalk doesn't change the code size.
      __ nop();
      __ block_comment("call JVM_EnsureMaterializedForStackWalk (elided)");
    } else {
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
      if (CodeBuffer::supports_shared_stubs() && _method->can_be_statically_bound()) {
        // Calls of the same statically bound method can share
        // a stub to the interpreter.
        cbuf.shared_stub_to_interp_for(_method, call - cbuf.insts_begin());
      } else {
        // Emit stub for static call
        address stub = CompiledStaticCall::emit_to_interp_stub(cbuf, call);
        if (stub == NULL) {
          ciEnv::current()->record_failure("CodeCache is full");
          return;
        }
      }
    }

    __ post_call_nop();

    // Only non uncommon_trap calls need to reinitialize ptrue.
    if (Compile::current()->max_vector_size() > 0 && uncommon_trap_request() == 0) {
      __ reinitialize_ptrue();
    }
  %}
 3736 
  // Emit a Java dynamic (virtual/interface) call through an inline
  // cache; bails out if the code cache is full. SVE predicate register
  // ptrue is reinitialized after the call when vectors are in use.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    C2_MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
    __ post_call_nop();
    if (Compile::current()->max_vector_size() > 0) {
      __ reinitialize_ptrue();
    }
  %}
 3750 
  // Post-call epilogue; the VerifyStackAtCalls check is not implemented
  // on AArch64 and traps via call_Unimplemented.
  enc_class aarch64_enc_call_epilog() %{
    C2_MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find magic cookie on stack
      __ call_Unimplemented();
    }
  %}
 3758 
  // Call from compiled Java into the runtime.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    C2_MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blr
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target is inside the code cache: reachable via trampoline call.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
      __ post_call_nop();
    } else {
      // Target outside the code cache: indirect call through rscratch1.
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blr(rscratch1);
      __ bind(retaddr);
      __ post_call_nop();
      // Pop the breadcrumb pair pushed above.
      __ add(sp, sp, 2 * wordSize);
    }
    if (Compile::current()->max_vector_size() > 0) {
      __ reinitialize_ptrue();
    }
  %}
 3790 
  // Jump to the rethrow stub (far_jump handles any code-cache distance).
  enc_class aarch64_enc_rethrow() %{
    C2_MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}
 3795 
  // Method return; in debug builds, first verify the SVE ptrue predicate
  // register still holds its expected all-true value.
  enc_class aarch64_enc_ret() %{
    C2_MacroAssembler _masm(&cbuf);
#ifdef ASSERT
    if (Compile::current()->max_vector_size() > 0) {
      __ verify_ptrue();
    }
#endif
    __ ret(lr);
  %}
 3805 
 3806   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
 3807     C2_MacroAssembler _masm(&cbuf);
 3808     Register target_reg = as_Register($jump_target$$reg);
 3809     __ br(target_reg);
 3810   %}
 3811 
  // Tail jump used for exception forwarding: pass the return address to
  // the handler in r3, then jump to the target.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    C2_MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
 3821 
 3822 %}
 3823 
 3824 //----------FRAME--------------------------------------------------------------
 3825 // Definition of frame structure and management information.
 3826 //
 3827 //  S T A C K   L A Y O U T    Allocators stack-slot number
 3828 //                             |   (to get allocators register number
 3829 //  G  Owned by    |        |  v    add OptoReg::stack0())
 3830 //  r   CALLER     |        |
 3831 //  o     |        +--------+      pad to even-align allocators stack-slot
 3832 //  w     V        |  pad0  |        numbers; owned by CALLER
 3833 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
 3834 //  h     ^        |   in   |  5
 3835 //        |        |  args  |  4   Holes in incoming args owned by SELF
 3836 //  |     |        |        |  3
 3837 //  |     |        +--------+
 3838 //  V     |        | old out|      Empty on Intel, window on Sparc
 3839 //        |    old |preserve|      Must be even aligned.
 3840 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
 3841 //        |        |   in   |  3   area for Intel ret address
 3842 //     Owned by    |preserve|      Empty on Sparc.
 3843 //       SELF      +--------+
 3844 //        |        |  pad2  |  2   pad to align old SP
 3845 //        |        +--------+  1
 3846 //        |        | locks  |  0
 3847 //        |        +--------+----> OptoReg::stack0(), even aligned
 3848 //        |        |  pad1  | 11   pad to align new SP
 3849 //        |        +--------+
 3850 //        |        |        | 10
 3851 //        |        | spills |  9   spills
 3852 //        V        |        |  8   (pad0 slot for callee)
 3853 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
 3854 //        ^        |  out   |  7
 3855 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
 3856 //     Owned by    +--------+
 3857 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
 3858 //        |    new |preserve|      Must be even-aligned.
 3859 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
 3860 //        |        |        |
 3861 //
 3862 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
 3863 //         known from SELF's arguments and the Java calling convention.
 3864 //         Region 6-7 is determined per call site.
 3865 // Note 2: If the calling convention leaves holes in the incoming argument
 3866 //         area, those holes are owned by SELF.  Holes in the outgoing area
 3867 //         are owned by the CALLEE.  Holes should not be necessary in the
 3868 //         incoming area, as the Java calling convention is completely under
 3869 //         the control of the AD file.  Doubles can be sorted and packed to
 3870 //         avoid holes.  Holes in the outgoing arguments may be necessary for
 3871 //         varargs C calling conventions.
 3872 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
 3873 //         even aligned with pad0 as needed.
 3874 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
 3875 //           (the latter is true on Intel but is it false on AArch64?)
 3876 //         region 6-11 is even aligned; it may be padded out more so that
 3877 //         the region from SP to FP meets the minimum stack alignment.
 3878 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
 3879 //         alignment.  Region 11, pad1, may be dynamically extended so that
 3880 //         SP meets the minimum alignment.
 3881 
frame %{
  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or Method for I2C.
  inline_cache_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low half of the return register pair, indexed by ideal reg opcode:
    // integral/pointer values in R0, floating point in V0.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // High half of the pair; OptoReg::Bad marks 32-bit-only values.
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                      // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
 3953 
 3954 //----------ATTRIBUTES---------------------------------------------------------
 3955 //----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute; operands that do not
                             // override this inherit the default cost of 1

ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
 3971 
 3972 //----------OPERANDS-----------------------------------------------------------
 3973 // Operand definitions must precede instruction definitions for correct parsing
 3974 // in the ADLC because operands constitute user defined types which are used in
 3975 // instruction definitions.
 3976 
 3977 //----------Simple Operands----------------------------------------------------
 3978 
// Integer operands 32 bit
// 32 bit immediate
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift values for add/sub extension shift
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer strictly greater than 1
operand immI_gt_1()
%{
  predicate(n->get_int() > 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer no greater than 4
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Exact 32 bit constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Exact 32 bit constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Exact 32 bit constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Exact 32 bit constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Exact 32 bit constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4103 
// Exact 32 bit constant 63
operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Exact 32 bit constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 255 (0xff)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 65535 (0xffff)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Strictly positive 32 bit integer
operand immI_positive()
%{
  predicate(n->get_int() > 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// BoolTest condition for signed compare
operand immI_cmp_cond()
%{
  predicate(!Matcher::is_unsigned_booltest_pred(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// BoolTest condition for unsigned compare
operand immI_cmpU_cond()
%{
  predicate(Matcher::is_unsigned_booltest_pred(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4175 
// 64 bit constant 255 (0xff)
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 65535 (0xffff)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 4294967295 (0xffffffff)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4205 
 4206 operand immL_bitmask()
 4207 %{
 4208   predicate((n->get_long() != 0)
 4209             && ((n->get_long() & 0xc000000000000000l) == 0)
 4210             && is_power_of_2(n->get_long() + 1));
 4211   match(ConL);
 4212 
 4213   op_cost(0);
 4214   format %{ %}
 4215   interface(CONST_INTER);
 4216 %}
 4217 
// 32 bit bitmask: contiguous low-order one bits (2^k - 1) whose two
// most-significant bits are clear (keeps n->get_int() + 1 from overflowing
// before the power-of-two test).
operand immI_bitmask()
%{
  predicate((n->get_int() != 0)
            && ((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit bitmask of contiguous low-order one bits whose value also fits
// in the positive int range (< 0x80000000).
operand immL_positive_bitmaskI()
%{
  predicate((n->get_long() != 0)
            && ((julong)n->get_long() < 0x80000000ULL)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4241 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 5 bit signed integer
operand immI5()
%{
  predicate(Assembler::is_simm(n->get_int(), 5));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 7 bit unsigned integer
operand immIU7()
%{
  predicate(Assembler::is_uimm(n->get_int(), 7));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset, long-typed constant
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4317 
// Offset for scaled or unscaled immediate loads and stores
// The second argument to offset_ok_for_immed is the log2 of the access size.
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for 1-byte accesses (access size log2 = 0)
operand immIOffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for 2-byte accesses (access size log2 = 1)
operand immIOffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for 4-byte accesses (access size log2 = 2)
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for 8-byte accesses (access size log2 = 3)
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for 16-byte accesses (access size log2 = 4)
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long-typed variants of the offset operands above.
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for 1-byte accesses (access size log2 = 0)
operand immLoffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for 2-byte accesses (access size log2 = 1)
operand immLoffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for 4-byte accesses (access size log2 = 2)
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for 8-byte accesses (access size log2 = 3)
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for 16-byte accesses (access size log2 = 4)
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4438 
// 5 bit signed long integer
operand immL5()
%{
  predicate(Assembler::is_simm(n->get_long(), 5));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 7 bit unsigned long integer
operand immLU7()
%{
  predicate(Assembler::is_uimm(n->get_long(), 7));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 8 bit signed value.
operand immI8()
%{
  predicate(n->get_int() <= 127 && n->get_int() >= -128);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 8 bit signed value (simm8), or #simm8 LSL 8.
// Upper bound 32512 == 127 << 8; the low-byte check enforces the LSL 8 form.
operand immI8_shift8()
%{
  predicate((n->get_int() <= 127 && n->get_int() >= -128) ||
            (n->get_int() <= 32512 && n->get_int() >= -32768 && (n->get_int() & 0xff) == 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 8 bit signed value (simm8), or #simm8 LSL 8.
// Long-typed variant of immI8_shift8.
operand immL8_shift8()
%{
  predicate((n->get_long() <= 127 && n->get_long() >= -128) ||
            (n->get_long() <= 32512 && n->get_long() >= -32768 && (n->get_long() & 0xff) == 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 8 bit integer valid for vector add sub immediate
operand immBAddSubV()
%{
  predicate(n->get_int() <= 255 && n->get_int() >= -255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4506 
// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((int64_t)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for vector add sub immediate
operand immIAddSubV()
%{
  predicate(Assembler::operand_valid_for_sve_add_sub_immediate((int64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Unsigned integers valid for logical immediates (SVE variants are checked
// at the per-element size given as the first argument).

// Valid for an SVE logical immediate with byte (8 bit) elements
operand immBLog()
%{
  predicate(Assembler::operand_valid_for_sve_logical_immediate(BitsPerByte, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid for an SVE logical immediate with short (16 bit) elements
operand immSLog()
%{
  predicate(Assembler::operand_valid_for_sve_logical_immediate(BitsPerShort, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for a scalar logical immediate
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4559 
// Integer operands 64 bit
// 64 bit immediate
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit offset of pc in thread anchor
// (offset of last_Java_pc within the JavaFrameAnchor embedded in JavaThread)
operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for addv subv immediate
operand immLAddSubV()
%{
  predicate(Assembler::operand_valid_for_sve_add_sub_immediate(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (uint64_t)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask (0xFFFFFFFF)
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4657 
// Pointer operands
// Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            (CardTable::CardValue*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// NOTE(review): this comment was duplicated from immP_M1; the exact use of
// the -2 sentinel is not visible here -- confirm against callers.
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4728 
// Float and Double operands
// Double Immediate
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
// (bit-pattern comparison, so -0.0d does not match)
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: valid for the assembler's packed float-immediate
// encoding (see Assembler::operand_valid_for_float_immediate).
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
// (bit-pattern comparison, so -0.0f does not match)
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: valid for the assembler's packed float-immediate encoding.
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4789 
// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4820 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4842 
// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  // NOTE(review): unlike iRegINoSp/iRegPNoSp this operand declares no
  // op_cost(0), so it falls back to the default op_cost(1) declared by
  // op_attrib above -- confirm whether that is intentional.
  format %{ %}
  interface(REG_INTER);
%}
 4864 
// Pointer Register Operands
// Pointer Register
// General pointer register; also matches the fixed-register pointer
// operands listed below.
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// This operand is not allowed to use rfp even if
// rfp is not used to hold the frame pointer.
operand iRegPNoSpNoRfp()
%{
  constraint(ALLOC_IN_RC(no_special_no_rfp_ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4909 
// Fixed-register pointer operands: each constrains allocation to a
// single-register class.
// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4993 
// Fixed-register long operands: allocation restricted to a single register.
// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R2 only
operand iRegL_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R3 only
operand iRegL_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5048 
// Fixed-register 32 bit integer operands.
// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5093 
 5094 
// Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R0 only
operand iRegN_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R2 only
operand iRegN_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R3 only
operand iRegN_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special (32 bit register class)
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5143 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Generic vector class. This will be used for
// all vector operands, including NEON and SVE.
operand vReg()
%{
  constraint(ALLOC_IN_RC(dynamic));
  match(VecA);
  match(VecD);
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Scalable vector register operand (VecA)
operand vecA()
%{
  constraint(ALLOC_IN_RC(vectora_reg));
  match(VecA);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64 bit vector register operand (VecD)
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128 bit vector register operand (VecX)
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5211 
// Double register operands pinned to individual SIMD/FP registers; each
// constrains allocation to a single-register class (v0_reg .. v18_reg).
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V4()
%{
  constraint(ALLOC_IN_RC(v4_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V5()
%{
  constraint(ALLOC_IN_RC(v5_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V6()
%{
  constraint(ALLOC_IN_RC(v6_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V7()
%{
  constraint(ALLOC_IN_RC(v7_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V8()
%{
  constraint(ALLOC_IN_RC(v8_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V9()
%{
  constraint(ALLOC_IN_RC(v9_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V10()
%{
  constraint(ALLOC_IN_RC(v10_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V11()
%{
  constraint(ALLOC_IN_RC(v11_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V12()
%{
  constraint(ALLOC_IN_RC(v12_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V13()
%{
  constraint(ALLOC_IN_RC(v13_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V14()
%{
  constraint(ALLOC_IN_RC(v14_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V15()
%{
  constraint(ALLOC_IN_RC(v15_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V16()
%{
  constraint(ALLOC_IN_RC(v16_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V17()
%{
  constraint(ALLOC_IN_RC(v17_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V18()
%{
  constraint(ALLOC_IN_RC(v18_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5382 
 5383 operand vRegD_V19()
 5384 %{
 5385   constraint(ALLOC_IN_RC(v19_reg));
 5386   match(RegD);
 5387   op_cost(0);
 5388   format %{ %}
 5389   interface(REG_INTER);
 5390 %}
 5391 
 5392 operand vRegD_V20()
 5393 %{
 5394   constraint(ALLOC_IN_RC(v20_reg));
 5395   match(RegD);
 5396   op_cost(0);
 5397   format %{ %}
 5398   interface(REG_INTER);
 5399 %}
 5400 
 5401 operand vRegD_V21()
 5402 %{
 5403   constraint(ALLOC_IN_RC(v21_reg));
 5404   match(RegD);
 5405   op_cost(0);
 5406   format %{ %}
 5407   interface(REG_INTER);
 5408 %}
 5409 
 5410 operand vRegD_V22()
 5411 %{
 5412   constraint(ALLOC_IN_RC(v22_reg));
 5413   match(RegD);
 5414   op_cost(0);
 5415   format %{ %}
 5416   interface(REG_INTER);
 5417 %}
 5418 
 5419 operand vRegD_V23()
 5420 %{
 5421   constraint(ALLOC_IN_RC(v23_reg));
 5422   match(RegD);
 5423   op_cost(0);
 5424   format %{ %}
 5425   interface(REG_INTER);
 5426 %}
 5427 
 5428 operand vRegD_V24()
 5429 %{
 5430   constraint(ALLOC_IN_RC(v24_reg));
 5431   match(RegD);
 5432   op_cost(0);
 5433   format %{ %}
 5434   interface(REG_INTER);
 5435 %}
 5436 
 5437 operand vRegD_V25()
 5438 %{
 5439   constraint(ALLOC_IN_RC(v25_reg));
 5440   match(RegD);
 5441   op_cost(0);
 5442   format %{ %}
 5443   interface(REG_INTER);
 5444 %}
 5445 
 5446 operand vRegD_V26()
 5447 %{
 5448   constraint(ALLOC_IN_RC(v26_reg));
 5449   match(RegD);
 5450   op_cost(0);
 5451   format %{ %}
 5452   interface(REG_INTER);
 5453 %}
 5454 
 5455 operand vRegD_V27()
 5456 %{
 5457   constraint(ALLOC_IN_RC(v27_reg));
 5458   match(RegD);
 5459   op_cost(0);
 5460   format %{ %}
 5461   interface(REG_INTER);
 5462 %}
 5463 
 5464 operand vRegD_V28()
 5465 %{
 5466   constraint(ALLOC_IN_RC(v28_reg));
 5467   match(RegD);
 5468   op_cost(0);
 5469   format %{ %}
 5470   interface(REG_INTER);
 5471 %}
 5472 
 5473 operand vRegD_V29()
 5474 %{
 5475   constraint(ALLOC_IN_RC(v29_reg));
 5476   match(RegD);
 5477   op_cost(0);
 5478   format %{ %}
 5479   interface(REG_INTER);
 5480 %}
 5481 
 5482 operand vRegD_V30()
 5483 %{
 5484   constraint(ALLOC_IN_RC(v30_reg));
 5485   match(RegD);
 5486   op_cost(0);
 5487   format %{ %}
 5488   interface(REG_INTER);
 5489 %}
 5490 
 5491 operand vRegD_V31()
 5492 %{
 5493   constraint(ALLOC_IN_RC(v31_reg));
 5494   match(RegD);
 5495   op_cost(0);
 5496   format %{ %}
 5497   interface(REG_INTER);
 5498 %}
 5499 
// Predicate register operand, allocated from the full predicate class
// (pr_reg).  Interchangeable with pRegGov via the cross-matches below.
operand pReg()
%{
  constraint(ALLOC_IN_RC(pr_reg));
  match(RegVectMask);
  match(pRegGov);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Governing predicate register operand, allocated from the restricted
// gov_pr class.
operand pRegGov()
%{
  constraint(ALLOC_IN_RC(gov_pr));
  match(RegVectMask);
  match(pReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Governing predicate pinned to register p0.
operand pRegGov_P0()
%{
  constraint(ALLOC_IN_RC(p0_reg));
  match(RegVectMask);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Governing predicate pinned to register p1.
operand pRegGov_P1()
%{
  constraint(ALLOC_IN_RC(p1_reg));
  match(RegVectMask);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5537 
 5538 // Flags register, used as output of signed compare instructions
 5539 
// note that on AArch64 we also use this register as the output
// for floating point compare instructions (CmpF CmpD). this ensures
 5542 // that ordered inequality tests use GT, GE, LT or LE none of which
 5543 // pass through cases where the result is unordered i.e. one or both
 5544 // inputs to the compare is a NaN. this means that the ideal code can
 5545 // replace e.g. a GT with an LE and not end up capturing the NaN case
 5546 // (where the comparison should always fail). EQ and NE tests are
 5547 // always generated in ideal code so that unordered folds into the NE
 5548 // case, matching the behaviour of AArch64 NE.
 5549 //
 5550 // This differs from x86 where the outputs of FP compares use a
 5551 // special FP flags registers and where compares based on this
 5552 // register are distinguished into ordered inequalities (cmpOpUCF) and
 5553 // EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
 5554 // to explicitly handle the unordered case in branches. x86 also has
 5555 // to include extra CMoveX rules to accept a cmpOpUCF input.
 5556 
// Condition flags operand for signed (and FP) comparisons; see the
// discussion above about FP compares sharing this register.
operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions.
// Same physical flags as rFlagsReg; the distinct operand type lets
// rules select unsigned condition codes (see cmpOpU below).
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
 5577 
 5578 // Special Registers
 5579 
 5580 // Method Register
// Method Register — pointer pinned to the inline-cache register class.
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register — pointer pinned to the thread register class.
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg (previous comment said
                                       // "link_reg" — apparent copy/paste
                                       // from lr_RegP below)
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link register — pointer pinned to the link register class.
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5609 
 5610 //----------Memory Operands----------------------------------------------------
 5611 
// Register-indirect addressing: [base], no index, no displacement.
// index(0xffffffff) is the ADLC convention for "no index register".
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// base + (sign-extended int index << scale).  The predicate admits this
// form only when the shift amount suits every memory use of the address
// expression (size_fits_all_mem_uses).
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// base + (long index << scale), same size_fits_all_mem_uses restriction.
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// base + sign-extended int index (no scaling).
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// base + long index (no scaling).
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
 5683 
// Base + immediate-offset addressing: [base, #imm].  The numeric suffix
// on each operand selects the matching immIOffset<N> / immLoffset<N>
// immediate class (defined elsewhere in this file), which constrains the
// offset range for the corresponding access size.  All variants share the
// same memory-interface shape: base register, no index, displacement from
// the immediate.
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI1(iRegP reg, immIOffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI2(iRegP reg, immIOffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Long-immediate offset variants of the same addressing mode.
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL1(iRegP reg, immLoffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL2(iRegP reg, immLoffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5851 
// Narrow-oop (compressed pointer) addressing variants.  All of these are
// guarded by CompressedOops::shift() == 0, i.e. DecodeN is effectively the
// identity, so the narrow register value can serve directly as the base.
operand indirectN(iRegN reg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// narrow base + (sign-extended int index << scale); also requires the
// scaled offset to suit all memory uses (size_fits_all_mem_uses).
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// narrow base + (long index << scale).
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// narrow base + sign-extended int index.
operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// narrow base + long index.
operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// narrow base + int immediate offset.
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// narrow base + long immediate offset.
operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5956 
 5957 
 5958 
// AArch64 opto stubs need to write to the pc slot in the thread anchor.
// Address is [thread-register, #immL_pc_off] — a fixed offset from the
// thread register (see thread_RegP above).
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5973 
 5974 //----------Special Memory Operands--------------------------------------------
 5975 // Stack Slot Operand - This operand is used for loading and storing temporary
 5976 //                      values on the stack where a match requires a value to
 5977 //                      flow through memory.
// Stack slot operand for pointers: memory at [SP + slot offset].  These
// stackSlot* operands have no match rule — they are created only by the
// matcher for values that must flow through memory.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot operand for ints.
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot operand for floats.
operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot operand for doubles.
operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot operand for longs.
operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
 6048 
 6049 // Operands for expressing Control Flow
 6050 // NOTE: Label is a predefined operand which should not be redefined in
 6051 //       the AD file. It is generically handled within the ADLC.
 6052 
 6053 //----------Conditional Branch Operands----------------------------------------
 6054 // Comparison Op  - This is the operation of the comparison, and is limited to
 6055 //                  the following set of codes:
 6056 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
 6057 //
 6058 // Other attributes of the comparison, such as unsignedness, are specified
 6059 // by the comparison instruction that sets a condition code flags register.
 6060 // That result is represented by a flags operand whose subtype is appropriate
 6061 // to the unsignedness (etc.) of the comparison.
 6062 //
 6063 // Later, the instruction which matches both the Comparison Op (a Bool) and
 6064 // the flags (produced by the Cmp) specifies the coding of the comparison op
 6065 // by matching a specific subtype of Bool operand below, such as cmpOpU.
 6066 
 6067 // used for signed integral comparisons and fp comparisons
 6068 
// Signed comparison operand.  The numeric values are the AArch64
// condition-code encodings for the quoted mnemonics (eq=0x0, ne=0x1,
// lt=0xb, ge=0xa, le=0xd, gt=0xc, vs=0x6, vc=0x7).
operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// Unsigned comparison operand — uses the unsigned condition codes
// (lo/hs/ls/hi) for the ordering tests.
operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions; restricted by predicate to
// eq/ne tests only.
operand cmpOpEqNe()
%{
  match(Bool);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions; restricted to lt/ge tests.
operand cmpOpLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions; restricted to eq/ne/lt/ge.
operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// Special operand allowing long args to int ops to be truncated for free

operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}
 6190 
// Memory opclasses for vector loads/stores, keyed by access size in bytes.
opclass vmem2(indirect, indIndex, indOffI2, indOffL2);
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);

//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used as to simplify
// instruction definitions by not requiring the AD writer to specify
// separate instructions for every form of operand when the
// instruction accepts multiple operand types with the same basic
// encoding and format. The classic case of this is memory operands.

// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address

// Scalar memory opclasses, again keyed by access size (memory1 = 1-byte
// accesses, etc.); each admits the immediate-offset operand of the
// matching size plus the shared index/narrow-oop forms.
opclass memory1(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI1, indOffL1,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN);

opclass memory2(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI2, indOffL2,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN);

opclass memory4(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI4, indOffL4,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

opclass memory8(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

// All of the memory operands. For the pipeline description.
opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex,
               indOffI1, indOffL1, indOffI2, indOffL2, indOffI4, indOffL4, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);
 6222 
 6223 
// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but it's not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
 6238 
 6239 //----------PIPELINE-----------------------------------------------------------
 6240 // Rules which define the behavior of the target architectures pipeline.
 6241 
 6242 // For specific pipelines, eg A53, define the stages of that pipeline
 6243 //pipe_desc(ISS, EX1, EX2, WR);
// Map the A53-style stage names (issue, execute 1/2, writeback) onto the
// generic pipeline stages declared by pipe_desc below.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
 6248 
 6249 // Integer ALU reg operation
 6250 pipeline %{
 6251 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions (all 4 bytes)
  max_instructions_per_bundle = 4;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
 6264 
 6265 // We don't use an actual pipeline model so don't care about resources
 6266 // or description. we do use pipeline classes to introduce fixed
 6267 // latencies
 6268 
 6269 //----------RESOURCES----------------------------------------------------------
 6270 // Resources are the functional units available to the machine
 6271 
// INS01 means "either issue slot"; ALU means "either ALU".
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);
 6279 
 6280 //----------PIPELINE DESCRIPTION-----------------------------------------------
 6281 // Pipeline Description specifies the stages in the machine's pipeline
 6282 
// Define the pipeline as a generic 6 stage pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);
 6285 
 6286 //----------PIPELINE CLASSES---------------------------------------------------
 6287 // Pipeline Classes describe the stages in which input and output are
 6288 // referenced by the hardware pipeline.
 6289 
 6290 pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
 6291 %{
 6292   single_instruction;
 6293   src1   : S1(read);
 6294   src2   : S2(read);
 6295   dst    : S5(write);
 6296   INS01  : ISS;
 6297   NEON_FP : S5;
 6298 %}
 6299 
 6300 pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
 6301 %{
 6302   single_instruction;
 6303   src1   : S1(read);
 6304   src2   : S2(read);
 6305   dst    : S5(write);
 6306   INS01  : ISS;
 6307   NEON_FP : S5;
 6308 %}
 6309 
 6310 pipe_class fp_uop_s(vRegF dst, vRegF src)
 6311 %{
 6312   single_instruction;
 6313   src    : S1(read);
 6314   dst    : S5(write);
 6315   INS01  : ISS;
 6316   NEON_FP : S5;
 6317 %}
 6318 
 6319 pipe_class fp_uop_d(vRegD dst, vRegD src)
 6320 %{
 6321   single_instruction;
 6322   src    : S1(read);
 6323   dst    : S5(write);
 6324   INS01  : ISS;
 6325   NEON_FP : S5;
 6326 %}
 6327 
 6328 pipe_class fp_d2f(vRegF dst, vRegD src)
 6329 %{
 6330   single_instruction;
 6331   src    : S1(read);
 6332   dst    : S5(write);
 6333   INS01  : ISS;
 6334   NEON_FP : S5;
 6335 %}
 6336 
 6337 pipe_class fp_f2d(vRegD dst, vRegF src)
 6338 %{
 6339   single_instruction;
 6340   src    : S1(read);
 6341   dst    : S5(write);
 6342   INS01  : ISS;
 6343   NEON_FP : S5;
 6344 %}
 6345 
 6346 pipe_class fp_f2i(iRegINoSp dst, vRegF src)
 6347 %{
 6348   single_instruction;
 6349   src    : S1(read);
 6350   dst    : S5(write);
 6351   INS01  : ISS;
 6352   NEON_FP : S5;
 6353 %}
 6354 
 6355 pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
 6356 %{
 6357   single_instruction;
 6358   src    : S1(read);
 6359   dst    : S5(write);
 6360   INS01  : ISS;
 6361   NEON_FP : S5;
 6362 %}
 6363 
 6364 pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
 6365 %{
 6366   single_instruction;
 6367   src    : S1(read);
 6368   dst    : S5(write);
 6369   INS01  : ISS;
 6370   NEON_FP : S5;
 6371 %}
 6372 
 6373 pipe_class fp_l2f(vRegF dst, iRegL src)
 6374 %{
 6375   single_instruction;
 6376   src    : S1(read);
 6377   dst    : S5(write);
 6378   INS01  : ISS;
 6379   NEON_FP : S5;
 6380 %}
 6381 
 6382 pipe_class fp_d2i(iRegINoSp dst, vRegD src)
 6383 %{
 6384   single_instruction;
 6385   src    : S1(read);
 6386   dst    : S5(write);
 6387   INS01  : ISS;
 6388   NEON_FP : S5;
 6389 %}
 6390 
 6391 pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
 6392 %{
 6393   single_instruction;
 6394   src    : S1(read);
 6395   dst    : S5(write);
 6396   INS01  : ISS;
 6397   NEON_FP : S5;
 6398 %}
 6399 
 6400 pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
 6401 %{
 6402   single_instruction;
 6403   src    : S1(read);
 6404   dst    : S5(write);
 6405   INS01  : ISS;
 6406   NEON_FP : S5;
 6407 %}
 6408 
// FP convert long-to-double (general register to FP register), result at S5.
// NOTE(review): src is declared iRegIorL2I while fp_l2f above uses iRegL;
// operand types in pipe classes are placeholders, but confirm the asymmetry
// is intentional.
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
 6417 
// FP divide, single precision. Can only dual issue as instruction 0 (INS0).
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}
 6427 
// FP divide, double precision. Can only dual issue as instruction 0 (INS0).
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}
 6437 
// FP conditional select, single precision: flags and both sources read at S1,
// result ready at S3.
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}
 6448 
// FP conditional select, double precision: flags and both sources read at S1,
// result ready at S3.
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}
 6459 
// FP move immediate, single precision: no source operands, result at S3.
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}
 6467 
// FP move immediate, double precision: no source operands, result at S3.
pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}
 6475 
// Load single-precision FP constant (e.g. from the constant table), result at S4.
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
 6483 
// Load double-precision FP constant (e.g. from the constant table), result at S4.
pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
 6491 
 6492 //------- Integer ALU operations --------------------------
 6493 
 6494 // Integer ALU reg-reg operation
 6495 // Operands needed in EX1, result generated in EX2
 6496 // Eg.  ADD     x0, x1, x2
 6497 pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
 6498 %{
 6499   single_instruction;
 6500   dst    : EX2(write);
 6501   src1   : EX1(read);
 6502   src2   : EX1(read);
 6503   INS01  : ISS; // Dual issue as instruction 0 or 1
 6504   ALU    : EX2;
 6505 %}
 6506 
 6507 // Integer ALU reg-reg operation with constant shift
 6508 // Shifted register must be available in LATE_ISS instead of EX1
 6509 // Eg.  ADD     x0, x1, x2, LSL #2
 6510 pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
 6511 %{
 6512   single_instruction;
 6513   dst    : EX2(write);
 6514   src1   : EX1(read);
 6515   src2   : ISS(read);
 6516   INS01  : ISS;
 6517   ALU    : EX2;
 6518 %}
 6519 
 6520 // Integer ALU reg operation with constant shift
 6521 // Eg.  LSL     x0, x1, #shift
 6522 pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
 6523 %{
 6524   single_instruction;
 6525   dst    : EX2(write);
 6526   src1   : ISS(read);
 6527   INS01  : ISS;
 6528   ALU    : EX2;
 6529 %}
 6530 
 6531 // Integer ALU reg-reg operation with variable shift
 6532 // Both operands must be available in LATE_ISS instead of EX1
 6533 // Result is available in EX1 instead of EX2
 6534 // Eg.  LSLV    x0, x1, x2
 6535 pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
 6536 %{
 6537   single_instruction;
 6538   dst    : EX1(write);
 6539   src1   : ISS(read);
 6540   src2   : ISS(read);
 6541   INS01  : ISS;
 6542   ALU    : EX1;
 6543 %}
 6544 
 6545 // Integer ALU reg-reg operation with extract
 6546 // As for _vshift above, but result generated in EX2
 6547 // Eg.  EXTR    x0, x1, x2, #N
 6548 pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
 6549 %{
 6550   single_instruction;
 6551   dst    : EX2(write);
 6552   src1   : ISS(read);
 6553   src2   : ISS(read);
 6554   INS1   : ISS; // Can only dual issue as Instruction 1
 6555   ALU    : EX1;
 6556 %}
 6557 
 6558 // Integer ALU reg operation
 6559 // Eg.  NEG     x0, x1
 6560 pipe_class ialu_reg(iRegI dst, iRegI src)
 6561 %{
 6562   single_instruction;
 6563   dst    : EX2(write);
 6564   src    : EX1(read);
 6565   INS01  : ISS;
 6566   ALU    : EX2;
 6567 %}
 6568 
// Integer ALU reg-immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
 6579 
 6580 // Integer ALU immediate operation (no source operands)
 6581 // Eg.  MOV     x0, #N
 6582 pipe_class ialu_imm(iRegI dst)
 6583 %{
 6584   single_instruction;
 6585   dst    : EX1(write);
 6586   INS01  : ISS;
 6587   ALU    : EX1;
 6588 %}
 6589 
 6590 //------- Compare operation -------------------------------
 6591 
 6592 // Compare reg-reg
 6593 // Eg.  CMP     x0, x1
 6594 pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
 6595 %{
 6596   single_instruction;
 6597 //  fixed_latency(16);
 6598   cr     : EX2(write);
 6599   op1    : EX1(read);
 6600   op2    : EX1(read);
 6601   INS01  : ISS;
 6602   ALU    : EX2;
 6603 %}
 6604 
// Compare reg-imm
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
 6616 
 6617 //------- Conditional instructions ------------------------
 6618 
 6619 // Conditional no operands
 6620 // Eg.  CSINC   x0, zr, zr, <cond>
 6621 pipe_class icond_none(iRegI dst, rFlagsReg cr)
 6622 %{
 6623   single_instruction;
 6624   cr     : EX1(read);
 6625   dst    : EX2(write);
 6626   INS01  : ISS;
 6627   ALU    : EX2;
 6628 %}
 6629 
 6630 // Conditional 2 operand
 6631 // EG.  CSEL    X0, X1, X2, <cond>
 6632 pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
 6633 %{
 6634   single_instruction;
 6635   cr     : EX1(read);
 6636   src1   : EX1(read);
 6637   src2   : EX1(read);
 6638   dst    : EX2(write);
 6639   INS01  : ISS;
 6640   ALU    : EX2;
 6641 %}
 6642 
// Conditional 1 operand
// Eg.  CSEL    X0, X1, zr, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
 6654 
 6655 //------- Multiply pipeline operations --------------------
 6656 
 6657 // Multiply reg-reg
 6658 // Eg.  MUL     w0, w1, w2
 6659 pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
 6660 %{
 6661   single_instruction;
 6662   dst    : WR(write);
 6663   src1   : ISS(read);
 6664   src2   : ISS(read);
 6665   INS01  : ISS;
 6666   MAC    : WR;
 6667 %}
 6668 
 6669 // Multiply accumulate
 6670 // Eg.  MADD    w0, w1, w2, w3
 6671 pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
 6672 %{
 6673   single_instruction;
 6674   dst    : WR(write);
 6675   src1   : ISS(read);
 6676   src2   : ISS(read);
 6677   src3   : ISS(read);
 6678   INS01  : ISS;
 6679   MAC    : WR;
 6680 %}
 6681 
// Long (64 bit) multiply reg-reg
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
 6693 
// Long (64 bit) multiply accumulate
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
 6707 
 6708 //------- Divide pipeline operations --------------------
 6709 
 6710 // Eg.  SDIV    w0, w1, w2
 6711 pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
 6712 %{
 6713   single_instruction;
 6714   fixed_latency(8); // Maximum latency for 32 bit divide
 6715   dst    : WR(write);
 6716   src1   : ISS(read);
 6717   src2   : ISS(read);
 6718   INS0   : ISS; // Can only dual issue as instruction 0
 6719   DIV    : WR;
 6720 %}
 6721 
 6722 // Eg.  SDIV    x0, x1, x2
 6723 pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
 6724 %{
 6725   single_instruction;
 6726   fixed_latency(16); // Maximum latency for 64 bit divide
 6727   dst    : WR(write);
 6728   src1   : ISS(read);
 6729   src2   : ISS(read);
 6730   INS0   : ISS; // Can only dual issue as instruction 0
 6731   DIV    : WR;
 6732 %}
 6733 
 6734 //------- Load pipeline operations ------------------------
 6735 
 6736 // Load - prefetch
 6737 // Eg.  PFRM    <mem>
 6738 pipe_class iload_prefetch(memory mem)
 6739 %{
 6740   single_instruction;
 6741   mem    : ISS(read);
 6742   INS01  : ISS;
 6743   LDST   : WR;
 6744 %}
 6745 
 6746 // Load - reg, mem
 6747 // Eg.  LDR     x0, <mem>
 6748 pipe_class iload_reg_mem(iRegI dst, memory mem)
 6749 %{
 6750   single_instruction;
 6751   dst    : WR(write);
 6752   mem    : ISS(read);
 6753   INS01  : ISS;
 6754   LDST   : WR;
 6755 %}
 6756 
 6757 // Load - reg, reg
 6758 // Eg.  LDR     x0, [sp, x1]
 6759 pipe_class iload_reg_reg(iRegI dst, iRegI src)
 6760 %{
 6761   single_instruction;
 6762   dst    : WR(write);
 6763   src    : ISS(read);
 6764   INS01  : ISS;
 6765   LDST   : WR;
 6766 %}
 6767 
 6768 //------- Store pipeline operations -----------------------
 6769 
 6770 // Store - zr, mem
 6771 // Eg.  STR     zr, <mem>
 6772 pipe_class istore_mem(memory mem)
 6773 %{
 6774   single_instruction;
 6775   mem    : ISS(read);
 6776   INS01  : ISS;
 6777   LDST   : WR;
 6778 %}
 6779 
 6780 // Store - reg, mem
 6781 // Eg.  STR     x0, <mem>
 6782 pipe_class istore_reg_mem(iRegI src, memory mem)
 6783 %{
 6784   single_instruction;
 6785   mem    : ISS(read);
 6786   src    : EX2(read);
 6787   INS01  : ISS;
 6788   LDST   : WR;
 6789 %}
 6790 
// Store - reg, reg
// Eg. STR      x0, [sp, x1]
// Note: 'dst' here is the index/address register and is only read (at ISS);
// the stored value 'src' is read at EX2.
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
 6801 
//------- Branch pipeline operations ----------------------
 6803 
 6804 // Branch
 6805 pipe_class pipe_branch()
 6806 %{
 6807   single_instruction;
 6808   INS01  : ISS;
 6809   BRANCH : EX1;
 6810 %}
 6811 
 6812 // Conditional branch
 6813 pipe_class pipe_branch_cond(rFlagsReg cr)
 6814 %{
 6815   single_instruction;
 6816   cr     : EX1(read);
 6817   INS01  : ISS;
 6818   BRANCH : EX1;
 6819 %}
 6820 
 6821 // Compare & Branch
 6822 // EG.  CBZ/CBNZ
 6823 pipe_class pipe_cmp_branch(iRegI op1)
 6824 %{
 6825   single_instruction;
 6826   op1    : EX1(read);
 6827   INS01  : ISS;
 6828   BRANCH : EX1;
 6829 %}
 6830 
 6831 //------- Synchronisation operations ----------------------
 6832 
 6833 // Any operation requiring serialization.
 6834 // EG.  DMB/Atomic Ops/Load Acquire/Str Release
 6835 pipe_class pipe_serial()
 6836 %{
 6837   single_instruction;
 6838   force_serialization;
 6839   fixed_latency(16);
 6840   INS01  : ISS(2); // Cannot dual issue with any other instruction
 6841   LDST   : WR;
 6842 %}
 6843 
 6844 // Generic big/slow expanded idiom - also serialized
 6845 pipe_class pipe_slow()
 6846 %{
 6847   instruction_count(10);
 6848   multiple_bundles;
 6849   force_serialization;
 6850   fixed_latency(16);
 6851   INS01  : ISS(2); // Cannot dual issue with any other instruction
 6852   LDST   : WR;
 6853 %}
 6854 
 6855 // Empty pipeline class
 6856 pipe_class pipe_class_empty()
 6857 %{
 6858   single_instruction;
 6859   fixed_latency(0);
 6860 %}
 6861 
 6862 // Default pipeline class.
 6863 pipe_class pipe_class_default()
 6864 %{
 6865   single_instruction;
 6866   fixed_latency(2);
 6867 %}
 6868 
 6869 // Pipeline class for compares.
 6870 pipe_class pipe_class_compare()
 6871 %{
 6872   single_instruction;
 6873   fixed_latency(16);
 6874 %}
 6875 
 6876 // Pipeline class for memory operations.
 6877 pipe_class pipe_class_memory()
 6878 %{
 6879   single_instruction;
 6880   fixed_latency(16);
 6881 %}
 6882 
 6883 // Pipeline class for call.
 6884 pipe_class pipe_class_call()
 6885 %{
 6886   single_instruction;
 6887   fixed_latency(100);
 6888 %}
 6889 
 6890 // Define the class for the Nop node.
 6891 define %{
 6892    MachNop = pipe_class_empty;
 6893 %}
 6894 
 6895 %}
 6896 //----------INSTRUCTIONS-------------------------------------------------------
 6897 //
 6898 // match      -- States which machine-independent subtree may be replaced
 6899 //               by this instruction.
 6900 // ins_cost   -- The estimated cost of this instruction is used by instruction
 6901 //               selection to identify a minimum cost tree of machine
 6902 //               instructions that matches a tree of machine-independent
 6903 //               instructions.
 6904 // format     -- A string providing the disassembly for this instruction.
 6905 //               The value of an instruction's operand may be inserted
 6906 //               by referring to it with a '$' prefix.
 6907 // opcode     -- Three instruction opcodes may be provided.  These are referred
 6908 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
 6910 //               indicate the type of machine instruction, while secondary
 6911 //               and tertiary are often used for prefix options or addressing
 6912 //               modes.
 6913 // ins_encode -- A list of encode classes with parameters. The encode class
 6914 //               name must have been defined in an 'enc_class' specification
 6915 //               in the encode section of the architecture description.
 6916 
 6917 // ============================================================================
 6918 // Memory (Load/Store) Instructions
 6919 
 6920 // Load Instructions
 6921 
 6922 // Load Byte (8 bit signed)
 6923 instruct loadB(iRegINoSp dst, memory1 mem)
 6924 %{
 6925   match(Set dst (LoadB mem));
 6926   predicate(!needs_acquiring_load(n));
 6927 
 6928   ins_cost(4 * INSN_COST);
 6929   format %{ "ldrsbw  $dst, $mem\t# byte" %}
 6930 
 6931   ins_encode(aarch64_enc_ldrsbw(dst, mem));
 6932 
 6933   ins_pipe(iload_reg_mem);
 6934 %}
 6935 
 6936 // Load Byte (8 bit signed) into long
 6937 instruct loadB2L(iRegLNoSp dst, memory1 mem)
 6938 %{
 6939   match(Set dst (ConvI2L (LoadB mem)));
 6940   predicate(!needs_acquiring_load(n->in(1)));
 6941 
 6942   ins_cost(4 * INSN_COST);
 6943   format %{ "ldrsb  $dst, $mem\t# byte" %}
 6944 
 6945   ins_encode(aarch64_enc_ldrsb(dst, mem));
 6946 
 6947   ins_pipe(iload_reg_mem);
 6948 %}
 6949 
 6950 // Load Byte (8 bit unsigned)
 6951 instruct loadUB(iRegINoSp dst, memory1 mem)
 6952 %{
 6953   match(Set dst (LoadUB mem));
 6954   predicate(!needs_acquiring_load(n));
 6955 
 6956   ins_cost(4 * INSN_COST);
 6957   format %{ "ldrbw  $dst, $mem\t# byte" %}
 6958 
 6959   ins_encode(aarch64_enc_ldrb(dst, mem));
 6960 
 6961   ins_pipe(iload_reg_mem);
 6962 %}
 6963 
 6964 // Load Byte (8 bit unsigned) into long
 6965 instruct loadUB2L(iRegLNoSp dst, memory1 mem)
 6966 %{
 6967   match(Set dst (ConvI2L (LoadUB mem)));
 6968   predicate(!needs_acquiring_load(n->in(1)));
 6969 
 6970   ins_cost(4 * INSN_COST);
 6971   format %{ "ldrb  $dst, $mem\t# byte" %}
 6972 
 6973   ins_encode(aarch64_enc_ldrb(dst, mem));
 6974 
 6975   ins_pipe(iload_reg_mem);
 6976 %}
 6977 
 6978 // Load Short (16 bit signed)
 6979 instruct loadS(iRegINoSp dst, memory2 mem)
 6980 %{
 6981   match(Set dst (LoadS mem));
 6982   predicate(!needs_acquiring_load(n));
 6983 
 6984   ins_cost(4 * INSN_COST);
 6985   format %{ "ldrshw  $dst, $mem\t# short" %}
 6986 
 6987   ins_encode(aarch64_enc_ldrshw(dst, mem));
 6988 
 6989   ins_pipe(iload_reg_mem);
 6990 %}
 6991 
 6992 // Load Short (16 bit signed) into long
 6993 instruct loadS2L(iRegLNoSp dst, memory2 mem)
 6994 %{
 6995   match(Set dst (ConvI2L (LoadS mem)));
 6996   predicate(!needs_acquiring_load(n->in(1)));
 6997 
 6998   ins_cost(4 * INSN_COST);
 6999   format %{ "ldrsh  $dst, $mem\t# short" %}
 7000 
 7001   ins_encode(aarch64_enc_ldrsh(dst, mem));
 7002 
 7003   ins_pipe(iload_reg_mem);
 7004 %}
 7005 
 7006 // Load Char (16 bit unsigned)
 7007 instruct loadUS(iRegINoSp dst, memory2 mem)
 7008 %{
 7009   match(Set dst (LoadUS mem));
 7010   predicate(!needs_acquiring_load(n));
 7011 
 7012   ins_cost(4 * INSN_COST);
 7013   format %{ "ldrh  $dst, $mem\t# short" %}
 7014 
 7015   ins_encode(aarch64_enc_ldrh(dst, mem));
 7016 
 7017   ins_pipe(iload_reg_mem);
 7018 %}
 7019 
 7020 // Load Short/Char (16 bit unsigned) into long
 7021 instruct loadUS2L(iRegLNoSp dst, memory2 mem)
 7022 %{
 7023   match(Set dst (ConvI2L (LoadUS mem)));
 7024   predicate(!needs_acquiring_load(n->in(1)));
 7025 
 7026   ins_cost(4 * INSN_COST);
 7027   format %{ "ldrh  $dst, $mem\t# short" %}
 7028 
 7029   ins_encode(aarch64_enc_ldrh(dst, mem));
 7030 
 7031   ins_pipe(iload_reg_mem);
 7032 %}
 7033 
 7034 // Load Integer (32 bit signed)
 7035 instruct loadI(iRegINoSp dst, memory4 mem)
 7036 %{
 7037   match(Set dst (LoadI mem));
 7038   predicate(!needs_acquiring_load(n));
 7039 
 7040   ins_cost(4 * INSN_COST);
 7041   format %{ "ldrw  $dst, $mem\t# int" %}
 7042 
 7043   ins_encode(aarch64_enc_ldrw(dst, mem));
 7044 
 7045   ins_pipe(iload_reg_mem);
 7046 %}
 7047 
 7048 // Load Integer (32 bit signed) into long
 7049 instruct loadI2L(iRegLNoSp dst, memory4 mem)
 7050 %{
 7051   match(Set dst (ConvI2L (LoadI mem)));
 7052   predicate(!needs_acquiring_load(n->in(1)));
 7053 
 7054   ins_cost(4 * INSN_COST);
 7055   format %{ "ldrsw  $dst, $mem\t# int" %}
 7056 
 7057   ins_encode(aarch64_enc_ldrsw(dst, mem));
 7058 
 7059   ins_pipe(iload_reg_mem);
 7060 %}
 7061 
 7062 // Load Integer (32 bit unsigned) into long
 7063 instruct loadUI2L(iRegLNoSp dst, memory4 mem, immL_32bits mask)
 7064 %{
 7065   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
 7066   predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));
 7067 
 7068   ins_cost(4 * INSN_COST);
 7069   format %{ "ldrw  $dst, $mem\t# int" %}
 7070 
 7071   ins_encode(aarch64_enc_ldrw(dst, mem));
 7072 
 7073   ins_pipe(iload_reg_mem);
 7074 %}
 7075 
 7076 // Load Long (64 bit signed)
 7077 instruct loadL(iRegLNoSp dst, memory8 mem)
 7078 %{
 7079   match(Set dst (LoadL mem));
 7080   predicate(!needs_acquiring_load(n));
 7081 
 7082   ins_cost(4 * INSN_COST);
 7083   format %{ "ldr  $dst, $mem\t# int" %}
 7084 
 7085   ins_encode(aarch64_enc_ldr(dst, mem));
 7086 
 7087   ins_pipe(iload_reg_mem);
 7088 %}
 7089 
 7090 // Load Range
 7091 instruct loadRange(iRegINoSp dst, memory4 mem)
 7092 %{
 7093   match(Set dst (LoadRange mem));
 7094 
 7095   ins_cost(4 * INSN_COST);
 7096   format %{ "ldrw  $dst, $mem\t# range" %}
 7097 
 7098   ins_encode(aarch64_enc_ldrw(dst, mem));
 7099 
 7100   ins_pipe(iload_reg_mem);
 7101 %}
 7102 
 7103 // Load Pointer
 7104 instruct loadP(iRegPNoSp dst, memory8 mem)
 7105 %{
 7106   match(Set dst (LoadP mem));
 7107   predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0));
 7108 
 7109   ins_cost(4 * INSN_COST);
 7110   format %{ "ldr  $dst, $mem\t# ptr" %}
 7111 
 7112   ins_encode(aarch64_enc_ldr(dst, mem));
 7113 
 7114   ins_pipe(iload_reg_mem);
 7115 %}
 7116 
 7117 // Load Compressed Pointer
 7118 instruct loadN(iRegNNoSp dst, memory4 mem)
 7119 %{
 7120   match(Set dst (LoadN mem));
 7121   predicate(!needs_acquiring_load(n));
 7122 
 7123   ins_cost(4 * INSN_COST);
 7124   format %{ "ldrw  $dst, $mem\t# compressed ptr" %}
 7125 
 7126   ins_encode(aarch64_enc_ldrw(dst, mem));
 7127 
 7128   ins_pipe(iload_reg_mem);
 7129 %}
 7130 
 7131 // Load Klass Pointer
 7132 instruct loadKlass(iRegPNoSp dst, memory8 mem)
 7133 %{
 7134   match(Set dst (LoadKlass mem));
 7135   predicate(!needs_acquiring_load(n));
 7136 
 7137   ins_cost(4 * INSN_COST);
 7138   format %{ "ldr  $dst, $mem\t# class" %}
 7139 
 7140   ins_encode(aarch64_enc_ldr(dst, mem));
 7141 
 7142   ins_pipe(iload_reg_mem);
 7143 %}
 7144 
 7145 // Load Narrow Klass Pointer
 7146 instruct loadNKlass(iRegNNoSp dst, memory4 mem)
 7147 %{
 7148   match(Set dst (LoadNKlass mem));
 7149   predicate(!needs_acquiring_load(n));
 7150 
 7151   ins_cost(4 * INSN_COST);
 7152   format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}
 7153 
 7154   ins_encode(aarch64_enc_ldrw(dst, mem));
 7155 
 7156   ins_pipe(iload_reg_mem);
 7157 %}
 7158 
 7159 // Load Float
 7160 instruct loadF(vRegF dst, memory4 mem)
 7161 %{
 7162   match(Set dst (LoadF mem));
 7163   predicate(!needs_acquiring_load(n));
 7164 
 7165   ins_cost(4 * INSN_COST);
 7166   format %{ "ldrs  $dst, $mem\t# float" %}
 7167 
 7168   ins_encode( aarch64_enc_ldrs(dst, mem) );
 7169 
 7170   ins_pipe(pipe_class_memory);
 7171 %}
 7172 
 7173 // Load Double
 7174 instruct loadD(vRegD dst, memory8 mem)
 7175 %{
 7176   match(Set dst (LoadD mem));
 7177   predicate(!needs_acquiring_load(n));
 7178 
 7179   ins_cost(4 * INSN_COST);
 7180   format %{ "ldrd  $dst, $mem\t# double" %}
 7181 
 7182   ins_encode( aarch64_enc_ldrd(dst, mem) );
 7183 
 7184   ins_pipe(pipe_class_memory);
 7185 %}
 7186 
 7187 
 7188 // Load Int Constant
 7189 instruct loadConI(iRegINoSp dst, immI src)
 7190 %{
 7191   match(Set dst src);
 7192 
 7193   ins_cost(INSN_COST);
 7194   format %{ "mov $dst, $src\t# int" %}
 7195 
 7196   ins_encode( aarch64_enc_movw_imm(dst, src) );
 7197 
 7198   ins_pipe(ialu_imm);
 7199 %}
 7200 
 7201 // Load Long Constant
 7202 instruct loadConL(iRegLNoSp dst, immL src)
 7203 %{
 7204   match(Set dst src);
 7205 
 7206   ins_cost(INSN_COST);
 7207   format %{ "mov $dst, $src\t# long" %}
 7208 
 7209   ins_encode( aarch64_enc_mov_imm(dst, src) );
 7210 
 7211   ins_pipe(ialu_imm);
 7212 %}
 7213 
 7214 // Load Pointer Constant
 7215 
 7216 instruct loadConP(iRegPNoSp dst, immP con)
 7217 %{
 7218   match(Set dst con);
 7219 
 7220   ins_cost(INSN_COST * 4);
 7221   format %{
 7222     "mov  $dst, $con\t# ptr\n\t"
 7223   %}
 7224 
 7225   ins_encode(aarch64_enc_mov_p(dst, con));
 7226 
 7227   ins_pipe(ialu_imm);
 7228 %}
 7229 
 7230 // Load Null Pointer Constant
 7231 
 7232 instruct loadConP0(iRegPNoSp dst, immP0 con)
 7233 %{
 7234   match(Set dst con);
 7235 
 7236   ins_cost(INSN_COST);
 7237   format %{ "mov  $dst, $con\t# NULL ptr" %}
 7238 
 7239   ins_encode(aarch64_enc_mov_p0(dst, con));
 7240 
 7241   ins_pipe(ialu_imm);
 7242 %}
 7243 
 7244 // Load Pointer Constant One
 7245 
 7246 instruct loadConP1(iRegPNoSp dst, immP_1 con)
 7247 %{
 7248   match(Set dst con);
 7249 
 7250   ins_cost(INSN_COST);
 7251   format %{ "mov  $dst, $con\t# NULL ptr" %}
 7252 
 7253   ins_encode(aarch64_enc_mov_p1(dst, con));
 7254 
 7255   ins_pipe(ialu_imm);
 7256 %}
 7257 
 7258 // Load Byte Map Base Constant
 7259 
 7260 instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
 7261 %{
 7262   match(Set dst con);
 7263 
 7264   ins_cost(INSN_COST);
 7265   format %{ "adr  $dst, $con\t# Byte Map Base" %}
 7266 
 7267   ins_encode(aarch64_enc_mov_byte_map_base(dst, con));
 7268 
 7269   ins_pipe(ialu_imm);
 7270 %}
 7271 
 7272 // Load Narrow Pointer Constant
 7273 
 7274 instruct loadConN(iRegNNoSp dst, immN con)
 7275 %{
 7276   match(Set dst con);
 7277 
 7278   ins_cost(INSN_COST * 4);
 7279   format %{ "mov  $dst, $con\t# compressed ptr" %}
 7280 
 7281   ins_encode(aarch64_enc_mov_n(dst, con));
 7282 
 7283   ins_pipe(ialu_imm);
 7284 %}
 7285 
 7286 // Load Narrow Null Pointer Constant
 7287 
 7288 instruct loadConN0(iRegNNoSp dst, immN0 con)
 7289 %{
 7290   match(Set dst con);
 7291 
 7292   ins_cost(INSN_COST);
 7293   format %{ "mov  $dst, $con\t# compressed NULL ptr" %}
 7294 
 7295   ins_encode(aarch64_enc_mov_n0(dst, con));
 7296 
 7297   ins_pipe(ialu_imm);
 7298 %}
 7299 
 7300 // Load Narrow Klass Constant
 7301 
 7302 instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
 7303 %{
 7304   match(Set dst con);
 7305 
 7306   ins_cost(INSN_COST);
 7307   format %{ "mov  $dst, $con\t# compressed klass ptr" %}
 7308 
 7309   ins_encode(aarch64_enc_mov_nk(dst, con));
 7310 
 7311   ins_pipe(ialu_imm);
 7312 %}
 7313 
 7314 // Load Packed Float Constant
 7315 
 7316 instruct loadConF_packed(vRegF dst, immFPacked con) %{
 7317   match(Set dst con);
 7318   ins_cost(INSN_COST * 4);
 7319   format %{ "fmovs  $dst, $con"%}
 7320   ins_encode %{
 7321     __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
 7322   %}
 7323 
 7324   ins_pipe(fp_imm_s);
 7325 %}
 7326 
 7327 // Load Float Constant
 7328 
 7329 instruct loadConF(vRegF dst, immF con) %{
 7330   match(Set dst con);
 7331 
 7332   ins_cost(INSN_COST * 4);
 7333 
 7334   format %{
 7335     "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
 7336   %}
 7337 
 7338   ins_encode %{
 7339     __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
 7340   %}
 7341 
 7342   ins_pipe(fp_load_constant_s);
 7343 %}
 7344 
 7345 // Load Packed Double Constant
 7346 
 7347 instruct loadConD_packed(vRegD dst, immDPacked con) %{
 7348   match(Set dst con);
 7349   ins_cost(INSN_COST);
 7350   format %{ "fmovd  $dst, $con"%}
 7351   ins_encode %{
 7352     __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
 7353   %}
 7354 
 7355   ins_pipe(fp_imm_d);
 7356 %}
 7357 
 7358 // Load Double Constant
 7359 
 7360 instruct loadConD(vRegD dst, immD con) %{
 7361   match(Set dst con);
 7362 
 7363   ins_cost(INSN_COST * 5);
 7364   format %{
 7365     "ldrd $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
 7366   %}
 7367 
 7368   ins_encode %{
 7369     __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
 7370   %}
 7371 
 7372   ins_pipe(fp_load_constant_d);
 7373 %}
 7374 
 7375 // Store Instructions
 7376 
 7377 // Store CMS card-mark Immediate
 7378 instruct storeimmCM0(immI0 zero, memory1 mem)
 7379 %{
 7380   match(Set mem (StoreCM mem zero));
 7381 
 7382   ins_cost(INSN_COST);
 7383   format %{ "storestore (elided)\n\t"
 7384             "strb zr, $mem\t# byte" %}
 7385 
 7386   ins_encode(aarch64_enc_strb0(mem));
 7387 
 7388   ins_pipe(istore_mem);
 7389 %}
 7390 
 7391 // Store CMS card-mark Immediate with intervening StoreStore
 7392 // needed when using CMS with no conditional card marking
 7393 instruct storeimmCM0_ordered(immI0 zero, memory1 mem)
 7394 %{
 7395   match(Set mem (StoreCM mem zero));
 7396 
 7397   ins_cost(INSN_COST * 2);
 7398   format %{ "storestore\n\t"
 7399             "dmb ishst"
 7400             "\n\tstrb zr, $mem\t# byte" %}
 7401 
 7402   ins_encode(aarch64_enc_strb0_ordered(mem));
 7403 
 7404   ins_pipe(istore_mem);
 7405 %}
 7406 
 7407 // Store Byte
 7408 instruct storeB(iRegIorL2I src, memory1 mem)
 7409 %{
 7410   match(Set mem (StoreB mem src));
 7411   predicate(!needs_releasing_store(n));
 7412 
 7413   ins_cost(INSN_COST);
 7414   format %{ "strb  $src, $mem\t# byte" %}
 7415 
 7416   ins_encode(aarch64_enc_strb(src, mem));
 7417 
 7418   ins_pipe(istore_reg_mem);
 7419 %}
 7420 
 7421 
 7422 instruct storeimmB0(immI0 zero, memory1 mem)
 7423 %{
 7424   match(Set mem (StoreB mem zero));
 7425   predicate(!needs_releasing_store(n));
 7426 
 7427   ins_cost(INSN_COST);
 7428   format %{ "strb rscractch2, $mem\t# byte" %}
 7429 
 7430   ins_encode(aarch64_enc_strb0(mem));
 7431 
 7432   ins_pipe(istore_mem);
 7433 %}
 7434 
 7435 // Store Char/Short
 7436 instruct storeC(iRegIorL2I src, memory2 mem)
 7437 %{
 7438   match(Set mem (StoreC mem src));
 7439   predicate(!needs_releasing_store(n));
 7440 
 7441   ins_cost(INSN_COST);
 7442   format %{ "strh  $src, $mem\t# short" %}
 7443 
 7444   ins_encode(aarch64_enc_strh(src, mem));
 7445 
 7446   ins_pipe(istore_reg_mem);
 7447 %}
 7448 
 7449 instruct storeimmC0(immI0 zero, memory2 mem)
 7450 %{
 7451   match(Set mem (StoreC mem zero));
 7452   predicate(!needs_releasing_store(n));
 7453 
 7454   ins_cost(INSN_COST);
 7455   format %{ "strh  zr, $mem\t# short" %}
 7456 
 7457   ins_encode(aarch64_enc_strh0(mem));
 7458 
 7459   ins_pipe(istore_mem);
 7460 %}
 7461 
 7462 // Store Integer
 7463 
 7464 instruct storeI(iRegIorL2I src, memory4 mem)
 7465 %{
 7466   match(Set mem(StoreI mem src));
 7467   predicate(!needs_releasing_store(n));
 7468 
 7469   ins_cost(INSN_COST);
 7470   format %{ "strw  $src, $mem\t# int" %}
 7471 
 7472   ins_encode(aarch64_enc_strw(src, mem));
 7473 
 7474   ins_pipe(istore_reg_mem);
 7475 %}
 7476 
 7477 instruct storeimmI0(immI0 zero, memory4 mem)
 7478 %{
 7479   match(Set mem(StoreI mem zero));
 7480   predicate(!needs_releasing_store(n));
 7481 
 7482   ins_cost(INSN_COST);
 7483   format %{ "strw  zr, $mem\t# int" %}
 7484 
 7485   ins_encode(aarch64_enc_strw0(mem));
 7486 
 7487   ins_pipe(istore_mem);
 7488 %}
 7489 
 7490 // Store Long (64 bit signed)
 7491 instruct storeL(iRegL src, memory8 mem)
 7492 %{
 7493   match(Set mem (StoreL mem src));
 7494   predicate(!needs_releasing_store(n));
 7495 
 7496   ins_cost(INSN_COST);
 7497   format %{ "str  $src, $mem\t# int" %}
 7498 
 7499   ins_encode(aarch64_enc_str(src, mem));
 7500 
 7501   ins_pipe(istore_reg_mem);
 7502 %}
 7503 
 7504 // Store Long (64 bit signed)
 7505 instruct storeimmL0(immL0 zero, memory8 mem)
 7506 %{
 7507   match(Set mem (StoreL mem zero));
 7508   predicate(!needs_releasing_store(n));
 7509 
 7510   ins_cost(INSN_COST);
 7511   format %{ "str  zr, $mem\t# int" %}
 7512 
 7513   ins_encode(aarch64_enc_str0(mem));
 7514 
 7515   ins_pipe(istore_mem);
 7516 %}
 7517 
 7518 // Store Pointer
 7519 instruct storeP(iRegP src, memory8 mem)
 7520 %{
 7521   match(Set mem (StoreP mem src));
 7522   predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);
 7523 
 7524   ins_cost(INSN_COST);
 7525   format %{ "str  $src, $mem\t# ptr" %}
 7526 
 7527   ins_encode(aarch64_enc_str(src, mem));
 7528 
 7529   ins_pipe(istore_reg_mem);
 7530 %}
 7531 
 7532 // Store Pointer
 7533 instruct storeimmP0(immP0 zero, memory8 mem)
 7534 %{
 7535   match(Set mem (StoreP mem zero));
 7536   predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);
 7537 
 7538   ins_cost(INSN_COST);
 7539   format %{ "str zr, $mem\t# ptr" %}
 7540 
 7541   ins_encode(aarch64_enc_str0(mem));
 7542 
 7543   ins_pipe(istore_mem);
 7544 %}
 7545 
 7546 // Store Compressed Pointer
 7547 instruct storeN(iRegN src, memory4 mem)
 7548 %{
 7549   match(Set mem (StoreN mem src));
 7550   predicate(!needs_releasing_store(n));
 7551 
 7552   ins_cost(INSN_COST);
 7553   format %{ "strw  $src, $mem\t# compressed ptr" %}
 7554 
 7555   ins_encode(aarch64_enc_strw(src, mem));
 7556 
 7557   ins_pipe(istore_reg_mem);
 7558 %}
 7559 
 7560 instruct storeImmN0(immN0 zero, memory4 mem)
 7561 %{
 7562   match(Set mem (StoreN mem zero));
 7563   predicate(!needs_releasing_store(n));
 7564 
 7565   ins_cost(INSN_COST);
 7566   format %{ "strw  zr, $mem\t# compressed ptr" %}
 7567 
 7568   ins_encode(aarch64_enc_strw0(mem));
 7569 
 7570   ins_pipe(istore_mem);
 7571 %}
 7572 
// Store Float
// 32-bit FP store from an FP/SIMD register; plain (non-releasing) only.
instruct storeF(vRegF src, memory4 mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
// 64-bit FP store from an FP/SIMD register; plain (non-releasing) only.
instruct storeD(vRegD src, memory8 mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 7603 
// Store Compressed Klass Pointer
// 32-bit store of a narrow klass word; plain (non-releasing) only.
instruct storeNKlass(iRegN src, memory4 mem)
%{
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// TODO
// implement storeImmD0 and storeDImmPacked
 7620 
// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

// Prefetch for allocation: PRFM with PSTL1KEEP (prefetch for store,
// L1, temporal) hint on the allocation-point address.
instruct prefetchalloc( memory8 mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
 7634 
//  ---------------- volatile loads and stores ----------------
//
// Volatile loads use acquire-load forms (LDAR*) on an indirect
// (register-only) address: the acquire instructions take no offset, so
// the matcher restricts the operand to `indirect`. All are serialized
// through pipe_serial.

// Load Byte (8 bit signed)
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
// Load Short/Char (16 bit signed) into long
// NOTE(review): the format string shows "ldarh" but the encoder is
// aarch64_enc_ldarsh (AArch64 has no sign-extending acquire load, so the
// enc_class presumably emits ldarh plus a sign-extend) -- confirm against
// the enc_class definition and consider updating the listing text.
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
 7739 
// Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
// Matches the (AndL (ConvI2L (LoadI ...)) 0xFFFFFFFF) idiom; ldarw
// zero-extends into the 64-bit register, so the mask is free.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
 7765 
 7766 // Load Long (64 bit signed)
 7767 instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7768 %{
 7769   match(Set dst (LoadL mem));
 7770 
 7771   ins_cost(VOLATILE_REF_COST);
 7772   format %{ "ldar  $dst, $mem\t# int" %}
 7773 
 7774   ins_encode(aarch64_enc_ldar(dst, mem));
 7775 
 7776   ins_pipe(pipe_serial);
 7777 %}
 7778 
// Load Pointer
// Only selected when the load carries no GC barrier data; barrier'd
// pointer loads are matched by GC-specific rules elsewhere.
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));
  predicate(n->as_Load()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
 7831 
// Volatile stores use release-store forms (STLR*) on an indirect
// (register-only) address. Zero-constant variants store from zr/wzr.

// Store Byte
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

instruct storeimmB0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb0(mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}

instruct storeimmC0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh0(mem));

  ins_pipe(pipe_class_memory);
%}

// Store Integer

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

instruct storeimmI0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw0(mem));

  ins_pipe(pipe_class_memory);
%}
 7907 
 7908 // Store Long (64 bit signed)
 7909 instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
 7910 %{
 7911   match(Set mem (StoreL mem src));
 7912 
 7913   ins_cost(VOLATILE_REF_COST);
 7914   format %{ "stlr  $src, $mem\t# int" %}
 7915 
 7916   ins_encode(aarch64_enc_stlr(src, mem));
 7917 
 7918   ins_pipe(pipe_class_memory);
 7919 %}
 7920 
 7921 instruct storeimmL0_volatile(immL0 zero, /* sync_memory*/indirect mem)
 7922 %{
 7923   match(Set mem (StoreL mem zero));
 7924 
 7925   ins_cost(VOLATILE_REF_COST);
 7926   format %{ "stlr  zr, $mem\t# int" %}
 7927 
 7928   ins_encode(aarch64_enc_stlr0(mem));
 7929 
 7930   ins_pipe(pipe_class_memory);
 7931 %}
 7932 
// Store Pointer
// Release-store of an oop; only when the node carries no GC barrier data.
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

instruct storeimmP0_volatile(immP0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr0(mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

instruct storeimmN0_volatile(immN0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  zr, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw0(mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}

//  ---------------- end of volatile loads and stores ----------------
 8015 
// Cache-line writeback for a single address (used by e.g. MappedByteBuffer
// force). Guarded on hardware support for data cache line flush. The
// operand must be a plain base register: no index and zero displacement,
// which the asserts in the encoding verify.
instruct cacheWB(indirect addr)
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWB addr);

  ins_cost(100);
  format %{"cache wb $addr" %}
  ins_encode %{
    assert($addr->index_position() < 0, "should be");
    assert($addr$$disp == 0, "should be");
    __ cache_wb(Address($addr$$base$$Register, 0));
  %}
  ins_pipe(pipe_slow); // XXX
%}
 8030 
// Ordering barrier emitted before a run of cache writebacks.
instruct cacheWBPreSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPreSync);

  ins_cost(100);
  format %{"cache wb presync" %}
  ins_encode %{
    __ cache_wbsync(true);
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Ordering barrier emitted after a run of cache writebacks.
instruct cacheWBPostSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPostSync);

  ins_cost(100);
  format %{"cache wb postsync" %}
  ins_encode %{
    __ cache_wbsync(false);
  %}
  ins_pipe(pipe_slow); // XXX
%}
 8056 
 8057 // ============================================================================
 8058 // BSWAP Instructions
 8059 
 8060 instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
 8061   match(Set dst (ReverseBytesI src));
 8062 
 8063   ins_cost(INSN_COST);
 8064   format %{ "revw  $dst, $src" %}
 8065 
 8066   ins_encode %{
 8067     __ revw(as_Register($dst$$reg), as_Register($src$$reg));
 8068   %}
 8069 
 8070   ins_pipe(ialu_reg);
 8071 %}
 8072 
 8073 instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
 8074   match(Set dst (ReverseBytesL src));
 8075 
 8076   ins_cost(INSN_COST);
 8077   format %{ "rev  $dst, $src" %}
 8078 
 8079   ins_encode %{
 8080     __ rev(as_Register($dst$$reg), as_Register($src$$reg));
 8081   %}
 8082 
 8083   ins_pipe(ialu_reg);
 8084 %}
 8085 
 8086 instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
 8087   match(Set dst (ReverseBytesUS src));
 8088 
 8089   ins_cost(INSN_COST);
 8090   format %{ "rev16w  $dst, $src" %}
 8091 
 8092   ins_encode %{
 8093     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
 8094   %}
 8095 
 8096   ins_pipe(ialu_reg);
 8097 %}
 8098 
 8099 instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
 8100   match(Set dst (ReverseBytesS src));
 8101 
 8102   ins_cost(INSN_COST);
 8103   format %{ "rev16w  $dst, $src\n\t"
 8104             "sbfmw $dst, $dst, #0, #15" %}
 8105 
 8106   ins_encode %{
 8107     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
 8108     __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
 8109   %}
 8110 
 8111   ins_pipe(ialu_reg);
 8112 %}
 8113 
 8114 // ============================================================================
 8115 // Zero Count Instructions
 8116 
 8117 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
 8118   match(Set dst (CountLeadingZerosI src));
 8119 
 8120   ins_cost(INSN_COST);
 8121   format %{ "clzw  $dst, $src" %}
 8122   ins_encode %{
 8123     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
 8124   %}
 8125 
 8126   ins_pipe(ialu_reg);
 8127 %}
 8128 
 8129 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
 8130   match(Set dst (CountLeadingZerosL src));
 8131 
 8132   ins_cost(INSN_COST);
 8133   format %{ "clz   $dst, $src" %}
 8134   ins_encode %{
 8135     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
 8136   %}
 8137 
 8138   ins_pipe(ialu_reg);
 8139 %}
 8140 
 8141 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
 8142   match(Set dst (CountTrailingZerosI src));
 8143 
 8144   ins_cost(INSN_COST * 2);
 8145   format %{ "rbitw  $dst, $src\n\t"
 8146             "clzw   $dst, $dst" %}
 8147   ins_encode %{
 8148     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
 8149     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
 8150   %}
 8151 
 8152   ins_pipe(ialu_reg);
 8153 %}
 8154 
 8155 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
 8156   match(Set dst (CountTrailingZerosL src));
 8157 
 8158   ins_cost(INSN_COST * 2);
 8159   format %{ "rbit   $dst, $src\n\t"
 8160             "clz    $dst, $dst" %}
 8161   ins_encode %{
 8162     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
 8163     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
 8164   %}
 8165 
 8166   ins_pipe(ialu_reg);
 8167 %}
 8168 
//---------- Population Count Instructions -------------------------------------
//
// There is no GP-register popcount on AArch64; the value is moved to a
// SIMD register, counted per-byte with CNT, summed with ADDV, and moved
// back. The *_mem variants load straight into the SIMD register instead.

instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Popcount of an int loaded directly from memory into the SIMD register.
instruct popCountI_mem(iRegINoSp dst, memory4 mem, vRegF tmp) %{
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Popcount of a long loaded directly from memory into the SIMD register.
instruct popCountL_mem(iRegINoSp dst, memory8 mem, vRegD tmp) %{
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
 8254 
 8255 // ============================================================================
 8256 // MemBar Instruction
 8257 
 8258 instruct load_fence() %{
 8259   match(LoadFence);
 8260   ins_cost(VOLATILE_REF_COST);
 8261 
 8262   format %{ "load_fence" %}
 8263 
 8264   ins_encode %{
 8265     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
 8266   %}
 8267   ins_pipe(pipe_serial);
 8268 %}
 8269 
 8270 instruct unnecessary_membar_acquire() %{
 8271   predicate(unnecessary_acquire(n));
 8272   match(MemBarAcquire);
 8273   ins_cost(0);
 8274 
 8275   format %{ "membar_acquire (elided)" %}
 8276 
 8277   ins_encode %{
 8278     __ block_comment("membar_acquire (elided)");
 8279   %}
 8280 
 8281   ins_pipe(pipe_class_empty);
 8282 %}
 8283 
 8284 instruct membar_acquire() %{
 8285   match(MemBarAcquire);
 8286   ins_cost(VOLATILE_REF_COST);
 8287 
 8288   format %{ "membar_acquire\n\t"
 8289             "dmb ish" %}
 8290 
 8291   ins_encode %{
 8292     __ block_comment("membar_acquire");
 8293     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
 8294   %}
 8295 
 8296   ins_pipe(pipe_serial);
 8297 %}
 8298 
 8299 
 8300 instruct membar_acquire_lock() %{
 8301   match(MemBarAcquireLock);
 8302   ins_cost(VOLATILE_REF_COST);
 8303 
 8304   format %{ "membar_acquire_lock (elided)" %}
 8305 
 8306   ins_encode %{
 8307     __ block_comment("membar_acquire_lock (elided)");
 8308   %}
 8309 
 8310   ins_pipe(pipe_serial);
 8311 %}
 8312 
 8313 instruct store_fence() %{
 8314   match(StoreFence);
 8315   ins_cost(VOLATILE_REF_COST);
 8316 
 8317   format %{ "store_fence" %}
 8318 
 8319   ins_encode %{
 8320     __ membar(Assembler::LoadStore|Assembler::StoreStore);
 8321   %}
 8322   ins_pipe(pipe_serial);
 8323 %}
 8324 
// Release membar elided when the following access already provides
// release semantics (e.g. an stlr) -- see unnecessary_release().
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}

// Full release membar (LoadStore|StoreStore).
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release\n\t"
            "dmb ish" %}

  ins_encode %{
    __ block_comment("membar_release");
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// StoreStore fence; also matches the explicit StoreStoreFence node.
instruct membar_storestore() %{
  match(MemBarStoreStore);
  match(StoreStoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// Lock-release membar: always elided (emits only a block comment).
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Volatile (StoreLoad) membar elided when redundant -- see
// unnecessary_volatile().
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Full volatile membar: StoreLoad is the most expensive ordering, hence
// the inflated cost to discourage selection when it can be elided.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile\n\t"
             "dmb ish"%}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
 8406 
 8407 // ============================================================================
 8408 // Cast/Convert Instructions
 8409 
 8410 instruct castX2P(iRegPNoSp dst, iRegL src) %{
 8411   match(Set dst (CastX2P src));
 8412 
 8413   ins_cost(INSN_COST);
 8414   format %{ "mov $dst, $src\t# long -> ptr" %}
 8415 
 8416   ins_encode %{
 8417     if ($dst$$reg != $src$$reg) {
 8418       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 8419     }
 8420   %}
 8421 
 8422   ins_pipe(ialu_reg);
 8423 %}
 8424 
 8425 instruct castP2X(iRegLNoSp dst, iRegP src) %{
 8426   match(Set dst (CastP2X src));
 8427 
 8428   ins_cost(INSN_COST);
 8429   format %{ "mov $dst, $src\t# ptr -> long" %}
 8430 
 8431   ins_encode %{
 8432     if ($dst$$reg != $src$$reg) {
 8433       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 8434     }
 8435   %}
 8436 
 8437   ins_pipe(ialu_reg);
 8438 %}
 8439 
 8440 // Convert oop into int for vectors alignment masking
 8441 instruct convP2I(iRegINoSp dst, iRegP src) %{
 8442   match(Set dst (ConvL2I (CastP2X src)));
 8443 
 8444   ins_cost(INSN_COST);
 8445   format %{ "movw $dst, $src\t# ptr -> int" %}
 8446   ins_encode %{
 8447     __ movw($dst$$Register, $src$$Register);
 8448   %}
 8449 
 8450   ins_pipe(ialu_reg);
 8451 %}
 8452 
 8453 // Convert compressed oop into int for vectors alignment masking
 8454 // in case of 32bit oops (heap < 4Gb).
 8455 instruct convN2I(iRegINoSp dst, iRegN src)
 8456 %{
 8457   predicate(CompressedOops::shift() == 0);
 8458   match(Set dst (ConvL2I (CastP2X (DecodeN src))));
 8459 
 8460   ins_cost(INSN_COST);
 8461   format %{ "mov dst, $src\t# compressed ptr -> int" %}
 8462   ins_encode %{
 8463     __ movw($dst$$Register, $src$$Register);
 8464   %}
 8465 
 8466   ins_pipe(ialu_reg);
 8467 %}
 8468 

// Convert oop pointer into compressed form
// General case: source may be null, so encode_heap_oop is used (which
// clobbers flags, hence KILL cr).
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Encode when the oop is statically known non-null (no null check needed).
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Decode a compressed oop; general (possibly-null) case.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Decode when the compressed oop is statically known non-null or constant.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
 8523 
// n.b. AArch64 implementations of encode_klass_not_null and
// decode_klass_not_null do not modify the flags register so, unlike
// Intel, we don't kill CR as a side effect here

// Compress a klass pointer (always non-null).
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// Decompress a narrow klass pointer (always non-null). The in-place
// single-register form is used when src and dst coincide.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
 8561 
// The Cast*/CheckCastPP nodes below only refine the compiler's type
// information; they emit no machine code (size(0), empty encoding).

instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castLL(iRegL dst)
%{
  match(Set dst (CastLL dst));

  size(0);
  format %{ "# castLL of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castFF(vRegF dst)
%{
  match(Set dst (CastFF dst));

  size(0);
  format %{ "# castFF of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castDD(vRegD dst)
%{
  match(Set dst (CastDD dst));

  size(0);
  format %{ "# castDD of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// Vector variant of CastVV (data vector register).
instruct castVV(vReg dst)
%{
  match(Set dst (CastVV dst));

  size(0);
  format %{ "# castVV of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// Predicate-register (SVE governing predicate) variant of CastVV.
instruct castVVMask(pRegGov dst)
%{
  match(Set dst (CastVV dst));

  size(0);
  format %{ "# castVV of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
 8647 
 8648 // ============================================================================
 8649 // Atomic operation instructions
 8650 //
 8651 
 8652 // standard CompareAndSwapX when we are using barriers
 8653 // these have higher priority than the rules selected by a predicate
 8654 
 8655 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
 8656 // can't match them
 8657 
 8658 instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 8659 
 8660   match(Set res (CompareAndSwapB mem (Binary oldval newval)));
 8661   ins_cost(2 * VOLATILE_REF_COST);
 8662 
 8663   effect(KILL cr);
 8664 
 8665   format %{
 8666     "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 8667     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8668   %}
 8669 
 8670   ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
 8671             aarch64_enc_cset_eq(res));
 8672 
 8673   ins_pipe(pipe_slow);
 8674 %}
 8675 
 8676 instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 8677 
 8678   match(Set res (CompareAndSwapS mem (Binary oldval newval)));
 8679   ins_cost(2 * VOLATILE_REF_COST);
 8680 
 8681   effect(KILL cr);
 8682 
 8683   format %{
 8684     "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 8685     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8686   %}
 8687 
 8688   ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
 8689             aarch64_enc_cset_eq(res));
 8690 
 8691   ins_pipe(pipe_slow);
 8692 %}
 8693 
 8694 instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 8695 
 8696   match(Set res (CompareAndSwapI mem (Binary oldval newval)));
 8697   ins_cost(2 * VOLATILE_REF_COST);
 8698 
 8699   effect(KILL cr);
 8700 
 8701  format %{
 8702     "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 8703     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8704  %}
 8705 
 8706  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
 8707             aarch64_enc_cset_eq(res));
 8708 
 8709   ins_pipe(pipe_slow);
 8710 %}
 8711 
 8712 instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
 8713 
 8714   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
 8715   ins_cost(2 * VOLATILE_REF_COST);
 8716 
 8717   effect(KILL cr);
 8718 
 8719  format %{
 8720     "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
 8721     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8722  %}
 8723 
 8724  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
 8725             aarch64_enc_cset_eq(res));
 8726 
 8727   ins_pipe(pipe_slow);
 8728 %}
 8729 
 8730 instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
 8731 
 8732   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
 8733   predicate(n->as_LoadStore()->barrier_data() == 0);
 8734   ins_cost(2 * VOLATILE_REF_COST);
 8735 
 8736   effect(KILL cr);
 8737 
 8738  format %{
 8739     "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
 8740     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8741  %}
 8742 
 8743  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
 8744             aarch64_enc_cset_eq(res));
 8745 
 8746   ins_pipe(pipe_slow);
 8747 %}
 8748 
 8749 instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
 8750 
 8751   match(Set res (CompareAndSwapN mem (Binary oldval newval)));
 8752   ins_cost(2 * VOLATILE_REF_COST);
 8753 
 8754   effect(KILL cr);
 8755 
 8756  format %{
 8757     "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
 8758     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8759  %}
 8760 
 8761  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
 8762             aarch64_enc_cset_eq(res));
 8763 
 8764   ins_pipe(pipe_slow);
 8765 %}
 8766 
 8767 // alternative CompareAndSwapX when we are eliding barriers
 8768 
 8769 instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 8770 
 8771   predicate(needs_acquiring_load_exclusive(n));
 8772   match(Set res (CompareAndSwapB mem (Binary oldval newval)));
 8773   ins_cost(VOLATILE_REF_COST);
 8774 
 8775   effect(KILL cr);
 8776 
 8777   format %{
 8778     "cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 8779     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8780   %}
 8781 
 8782   ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
 8783             aarch64_enc_cset_eq(res));
 8784 
 8785   ins_pipe(pipe_slow);
 8786 %}
 8787 
 8788 instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 8789 
 8790   predicate(needs_acquiring_load_exclusive(n));
 8791   match(Set res (CompareAndSwapS mem (Binary oldval newval)));
 8792   ins_cost(VOLATILE_REF_COST);
 8793 
 8794   effect(KILL cr);
 8795 
 8796   format %{
 8797     "cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 8798     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8799   %}
 8800 
 8801   ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
 8802             aarch64_enc_cset_eq(res));
 8803 
 8804   ins_pipe(pipe_slow);
 8805 %}
 8806 
 8807 instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 8808 
 8809   predicate(needs_acquiring_load_exclusive(n));
 8810   match(Set res (CompareAndSwapI mem (Binary oldval newval)));
 8811   ins_cost(VOLATILE_REF_COST);
 8812 
 8813   effect(KILL cr);
 8814 
 8815  format %{
 8816     "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 8817     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8818  %}
 8819 
 8820  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
 8821             aarch64_enc_cset_eq(res));
 8822 
 8823   ins_pipe(pipe_slow);
 8824 %}
 8825 
 8826 instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
 8827 
 8828   predicate(needs_acquiring_load_exclusive(n));
 8829   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
 8830   ins_cost(VOLATILE_REF_COST);
 8831 
 8832   effect(KILL cr);
 8833 
 8834  format %{
 8835     "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
 8836     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8837  %}
 8838 
 8839  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
 8840             aarch64_enc_cset_eq(res));
 8841 
 8842   ins_pipe(pipe_slow);
 8843 %}
 8844 
 8845 instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
 8846 
 8847   predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
 8848   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
 8849   ins_cost(VOLATILE_REF_COST);
 8850 
 8851   effect(KILL cr);
 8852 
 8853  format %{
 8854     "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
 8855     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8856  %}
 8857 
 8858  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
 8859             aarch64_enc_cset_eq(res));
 8860 
 8861   ins_pipe(pipe_slow);
 8862 %}
 8863 
 8864 instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
 8865 
 8866   predicate(needs_acquiring_load_exclusive(n));
 8867   match(Set res (CompareAndSwapN mem (Binary oldval newval)));
 8868   ins_cost(VOLATILE_REF_COST);
 8869 
 8870   effect(KILL cr);
 8871 
 8872  format %{
 8873     "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
 8874     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8875  %}
 8876 
 8877  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
 8878             aarch64_enc_cset_eq(res));
 8879 
 8880   ins_pipe(pipe_slow);
 8881 %}
 8882 
 8883 
 8884 // ---------------------------------------------------------------------
 8885 
 8886 // BEGIN This section of the file is automatically generated. Do not edit --------------
 8887 
 8888 // Sundry CAS operations.  Note that release is always true,
 8889 // regardless of the memory ordering of the CAS.  This is because we
 8890 // need the volatile case to be sequentially consistent but there is
 8891 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
 8892 // can't check the type of memory ordering here, so we always emit a
 8893 // STLXR.
 8894 
 8895 // This section is generated from cas.m4
 8896 
 8897 
// NOTE(review): the format strings for the strong CompareAndExchange*
// patterns below say "(…, weak)" even though /*weak*/ false is passed
// to cmpxchg — the annotation is misleading.  Any such fix must be made
// in cas.m4 and this section regenerated; do not hand-edit these rules.
// Also note the byte/short exchanges sign-extend the fetched value
// (sxtbw/sxthw) to honour the Java byte/short result type.
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9347 
 9348 // END This section of the file is automatically generated. Do not edit --------------
 9349 // ---------------------------------------------------------------------
 9350 
// Atomic exchange (GetAndSetX) rules.  Relaxed versions first; the
// acquiring versions below carry a lower ins_cost and are matched in
// preference when needs_acquiring_load_exclusive(n) holds.  The
// previous value lands in $prev (a NoSp register class).
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Pointer exchange; only matches when the node carries no GC barrier
// data (barrier-equipped exchanges are presumably matched by
// GC-specific rules elsewhere — confirm against the GC .ad files).
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring versions: use the atomic_xchgal* assembler helpers —
// presumably the acquire+release ("al") forms; TODO(review) confirm
// against MacroAssembler::atomic_xchgal*.
instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetI mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetL mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetN mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9435 
 9436 
// Atomic fetch-and-add rules. The matrix of variants covers:
//  - operand width: long (atomic_add) vs int (atomic_addw);
//  - increment: register vs add/sub-encodable immediate ($$constant);
//  - "_no_res" forms, selected when result_not_used(), which discard the
//    old value (noreg destination) and are one cost unit cheaper.

instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// As get_and_addL but the fetched value is dead, so no result register.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Long fetch-and-add with an immediate increment.
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Long add-in-place with an immediate increment, result unused.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int fetch-and-add, register increment.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int add-in-place, register increment, result unused.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int fetch-and-add with an immediate increment.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int add-in-place with an immediate increment, result unused.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9520 
// Acquiring variants of the fetch-and-add rules above. Each predicate
// additionally requires needs_acquiring_load_exclusive(n); the encodings
// use the acquire forms (atomic_addal / atomic_addalw) and carry a lower
// ins_cost than the corresponding plain rules.

instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Long acquiring add-in-place, result unused.
instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Long acquiring fetch-and-add, immediate increment.
instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Long acquiring add-in-place, immediate increment, result unused.
instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int acquiring fetch-and-add, register increment.
instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int acquiring add-in-place, register increment, result unused.
instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int acquiring fetch-and-add, immediate increment.
instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int acquiring add-in-place, immediate increment, result unused.
instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9608 
 9609 // Manifest a CmpU result in an integer register.
 9610 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpU3_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg flags)
%{
  match(Set dst (CmpU3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmpw $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpU3(reg)"
  %}
  ins_encode %{
    // csetw: dst = (src1 != src2) ? 1 : 0
    // cnegw: negate dst to -1 when src1 < src2 (unsigned: LO)
    __ cmpw($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}

// Immediate-operand variant: subsw against zr sets the flags exactly as
// cmpw would, for an add/sub-encodable constant.
instruct cmpU3_reg_imm(iRegINoSp dst, iRegI src1, immIAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpU3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subsw zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpU3(imm)"
  %}
  ins_encode %{
    __ subsw(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}
 9650 
 9651 // Manifest a CmpUL result in an integer register.
 9652 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// 64-bit unsigned three-way compare; same cset/cneg idiom as CmpU3 but
// with a 64-bit cmp.
instruct cmpUL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpUL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmp $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpUL3(reg)"
  %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}

// Immediate-operand variant of CmpUL3.
instruct cmpUL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpUL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subs zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpUL3(imm)"
  %}
  ins_encode %{
    __ subs(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}
 9692 
 9693 // Manifest a CmpL result in an integer register.
 9694 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// 64-bit signed three-way compare; like CmpUL3 but negates on the signed
// less-than condition (LT) instead of the unsigned one (LO).
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmp $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lt\t# CmpL3(reg)"
  %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}

// Immediate-operand variant of CmpL3.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subs zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lt\t# CmpL3(imm)"
  %}
  ins_encode %{
    __ subs(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
 9734 
 9735 // ============================================================================
 9736 // Conditional Move Instructions
 9737 
 9738 // n.b. we have identical rules for both a signed compare op (cmpOp)
 9739 // and an unsigned compare op (cmpOpU). it would be nice if we could
 9740 // define an op class which merged both inputs and use it to type the
// argument to a single rule. Unfortunately this fails because the
 9742 // opclass does not live up to the COND_INTER interface of its
 9743 // component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
 9745 // which throws a ShouldNotHappen. So, we have to provide two flavours
 9746 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
 9747 
// Int conditional move, signed compare: csel selects $src2 when the
// condition $cmp holds, otherwise $src1.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Same rule for the unsigned compare op (see the note above about why
// cmpOp and cmpOpU need separate flavours).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9779 
 9780 // special cases where one arg is zero
 9781 
 9782 // n.b. this is selected in preference to the rule above because it
 9783 // avoids loading constant 0 into a source register
 9784 
 9785 // TODO
 9786 // we ought only to be able to cull one of these variants as the ideal
 9787 // transforms ought always to order the zero consistently (to left/right?)
 9788 
// Int cmove where the false value is constant zero: use zr as the csel
// operand instead of materializing 0 in a register.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of cmovI_zero_reg.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Int cmove where the true value is constant zero.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of cmovI_reg_zero.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9852 
 9853 // special case for creating a boolean 0 or 1
 9854 
 9855 // n.b. this is selected in preference to the rule above because it
 9856 // avoids loading constants 0 and 1 into a source register
 9857 
// Boolean materialization: csincw zr, zr gives 0 when $cmp holds and 1
// otherwise (i.e. zr incremented on the inverted condition).
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// Unsigned-compare flavour of cmovI_reg_zero_one.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
 9895 
// Long conditional move, signed compare: csel selects $src2 when $cmp
// holds, otherwise $src1.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare flavour of cmovL_reg_reg.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9927 
 9928 // special cases where one arg is zero
 9929 
// Long cmove variants with a constant-zero arm; zr substitutes for the
// zero so no register is needed to hold the constant.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of cmovL_reg_zero.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero on the left-hand (false-value) side.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of cmovL_zero_reg.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9993 
// Pointer conditional move, signed compare.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare flavour of cmovP_reg_reg.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10025 
10026 // special cases where one arg is zero
10027 
// Pointer cmove variants with a constant-null arm (immP0); zr stands in
// for the null pointer.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of cmovP_reg_zero.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Null on the left-hand (false-value) side.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of cmovP_zero_reg.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10091 
// Compressed-oop (narrow) conditional move, signed compare; 32-bit cselw
// since narrow oops are 32 bits wide.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10107 
// Compressed-oop conditional move, unsigned compare flavour of
// cmovN_reg_reg.
// Note: the format annotation previously said "# signed, compressed ptr",
// which was wrong — this rule takes cmpOpU/rFlagsRegU, so it is labelled
// "unsigned" like every other cmovU* rule. Affects only PrintAssembly
// output, not the generated code.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10123 
10124 // special cases where one arg is zero
10125 
// Compressed-oop cmove variants with a constant-null arm (immN0); zr
// stands in for the narrow null.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of cmovN_reg_zero.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Null on the left-hand (false-value) side.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of cmovN_zero_reg.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10189 
// Float conditional move via fcsels: selects $src2 when the condition
// holds, otherwise $src1 (note the operand order in the encoding).
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}

// Unsigned-compare flavour of cmovF_reg.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10225 
// Double conditional move via fcseld: selects $src2 when the condition
// holds, otherwise $src1.
// Note: the format annotation previously said "cmove float"; corrected to
// "cmove double" — this is the CMoveD/fcseld rule. Affects only
// PrintAssembly output, not the generated code.
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10243 
// Unsigned-compare flavour of cmovD_reg.
// Note: the format annotation previously said "cmove float"; corrected to
// "cmove double" — this is the CMoveD/fcseld rule. Affects only
// PrintAssembly output, not the generated code.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10261 
10262 // ============================================================================
10263 // Arithmetic Instructions
10264 //
10265 
10266 // Integer Addition
10267 
10268 // TODO
10269 // these currently employ operations which do not set CR and hence are
10270 // not flagged as killing CR but we would like to isolate the cases
10271 // where we want to set flags from those where we don't. need to work
10272 // out how to do that.
10273 
// 32-bit integer addition, register + register.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit integer addition, register + add/sub-encodable immediate.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// As addI_reg_imm but with the first operand narrowed from a long
// (ConvL2I folded into the add).
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10316 
10317 // Pointer Addition
// Pointer plus long offset in a register: a single 64-bit ADD.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10332 
// Pointer plus int offset: folds the ConvI2L into the ADD's SXTW
// operand extension, so no separate sign-extend instruction is needed.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}
10347 
// Pointer plus scaled long index: folds the left shift into the
// addressing-mode LSL of a single LEA-style ADD.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10362 
// Pointer plus scaled int index: folds both the ConvI2L and the left
// shift into a single SXTW-extended, scaled address computation.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10377 
// Sign-extend an int and shift it left, as one SBFIZ.
// Width operand is capped at 32 via MIN2 so that for scale == 0 the
// instruction degenerates to a plain 32-bit sign extension.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN2(32, (int)((-$scale$$constant) & 63)));
  %}

  ins_pipe(ialu_reg_shift);
%}
10392 
10393 // Pointer Immediate Addition
10394 // n.b. this needs to be more expensive than using an indirect memory
10395 // operand
// Pointer plus add/sub-encodable immediate (64-bit ADD immediate).
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10409 
10410 // Long Addition
// Long add, register-register form: a single 64-bit ADD.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10426 
// Long Immediate Addition.
// No constant pool entries required.
// Long add with an add/sub-encodable immediate; opcode 0x0 selects ADD
// in the shared addsub immediate encoding class.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10441 
10442 // Integer Subtraction
// Int subtract, register-register form: a single 32-bit SUBW.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10457 
10458 // Immediate Subtraction
// Int subtract with an add/sub-encodable immediate; opcode 0x1 selects
// SUB in the shared addsubw immediate encoding class.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10472 
10473 // Long Subtraction
// Long subtract, register-register form: a single 64-bit SUB.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10489 
// Long Immediate Subtraction.
// No constant pool entries required.
// Long subtract with an add/sub-encodable immediate; opcode 0x1 selects
// SUB in the shared addsub immediate encoding class.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // Fixed: original format string read "sub$dst" (missing the space
  // between mnemonic and first operand in debug/PrintAssembly output).
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10504 
10505 // Integer Negation (special case for sub)
10506 
// Int negation (0 - src) matched directly to NEGW.
// cr is listed as an operand although the encoding emits no
// flag-setting instruction — NOTE(review): presumably conservative;
// see the TODO at the top of this section.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10520 
10521 // Long Negation
10522 
// Long negation (0 - src) matched directly to NEG.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10536 
10537 // Integer Multiply
10538 
// Int multiply: 32-bit MULW.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10553 
// Widening multiply: int x int -> long, matched to a single SMULL
// instead of two sign-extends plus a 64-bit MUL.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10568 
10569 // Long Multiply
10570 
// Long multiply: 64-bit MUL.
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10585 
// High 64 bits of a signed 64x64->128 multiply (Math.multiplyHigh):
// single SMULH instruction.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10601 
// High 64 bits of an unsigned 64x64->128 multiply
// (Math.unsignedMultiplyHigh): single UMULH instruction.
instruct umulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (UMulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "umulh   $dst, $src1, $src2\t# umulhi" %}

  ins_encode %{
    __ umulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10617 
10618 // Combined Integer Multiply & Add/Sub
10619 
// Fused int multiply-add: src3 + src1 * src2 as a single MADDW.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Fixed: format previously said "madd" (64-bit mnemonic) although the
  // encoder emits the 32-bit MADDW form; debug output now matches.
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10635 
// Fused int multiply-subtract: src3 - src1 * src2 as a single MSUBW.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Fixed: format previously said "msub" (64-bit mnemonic) although the
  // encoder emits the 32-bit MSUBW form; debug output now matches.
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10651 
10652 // Combined Integer Multiply & Neg
10653 
// Fused int multiply-negate: (0 - src1) * src2 as a single MNEGW.
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
  match(Set dst (MulI (SubI zero src1) src2));

  ins_cost(INSN_COST * 3);
  // Fixed: format previously said "mneg" (64-bit mnemonic) although the
  // encoder emits the 32-bit MNEGW form; debug output now matches.
  format %{ "mnegw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mnegw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10668 
10669 // Combined Long Multiply & Add/Sub
10670 
// Fused long multiply-add: src3 + src1 * src2 as a single MADD.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10686 
// Fused long multiply-subtract: src3 - src1 * src2 as a single MSUB.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10702 
10703 // Combined Long Multiply & Neg
10704 
// Fused long multiply-negate: (0 - src1) * src2 as a single MNEG.
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mneg(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10719 
// Combined Integer Signed Multiply & Add/Sub/Neg Long
10721 
// Widening fused multiply-add: src3 + (long)src1 * (long)src2 as a
// single SMADDL (both ConvI2L nodes folded into the instruction).
instruct smaddL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (AddL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smaddl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smaddl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10737 
// Widening fused multiply-subtract: src3 - (long)src1 * (long)src2 as a
// single SMSUBL.
instruct smsubL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (SubL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smsubl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smsubl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10753 
// Widening fused multiply-negate: (0 - (long)src1) * (long)src2 as a
// single SMNEGL.
instruct smnegL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero (ConvI2L src1)) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smnegl  $dst, $src1, $src2" %}

  ins_encode %{
    __ smnegl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10768 
10769 // Combined Multiply-Add Shorts into Integer (dst = src1 * src2 + src3 * src4)
10770 
// MulAddS2I intrinsic (dst = src1*src2 + src3*src4): two instructions
// via MULW into rscratch1 followed by MADDW accumulating into dst.
instruct muladdS2I(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3, iRegIorL2I src4) %{
  match(Set dst (MulAddS2I (Binary src1 src2) (Binary src3 src4)));

  ins_cost(INSN_COST * 5);
  format %{ "mulw  rscratch1, $src1, $src2\n\t"
            "maddw $dst, $src3, $src4, rscratch1" %}

  ins_encode %{
    __ mulw(rscratch1, as_Register($src1$$reg), as_Register($src2$$reg));
    __ maddw(as_Register($dst$$reg), as_Register($src3$$reg), as_Register($src4$$reg), rscratch1); %}

  ins_pipe(imac_reg_reg);
%}
10784 
10785 // Integer Divide
10786 
// Int divide via the shared divw encoding class (SDIVW; AArch64 SDIV
// needs no explicit divide-by-zero or MIN_INT/-1 trap handling).
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10796 
10797 // Long Divide
10798 
// Long divide via the shared div encoding class (64-bit SDIV).
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10808 
10809 // Integer Remainder
10810 
// Int remainder: AArch64 has no hardware modulo, so the shared modw
// encoding emits SDIVW + MSUBW (dst = src1 - (src1/src2)*src2).
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10821 
10822 // Long Remainder
10823 
// Long remainder: SDIV + MSUB (dst = src1 - (src1/src2)*src2) via the
// shared mod encoding class.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Fixed: use "\n\t" between the two format lines for consistent
  // PrintAssembly indentation, matching modI/UmodI above.
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10834 
10835 // Unsigned Integer Divide
10836 
// Unsigned int divide (Integer.divideUnsigned): single UDIVW.
instruct UdivI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (UDivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "udivw  $dst, $src1, $src2" %}

  ins_encode %{
    __ udivw($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(idiv_reg_reg);
%}
10849 
10850 //  Unsigned Long Divide
10851 
// Unsigned long divide (Long.divideUnsigned): single UDIV.
instruct UdivL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (UDivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "udiv   $dst, $src1, $src2" %}

  ins_encode %{
    __ udiv($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(ldiv_reg_reg);
%}
10864 
10865 // Unsigned Integer Remainder
10866 
// Unsigned int remainder: UDIVW into rscratch1, then MSUBW computes
// dst = src1 - (src1/src2)*src2.
instruct UmodI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (UModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "udivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode %{
    __ udivw(rscratch1, $src1$$Register, $src2$$Register);
    __ msubw($dst$$Register, rscratch1, $src2$$Register, $src1$$Register);
  %}

  ins_pipe(idiv_reg_reg);
%}
10881 
10882 // Unsigned Long Remainder
10883 
// Unsigned long remainder: UDIV into rscratch1, then MSUB computes
// dst = src1 - (src1/src2)*src2.
instruct UModL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (UModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Fixed: use "\n\t" between the two format lines for consistent
  // PrintAssembly indentation, matching UmodI above.
  format %{ "udiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode %{
    __ udiv(rscratch1, $src1$$Register, $src2$$Register);
    __ msub($dst$$Register, rscratch1, $src2$$Register, $src1$$Register);
  %}

  ins_pipe(ldiv_reg_reg);
%}
10898 
10899 // Integer Shifts
10900 
10901 // Shift Left Register
// Int shift left by a register amount: LSLVW (hardware masks the shift
// amount to 5 bits, matching Java semantics).
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10916 
10917 // Shift Left Immediate
// Int shift left by an immediate; amount masked to 0x1f to match Java
// shift semantics.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10932 
10933 // Shift Right Logical Register
// Int logical shift right by a register amount: LSRVW.
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10948 
10949 // Shift Right Logical Immediate
// Int logical shift right by an immediate; amount masked to 0x1f.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10964 
10965 // Shift Right Arithmetic Register
// Int arithmetic shift right by a register amount: ASRVW.
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10980 
10981 // Shift Right Arithmetic Immediate
// Int arithmetic shift right by an immediate; amount masked to 0x1f.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10996 
10997 // Combined Int Mask and Right Shift (using UBFM)
10998 // TODO
10999 
11000 // Long Shifts
11001 
11002 // Shift Left Register
// Long shift left by a register amount: LSLV (hardware masks the shift
// amount to 6 bits, matching Java semantics).
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11017 
11018 // Shift Left Immediate
// Long shift left by an immediate; amount masked to 0x3f.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11033 
11034 // Shift Right Logical Register
// Long logical shift right by a register amount: LSRV.
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11049 
11050 // Shift Right Logical Immediate
// Long logical shift right by an immediate; amount masked to 0x3f.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11065 
11066 // A special-case pattern for card table stores.
// Logical right shift of a pointer reinterpreted as a long (CastP2X
// folded away) — used by card-table barrier address computation.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11081 
11082 // Shift Right Arithmetic Register
// Long arithmetic shift right by a register amount: ASRV.
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11097 
11098 // Shift Right Arithmetic Immediate
// Long arithmetic shift right by an immediate; amount masked to 0x3f.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11113 
11114 // BEGIN This section of the file is automatically generated. Do not edit --------------
11115 // This section is generated from aarch64_ad.m4
11116 
11117 // This pattern is automatically generated from aarch64_ad.m4
11118 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11119 instruct regL_not_reg(iRegLNoSp dst,
11120                          iRegL src1, immL_M1 m1,
11121                          rFlagsReg cr) %{
11122   match(Set dst (XorL src1 m1));
11123   ins_cost(INSN_COST);
11124   format %{ "eon  $dst, $src1, zr" %}
11125 
11126   ins_encode %{
11127     __ eon(as_Register($dst$$reg),
11128               as_Register($src1$$reg),
11129               zr,
11130               Assembler::LSL, 0);
11131   %}
11132 
11133   ins_pipe(ialu_reg);
11134 %}
11135 
11136 // This pattern is automatically generated from aarch64_ad.m4
11137 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11138 instruct regI_not_reg(iRegINoSp dst,
11139                          iRegIorL2I src1, immI_M1 m1,
11140                          rFlagsReg cr) %{
11141   match(Set dst (XorI src1 m1));
11142   ins_cost(INSN_COST);
11143   format %{ "eonw  $dst, $src1, zr" %}
11144 
11145   ins_encode %{
11146     __ eonw(as_Register($dst$$reg),
11147               as_Register($src1$$reg),
11148               zr,
11149               Assembler::LSL, 0);
11150   %}
11151 
11152   ins_pipe(ialu_reg);
11153 %}
11154 
11155 // This pattern is automatically generated from aarch64_ad.m4
11156 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11157 instruct NegI_reg_URShift_reg(iRegINoSp dst,
11158                               immI0 zero, iRegIorL2I src1, immI src2) %{
11159   match(Set dst (SubI zero (URShiftI src1 src2)));
11160 
11161   ins_cost(1.9 * INSN_COST);
11162   format %{ "negw  $dst, $src1, LSR $src2" %}
11163 
11164   ins_encode %{
11165     __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
11166             Assembler::LSR, $src2$$constant & 0x1f);
11167   %}
11168 
11169   ins_pipe(ialu_reg_shift);
11170 %}
11171 
11172 // This pattern is automatically generated from aarch64_ad.m4
11173 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11174 instruct NegI_reg_RShift_reg(iRegINoSp dst,
11175                               immI0 zero, iRegIorL2I src1, immI src2) %{
11176   match(Set dst (SubI zero (RShiftI src1 src2)));
11177 
11178   ins_cost(1.9 * INSN_COST);
11179   format %{ "negw  $dst, $src1, ASR $src2" %}
11180 
11181   ins_encode %{
11182     __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
11183             Assembler::ASR, $src2$$constant & 0x1f);
11184   %}
11185 
11186   ins_pipe(ialu_reg_shift);
11187 %}
11188 
11189 // This pattern is automatically generated from aarch64_ad.m4
11190 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11191 instruct NegI_reg_LShift_reg(iRegINoSp dst,
11192                               immI0 zero, iRegIorL2I src1, immI src2) %{
11193   match(Set dst (SubI zero (LShiftI src1 src2)));
11194 
11195   ins_cost(1.9 * INSN_COST);
11196   format %{ "negw  $dst, $src1, LSL $src2" %}
11197 
11198   ins_encode %{
11199     __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
11200             Assembler::LSL, $src2$$constant & 0x1f);
11201   %}
11202 
11203   ins_pipe(ialu_reg_shift);
11204 %}
11205 
11206 // This pattern is automatically generated from aarch64_ad.m4
11207 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11208 instruct NegL_reg_URShift_reg(iRegLNoSp dst,
11209                               immL0 zero, iRegL src1, immI src2) %{
11210   match(Set dst (SubL zero (URShiftL src1 src2)));
11211 
11212   ins_cost(1.9 * INSN_COST);
11213   format %{ "neg  $dst, $src1, LSR $src2" %}
11214 
11215   ins_encode %{
11216     __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
11217             Assembler::LSR, $src2$$constant & 0x3f);
11218   %}
11219 
11220   ins_pipe(ialu_reg_shift);
11221 %}
11222 
11223 // This pattern is automatically generated from aarch64_ad.m4
11224 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11225 instruct NegL_reg_RShift_reg(iRegLNoSp dst,
11226                               immL0 zero, iRegL src1, immI src2) %{
11227   match(Set dst (SubL zero (RShiftL src1 src2)));
11228 
11229   ins_cost(1.9 * INSN_COST);
11230   format %{ "neg  $dst, $src1, ASR $src2" %}
11231 
11232   ins_encode %{
11233     __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
11234             Assembler::ASR, $src2$$constant & 0x3f);
11235   %}
11236 
11237   ins_pipe(ialu_reg_shift);
11238 %}
11239 
11240 // This pattern is automatically generated from aarch64_ad.m4
11241 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11242 instruct NegL_reg_LShift_reg(iRegLNoSp dst,
11243                               immL0 zero, iRegL src1, immI src2) %{
11244   match(Set dst (SubL zero (LShiftL src1 src2)));
11245 
11246   ins_cost(1.9 * INSN_COST);
11247   format %{ "neg  $dst, $src1, LSL $src2" %}
11248 
11249   ins_encode %{
11250     __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
11251             Assembler::LSL, $src2$$constant & 0x3f);
11252   %}
11253 
11254   ins_pipe(ialu_reg_shift);
11255 %}
11256 
11257 // This pattern is automatically generated from aarch64_ad.m4
11258 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11259 instruct AndI_reg_not_reg(iRegINoSp dst,
11260                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
11261   match(Set dst (AndI src1 (XorI src2 m1)));
11262   ins_cost(INSN_COST);
11263   format %{ "bicw  $dst, $src1, $src2" %}
11264 
11265   ins_encode %{
11266     __ bicw(as_Register($dst$$reg),
11267               as_Register($src1$$reg),
11268               as_Register($src2$$reg),
11269               Assembler::LSL, 0);
11270   %}
11271 
11272   ins_pipe(ialu_reg_reg);
11273 %}
11274 
11275 // This pattern is automatically generated from aarch64_ad.m4
11276 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11277 instruct AndL_reg_not_reg(iRegLNoSp dst,
11278                          iRegL src1, iRegL src2, immL_M1 m1) %{
11279   match(Set dst (AndL src1 (XorL src2 m1)));
11280   ins_cost(INSN_COST);
11281   format %{ "bic  $dst, $src1, $src2" %}
11282 
11283   ins_encode %{
11284     __ bic(as_Register($dst$$reg),
11285               as_Register($src1$$reg),
11286               as_Register($src2$$reg),
11287               Assembler::LSL, 0);
11288   %}
11289 
11290   ins_pipe(ialu_reg_reg);
11291 %}
11292 
11293 // This pattern is automatically generated from aarch64_ad.m4
11294 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11295 instruct OrI_reg_not_reg(iRegINoSp dst,
11296                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
11297   match(Set dst (OrI src1 (XorI src2 m1)));
11298   ins_cost(INSN_COST);
11299   format %{ "ornw  $dst, $src1, $src2" %}
11300 
11301   ins_encode %{
11302     __ ornw(as_Register($dst$$reg),
11303               as_Register($src1$$reg),
11304               as_Register($src2$$reg),
11305               Assembler::LSL, 0);
11306   %}
11307 
11308   ins_pipe(ialu_reg_reg);
11309 %}
11310 
11311 // This pattern is automatically generated from aarch64_ad.m4
11312 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11313 instruct OrL_reg_not_reg(iRegLNoSp dst,
11314                          iRegL src1, iRegL src2, immL_M1 m1) %{
11315   match(Set dst (OrL src1 (XorL src2 m1)));
11316   ins_cost(INSN_COST);
11317   format %{ "orn  $dst, $src1, $src2" %}
11318 
11319   ins_encode %{
11320     __ orn(as_Register($dst$$reg),
11321               as_Register($src1$$reg),
11322               as_Register($src2$$reg),
11323               Assembler::LSL, 0);
11324   %}
11325 
11326   ins_pipe(ialu_reg_reg);
11327 %}
11328 
11329 // This pattern is automatically generated from aarch64_ad.m4
11330 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
  // -1 ^ (src2 ^ src1) is ~(src1 ^ src2), emitted as a single eonw.
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    // LSL #0: plain register operand, no shift applied.
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11346 
11347 // This pattern is automatically generated from aarch64_ad.m4
11348 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1) %{
  // -1L ^ (src2 ^ src1) is ~(src1 ^ src2), emitted as a single eon.
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    // LSL #0: plain register operand, no shift applied.
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11364 
11365 // This pattern is automatically generated from aarch64_ad.m4
11366 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11367 // val & (-1 ^ (val >>> shift)) ==> bicw
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // XorI with -1 (src4) is bitwise NOT: matches src1 & ~(src2 >>> src3).
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    // Shift amount masked to 5 bits for the 32-bit (w) form.
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11385 
11386 // This pattern is automatically generated from aarch64_ad.m4
11387 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11388 // val & (-1 ^ (val >>> shift)) ==> bic
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // XorL with -1 (src4) is bitwise NOT: matches src1 & ~(src2 >>> src3).
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    // Shift amount masked to 6 bits for the 64-bit form.
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11406 
11407 // This pattern is automatically generated from aarch64_ad.m4
11408 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11409 // val & (-1 ^ (val >> shift)) ==> bicw
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // XorI with -1 (src4) is bitwise NOT: matches src1 & ~(src2 >> src3).
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    // Shift amount masked to 5 bits for the 32-bit (w) form.
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11427 
11428 // This pattern is automatically generated from aarch64_ad.m4
11429 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11430 // val & (-1 ^ (val >> shift)) ==> bic
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // XorL with -1 (src4) is bitwise NOT: matches src1 & ~(src2 >> src3).
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    // Shift amount masked to 6 bits for the 64-bit form.
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11448 
11449 // This pattern is automatically generated from aarch64_ad.m4
11450 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11451 // val & (-1 ^ (val ror shift)) ==> bicw
instruct AndI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // XorI with -1 (src4) is bitwise NOT: matches src1 & ~(src2 ror src3).
  match(Set dst (AndI src1 (XorI(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    // Rotate amount masked to 5 bits for the 32-bit (w) form.
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11469 
11470 // This pattern is automatically generated from aarch64_ad.m4
11471 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11472 // val & (-1 ^ (val ror shift)) ==> bic
instruct AndL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // XorL with -1 (src4) is bitwise NOT: matches src1 & ~(src2 ror src3).
  match(Set dst (AndL src1 (XorL(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    // Rotate amount masked to 6 bits for the 64-bit form.
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11490 
11491 // This pattern is automatically generated from aarch64_ad.m4
11492 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11493 // val & (-1 ^ (val << shift)) ==> bicw
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // XorI with -1 (src4) is bitwise NOT: matches src1 & ~(src2 << src3).
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    // Shift amount masked to 5 bits for the 32-bit (w) form.
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11511 
11512 // This pattern is automatically generated from aarch64_ad.m4
11513 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11514 // val & (-1 ^ (val << shift)) ==> bic
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // XorL with -1 (src4) is bitwise NOT: matches src1 & ~(src2 << src3).
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    // Shift amount masked to 6 bits for the 64-bit form.
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11532 
11533 // This pattern is automatically generated from aarch64_ad.m4
11534 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11535 // val ^ (-1 ^ (val >>> shift)) ==> eonw
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // -1 ^ ((src2 >>> src3) ^ src1) == src1 ^ ~(src2 >>> src3); emitted as eonw.
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    // Shift amount masked to 5 bits for the 32-bit (w) form.
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11553 
11554 // This pattern is automatically generated from aarch64_ad.m4
11555 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11556 // val ^ (-1 ^ (val >>> shift)) ==> eon
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // -1L ^ ((src2 >>> src3) ^ src1) == src1 ^ ~(src2 >>> src3); emitted as eon.
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    // Shift amount masked to 6 bits for the 64-bit form.
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11574 
11575 // This pattern is automatically generated from aarch64_ad.m4
11576 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11577 // val ^ (-1 ^ (val >> shift)) ==> eonw
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // -1 ^ ((src2 >> src3) ^ src1) == src1 ^ ~(src2 >> src3); emitted as eonw.
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    // Shift amount masked to 5 bits for the 32-bit (w) form.
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11595 
11596 // This pattern is automatically generated from aarch64_ad.m4
11597 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11598 // val ^ (-1 ^ (val >> shift)) ==> eon
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // -1L ^ ((src2 >> src3) ^ src1) == src1 ^ ~(src2 >> src3); emitted as eon.
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    // Shift amount masked to 6 bits for the 64-bit form.
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11616 
11617 // This pattern is automatically generated from aarch64_ad.m4
11618 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11619 // val ^ (-1 ^ (val ror shift)) ==> eonw
instruct XorI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // -1 ^ ((src2 ror src3) ^ src1) == src1 ^ ~(src2 ror src3); emitted as eonw.
  match(Set dst (XorI src4 (XorI(RotateRight src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    // Rotate amount masked to 5 bits for the 32-bit (w) form.
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11637 
11638 // This pattern is automatically generated from aarch64_ad.m4
11639 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11640 // val ^ (-1 ^ (val ror shift)) ==> eon
instruct XorL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // -1L ^ ((src2 ror src3) ^ src1) == src1 ^ ~(src2 ror src3); emitted as eon.
  match(Set dst (XorL src4 (XorL(RotateRight src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    // Rotate amount masked to 6 bits for the 64-bit form.
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11658 
11659 // This pattern is automatically generated from aarch64_ad.m4
11660 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11661 // val ^ (-1 ^ (val << shift)) ==> eonw
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // -1 ^ ((src2 << src3) ^ src1) == src1 ^ ~(src2 << src3); emitted as eonw.
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    // Shift amount masked to 5 bits for the 32-bit (w) form.
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11679 
11680 // This pattern is automatically generated from aarch64_ad.m4
11681 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11682 // val ^ (-1 ^ (val << shift)) ==> eon
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // -1L ^ ((src2 << src3) ^ src1) == src1 ^ ~(src2 << src3); emitted as eon.
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    // Shift amount masked to 6 bits for the 64-bit form.
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11700 
11701 // This pattern is automatically generated from aarch64_ad.m4
11702 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11703 // val | (-1 ^ (val >>> shift)) ==> ornw
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // XorI with -1 (src4) is bitwise NOT: matches src1 | ~(src2 >>> src3).
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    // Shift amount masked to 5 bits for the 32-bit (w) form.
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11721 
11722 // This pattern is automatically generated from aarch64_ad.m4
11723 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11724 // val | (-1 ^ (val >>> shift)) ==> orn
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // XorL with -1 (src4) is bitwise NOT: matches src1 | ~(src2 >>> src3).
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    // Shift amount masked to 6 bits for the 64-bit form.
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11742 
11743 // This pattern is automatically generated from aarch64_ad.m4
11744 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11745 // val | (-1 ^ (val >> shift)) ==> ornw
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // XorI with -1 (src4) is bitwise NOT: matches src1 | ~(src2 >> src3).
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    // Shift amount masked to 5 bits for the 32-bit (w) form.
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11763 
11764 // This pattern is automatically generated from aarch64_ad.m4
11765 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11766 // val | (-1 ^ (val >> shift)) ==> orn
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // XorL with -1 (src4) is bitwise NOT: matches src1 | ~(src2 >> src3).
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    // Shift amount masked to 6 bits for the 64-bit form.
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11784 
11785 // This pattern is automatically generated from aarch64_ad.m4
11786 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11787 // val | (-1 ^ (val ror shift)) ==> ornw
instruct OrI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // XorI with -1 (src4) is bitwise NOT: matches src1 | ~(src2 ror src3).
  match(Set dst (OrI src1 (XorI(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    // Rotate amount masked to 5 bits for the 32-bit (w) form.
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11805 
11806 // This pattern is automatically generated from aarch64_ad.m4
11807 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11808 // val | (-1 ^ (val ror shift)) ==> orn
instruct OrL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // XorL with -1 (src4) is bitwise NOT: matches src1 | ~(src2 ror src3).
  match(Set dst (OrL src1 (XorL(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    // Rotate amount masked to 6 bits for the 64-bit form.
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11826 
11827 // This pattern is automatically generated from aarch64_ad.m4
11828 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11829 // val | (-1 ^ (val << shift)) ==> ornw
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // XorI with -1 (src4) is bitwise NOT: matches src1 | ~(src2 << src3).
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    // Shift amount masked to 5 bits for the 32-bit (w) form.
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11847 
11848 // This pattern is automatically generated from aarch64_ad.m4
11849 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11850 // val | (-1 ^ (val << shift)) ==> orn
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // XorL with -1 (src4) is bitwise NOT: matches src1 | ~(src2 << src3).
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    // Shift amount masked to 6 bits for the 64-bit form.
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11868 
11869 // This pattern is automatically generated from aarch64_ad.m4
11870 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  // Folds the shift into the logical op: dst = src1 & (src2 >>> src3) in one andw.
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    // Shift amount masked to 5 bits for the 32-bit (w) form.
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11889 
11890 // This pattern is automatically generated from aarch64_ad.m4
11891 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  // Folds the shift into the logical op: dst = src1 & (src2 >>> src3) in one andr.
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    // Shift amount masked to 6 bits for the 64-bit form.
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11910 
11911 // This pattern is automatically generated from aarch64_ad.m4
11912 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  // Folds the shift into the logical op: dst = src1 & (src2 >> src3) in one andw.
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    // Shift amount masked to 5 bits for the 32-bit (w) form.
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11931 
11932 // This pattern is automatically generated from aarch64_ad.m4
11933 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  // Folds the shift into the logical op: dst = src1 & (src2 >> src3) in one andr.
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    // Shift amount masked to 6 bits for the 64-bit form.
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11952 
11953 // This pattern is automatically generated from aarch64_ad.m4
11954 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  // Folds the shift into the logical op: dst = src1 & (src2 << src3) in one andw.
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    // Shift amount masked to 5 bits for the 32-bit (w) form.
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11973 
11974 // This pattern is automatically generated from aarch64_ad.m4
11975 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  // Folds the shift into the logical op: dst = src1 & (src2 << src3) in one andr.
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    // Shift amount masked to 6 bits for the 64-bit form.
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11994 
11995 // This pattern is automatically generated from aarch64_ad.m4
11996 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  // Folds the rotate into the logical op: dst = src1 & (src2 ror src3) in one andw.
  match(Set dst (AndI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    // Rotate amount masked to 5 bits for the 32-bit (w) form.
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12015 
12016 // This pattern is automatically generated from aarch64_ad.m4
12017 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_RotateRight_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  // Folds the rotate into the logical op: dst = src1 & (src2 ror src3) in one andr.
  match(Set dst (AndL src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    // Rotate amount masked to 6 bits for the 64-bit form.
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12036 
12037 // This pattern is automatically generated from aarch64_ad.m4
12038 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  // Folds the shift into the logical op: dst = src1 ^ (src2 >>> src3) in one eorw.
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    // Shift amount masked to 5 bits for the 32-bit (w) form.
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12057 
12058 // This pattern is automatically generated from aarch64_ad.m4
12059 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  // Folds the shift into the logical op: dst = src1 ^ (src2 >>> src3) in one eor.
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    // Shift amount masked to 6 bits for the 64-bit form.
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12078 
12079 // This pattern is automatically generated from aarch64_ad.m4
12080 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  // Folds the shift into the logical op: dst = src1 ^ (src2 >> src3) in one eorw.
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    // Shift amount masked to 5 bits for the 32-bit (w) form.
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12099 
12100 // This pattern is automatically generated from aarch64_ad.m4
12101 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  // Folds the shift into the logical op: dst = src1 ^ (src2 >> src3) in one eor.
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    // Shift amount masked to 6 bits for the 64-bit form.
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12120 
12121 // This pattern is automatically generated from aarch64_ad.m4
12122 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  // Folds the shift into the logical op: dst = src1 ^ (src2 << src3) in one eorw.
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    // Shift amount masked to 5 bits for the 32-bit (w) form.
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12141 
12142 // This pattern is automatically generated from aarch64_ad.m4
12143 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  // Folds the shift into the logical op: dst = src1 ^ (src2 << src3) in one eor.
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    // Shift amount masked to 6 bits for the 64-bit form.
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12162 
12163 // This pattern is automatically generated from aarch64_ad.m4
12164 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  // Folds the rotate into the logical op: dst = src1 ^ (src2 ror src3) in one eorw.
  match(Set dst (XorI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    // Rotate amount masked to 5 bits for the 32-bit (w) form.
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12183 
12184 // This pattern is automatically generated from aarch64_ad.m4
12185 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12186 instruct XorL_reg_RotateRight_reg(iRegLNoSp dst,
12187                          iRegL src1, iRegL src2,
12188                          immI src3) %{
12189   match(Set dst (XorL src1 (RotateRight src2 src3)));
12190 
12191   ins_cost(1.9 * INSN_COST);
12192   format %{ "eor  $dst, $src1, $src2, ROR $src3" %}
12193 
12194   ins_encode %{
12195     __ eor(as_Register($dst$$reg),
12196               as_Register($src1$$reg),
12197               as_Register($src2$$reg),
12198               Assembler::ROR,
12199               $src3$$constant & 0x3f);
12200   %}
12201 
12202   ins_pipe(ialu_reg_reg_shift);
12203 %}
12204 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- lasting changes belong in aarch64_ad.m4.
// Or with a shifted/rotated second operand: ORR's shifted-register form
// absorbs the shift node (masks 0x1f for 32-bit, 0x3f for 64-bit amounts).
// dst = src1 | (src2 >>> (src3 & 31)), logical shift right (LSR).
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 | (src2 >>> (src3 & 63)).
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 | (src2 >> (src3 & 31)), arithmetic shift right (ASR).
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 | (src2 >> (src3 & 63)).
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 | (src2 << (src3 & 31)).
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 | (src2 << (src3 & 63)).
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 | rotate_right(src2, src3 & 31).
instruct OrI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 | rotate_right(src2, src3 & 63).
instruct OrL_reg_RotateRight_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12372 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- lasting changes belong in aarch64_ad.m4.
// Add with a shifted second operand: ADD's shifted-register form absorbs
// the shift node (no ROR form exists for ADD on A64, hence no rotate
// variants in this family).
// dst = src1 + (src2 >>> (src3 & 31)).
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 + (src2 >>> (src3 & 63)).
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 + (src2 >> (src3 & 31)), arithmetic shift right.
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 + (src2 >> (src3 & 63)).
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 + (src2 << (src3 & 31)).
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 + (src2 << (src3 & 63)).
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12498 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- lasting changes belong in aarch64_ad.m4.
// Sub with a shifted second operand.  Only the subtrahend (second operand)
// can carry the shift in SUB's shifted-register form, which is why these
// patterns match the shift under the right-hand input only.
// dst = src1 - (src2 >>> (src3 & 31)).
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (src2 >>> (src3 & 63)).
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (src2 >> (src3 & 31)), arithmetic shift right.
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (src2 >> (src3 & 63)).
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (src2 << (src3 & 31)).
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (src2 << (src3 & 63)).
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12624 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- lasting changes belong in aarch64_ad.m4.

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// (x << L) >> R (signed) becomes a single SBFM: with immr = (R - L) mod 64
// and imms = 63 - L, SBFM sign-extends exactly the bits the shift pair
// would leave, for any L/R combination (not just L == R).
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant: immr = (R - L) mod 32, imms = 31 - L.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// Unsigned counterpart of sbfmL: (x << L) >>> R becomes a single UBFM
// (zero-extending bitfield move) with the same immr/imms computation.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit unsigned variant.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12716 
// Bitfield extract with shift & mask

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- lasting changes belong in aarch64_ad.m4.
// (src >>> rshift) & mask, where immI_bitmask guarantees mask = 2^w - 1,
// becomes a single UBFXW extracting w bits starting at bit rshift.  The
// predicate re-derives w and rshift from the node inputs and rejects any
// combination where rshift + w would exceed the 32-bit register width.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit variant: (src >>> rshift) & mask -> UBFX, width = log2(mask + 1).
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));
  // Make sure we are not going to exceed what ubfx can do.
  predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12758 
12759 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- lasting changes belong in aarch64_ad.m4.

// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// ConvI2L((src >>> rshift) & mask): since the masked result is already
// zero-extended, the 64-bit UBFX covers both the extract and the i2l.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12782 
12783 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- lasting changes belong in aarch64_ad.m4.

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// (src & mask) << lshift, mask = 2^w - 1, becomes a single UBFIZW that
// inserts the low w bits of src at bit position lshift, zeroing the rest.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// 64-bit variant of ubfizwI.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  predicate((exact_log2_long(n->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12827 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- lasting changes belong in aarch64_ad.m4.

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// ConvI2L((src & mask) << lshift): the predicate bound (<= 31, strict) keeps
// the inserted field inside the low 32 bits so the int result is already
// non-negative and its zero extension is free.
instruct ubfizwIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (LShiftI (AndI src mask) lshift)));
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// ConvL2I((src & mask) << lshift): predicate keeps the field within the low
// 32 bits (<= 31), so the 64-bit UBFIZ result truncates to int losslessly.
instruct ubfizLConvL2I(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
%{
  match(Set dst (ConvL2I (LShiftL (AndL src mask) lshift)));
  predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12871 
12872 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- lasting changes belong in aarch64_ad.m4.

// If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
// (ConvI2L(src & mask)) << lshift: the zero-extending insert of UBFIZ
// subsumes the i2l, so the 64-bit shift bound (<= 64) applies.
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// If there is a convert L to I block between and AndL and a LShiftI, we can also match ubfiz
// (ConvL2I(src & mask)) << lshift, with the field confined to the low
// 32 bits (predicate <= 31) so the narrowing conversion is lossless.
instruct ubfizLConvL2Ix(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
%{
  match(Set dst (LShiftI (ConvL2I (AndL src mask)) lshift));
  predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 31)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Can skip int2long conversions after AND with small bitmask
// ConvI2L(src & msk): a UBFIZ with lshift 0 performs the mask and the
// zero extension in one instruction.
instruct ubfizIConvI2LAndI(iRegLNoSp dst, iRegI src, immI_bitmask msk)
%{
  match(Set dst (ConvI2L (AndI src msk)));
  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, 0, exact_log2($msk + 1) " %}
  ins_encode %{
    __ ubfiz(as_Register($dst$$reg), as_Register($src$$reg), 0, exact_log2($msk$$constant + 1));
  %}
  ins_pipe(ialu_reg_shift);
%}
12929 
12930 
// Rotations

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- lasting changes belong in aarch64_ad.m4.
// (src1 << lshift) | (src2 >>> rshift) where lshift + rshift == 64
// (enforced by the predicate) is a funnel shift: a single EXTR taking
// rshift bits from src2 and the rest from src1.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit variant (lshift + rshift == 32).
// NOTE(review): the format string shows "extr" but the encoder emits the
// 32-bit extrw -- cosmetic mismatch in PrintAssembly output only; confirm
// against the m4 source before changing.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Same funnel-shift combine with AddL instead of OrL: when the shifted
// fields cannot overlap (shifts sum to 64), add and or are equivalent.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit AddI variant; emits extrw (see format-string note on extrOrI).
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
13003 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- lasting changes belong in aarch64_ad.m4.
// Rotate right by immediate.  ROR (immediate) is the architectural alias
// of EXTR with both source registers equal, which is what is emitted here.
instruct rorI_imm(iRegINoSp dst, iRegI src, immI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ extrw(as_Register($dst$$reg), as_Register($src$$reg), as_Register($src$$reg),
               $shift$$constant & 0x1f);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit rotate right by immediate (extr alias, amount masked to 6 bits).
instruct rorL_imm(iRegLNoSp dst, iRegL src, immI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ extr(as_Register($dst$$reg), as_Register($src$$reg), as_Register($src$$reg),
               $shift$$constant & 0x3f);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
13035 
13036 // This pattern is automatically generated from aarch64_ad.m4
13037 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13038 instruct rorI_reg(iRegINoSp dst, iRegI src, iRegI shift)
13039 %{
13040   match(Set dst (RotateRight src shift));
13041 
13042   ins_cost(INSN_COST);
13043   format %{ "ror    $dst, $src, $shift" %}
13044 
13045   ins_encode %{
13046      __ rorvw(as_Register($dst$$reg), as_Register($src$$reg), as_Register($shift$$reg));
13047   %}
13048   ins_pipe(ialu_reg_reg_vshift);
13049 %}
13050 
13051 // This pattern is automatically generated from aarch64_ad.m4
13052 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13053 instruct rorL_reg(iRegLNoSp dst, iRegL src, iRegI shift)
13054 %{
13055   match(Set dst (RotateRight src shift));
13056 
13057   ins_cost(INSN_COST);
13058   format %{ "ror    $dst, $src, $shift" %}
13059 
13060   ins_encode %{
13061      __ rorv(as_Register($dst$$reg), as_Register($src$$reg), as_Register($shift$$reg));
13062   %}
13063   ins_pipe(ialu_reg_reg_vshift);
13064 %}
13065 
13066 // This pattern is automatically generated from aarch64_ad.m4
13067 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13068 instruct rolI_reg(iRegINoSp dst, iRegI src, iRegI shift)
13069 %{
13070   match(Set dst (RotateLeft src shift));
13071 
13072   ins_cost(INSN_COST);
13073   format %{ "rol    $dst, $src, $shift" %}
13074 
13075   ins_encode %{
13076      __ subw(rscratch1, zr, as_Register($shift$$reg));
13077      __ rorvw(as_Register($dst$$reg), as_Register($src$$reg), rscratch1);
13078   %}
13079   ins_pipe(ialu_reg_reg_vshift);
13080 %}
13081 
13082 // This pattern is automatically generated from aarch64_ad.m4
13083 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13084 instruct rolL_reg(iRegLNoSp dst, iRegL src, iRegI shift)
13085 %{
13086   match(Set dst (RotateLeft src shift));
13087 
13088   ins_cost(INSN_COST);
13089   format %{ "rol    $dst, $src, $shift" %}
13090 
13091   ins_encode %{
13092      __ subw(rscratch1, zr, as_Register($shift$$reg));
13093      __ rorv(as_Register($dst$$reg), as_Register($src$$reg), rscratch1);
13094   %}
13095   ins_pipe(ialu_reg_reg_vshift);
13096 %}
13097 
13098 
13099 // Add/subtract (extended)
13100 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// AddL of a ConvI2L value: folds the int-to-long conversion into the
// add's sxtw extended-register operand (one instruction instead of two).
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// SubL of a ConvI2L value: conversion folded into sub's sxtw operand.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 << 16) >> 16 is the sign-extend-from-short idiom; folded into
// the add's sxth extended-register operand.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 << 24) >> 24 is sign-extend-from-byte; folded into add ..., sxtb.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 << 24) >>> 24 (unsigned shift) is zero-extend-from-byte; folded
// into add ..., uxtb.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long form: (src2 << 48) >> 48 is sign-extend-from-short on a 64-bit
// value; folded into add ..., sxth.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 << 32) >> 32 is sign-extend-from-int on a long; add ..., sxtw.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 << 56) >> 56 is sign-extend-from-byte on a long; add ..., sxtb.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 << 56) >>> 56 is zero-extend-from-byte on a long; add ..., uxtb.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
13235 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src2 & 0xFF is zero-extend-from-byte; folded into addw ..., uxtb.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src2 & 0xFFFF is zero-extend-from-short; addw ..., uxth.
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long form: src2 & 0xFF folded into add ..., uxtb.
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long form: src2 & 0xFFFF folded into add ..., uxth.
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long form: src2 & 0xFFFFFFFF folded into add ..., uxtw.
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterpart of AddExtI_uxtb_and: subw ..., uxtb.
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterpart of AddExtI_uxth_and: subw ..., uxth.
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long subtract with byte mask: sub ..., uxtb.
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long subtract with short mask: sub ..., uxth.
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long subtract with 32-bit mask: sub ..., uxtw.
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
13385 
13386 
// Extended-register add/sub where the extended operand is additionally
// shifted left by lshift2 (immIExt limits it to the 0..4 range the
// hardware's extended-register form allows).
// NOTE(review): the format strings below print the literal text "#lshift2"
// rather than the operand's value (no '$'); cosmetic only — if changed,
// fix the template in aarch64_ad.m4, not these expansions.
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// sign-extend-byte-of-long (<<56 >>56), then << lshift2: add ..., sxtb #n.
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// sign-extend-short-of-long (<<48 >>48), then << lshift2: add ..., sxth #n.
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// sign-extend-int-of-long (<<32 >>32), then << lshift2: add ..., sxtw #n.
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterpart: sub ..., sxtb #lshift2.
instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterpart: sub ..., sxth #lshift2.
instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterpart: sub ..., sxtw #lshift2.
instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Int form: sign-extend-byte (<<24 >>24) then << lshift2: addw ..., sxtb #n.
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Int form: sign-extend-short (<<16 >>16) then << lshift2: addw ..., sxth #n.
instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Int subtract counterpart: subw ..., sxtb #lshift2.
instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Int subtract counterpart: subw ..., sxth #lshift2.
instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13536 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// AddL of (ConvI2L src2) << lshift: both the conversion and the shift are
// folded into a single add with an sxtw-shifted extended-register operand.
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterpart of AddExtI_shift: sub ..., sxtw #lshift.
instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13566 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 & 0xFF) << lshift: AND-mask zero-extend plus shift folded into a
// single add with uxtb-shifted extended-register operand (shift 0..4).
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 & 0xFFFF) << lshift: add ..., uxth #lshift.
instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 & 0xFFFFFFFF) << lshift: add ..., uxtw #lshift.
instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterpart: sub ..., uxtb #lshift.
instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterpart: sub ..., uxth #lshift.
instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterpart: sub ..., uxtw #lshift.
instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Int form: (src2 & 0xFF) << lshift: addw ..., uxtb #lshift.
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Int form: (src2 & 0xFFFF) << lshift: addw ..., uxth #lshift.
instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Int subtract counterpart: subw ..., uxtb #lshift.
instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Int subtract counterpart: subw ..., uxth #lshift.
instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13716 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Conditional-select helper: no match rule, only effects — these instructs
// are instantiated by expand rules elsewhere in the file (the min/max
// patterns that follow appear to use them — NOTE(review): callers not all
// visible in this chunk). dst = (LT) ? src1 : src2 on the flags in cr.
instruct cmovI_reg_reg_lt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE src2, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 lt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             $src2$$Register,
             Assembler::LT);
  %}
  ins_pipe(icond_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (GT) ? src1 : src2.
instruct cmovI_reg_reg_gt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE src2, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 gt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             $src2$$Register,
             Assembler::GT);
  %}
  ins_pipe(icond_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (LT) ? src1 : 0 (zero register as the false operand).
instruct cmovI_reg_imm0_lt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, zr lt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (GT) ? src1 : 0.
instruct cmovI_reg_imm0_gt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, zr gt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// CSINC with zr: dst = (LE) ? src1 : (zr + 1), i.e. selects 1 when the
// condition fails.
instruct cmovI_reg_imm1_le(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, $src1, zr le\t"  %}

  ins_encode %{
    __ csincw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LE);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (GT) ? src1 : 1 (via CSINC of zr).
instruct cmovI_reg_imm1_gt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, $src1, zr gt\t"  %}

  ins_encode %{
    __ csincw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// CSINV with zr: dst = (LT) ? src1 : ~zr, i.e. selects -1 when the
// condition fails.
instruct cmovI_reg_immM1_lt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csinvw $dst, $src1, zr lt\t"  %}

  ins_encode %{
    __ csinvw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (GE) ? src1 : -1 (via CSINV of zr).
instruct cmovI_reg_immM1_ge(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csinvw $dst, $src1, zr ge\t"  %}

  ins_encode %{
    __ csinvw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GE);
  %}
  ins_pipe(icond_reg);
%}
13852 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): all MinI/MaxI-with-small-immediate rules below expand into a
// compare of src against ZERO (compI_reg_imm0, defined elsewhere in this
// file) plus one of the conditional-select helpers above.  Comparing against
// zero is sufficient even for the +1/-1 immediates: e.g. for min(src, 1),
// src <= 0 implies the answer is src, and src > 0 implies src >= 1 so the
// answer is the constant 1.
//
// min(src, 0): src < 0 ? src : 0
instruct minI_reg_imm0(iRegINoSp dst, iRegIorL2I src, immI0 imm)
%{
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Commuted form: min(0, src)
instruct minI_imm0_reg(iRegINoSp dst, immI0 imm, iRegIorL2I src)
%{
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(src, 1): src <= 0 ? src : 1
instruct minI_reg_imm1(iRegINoSp dst, iRegIorL2I src, immI_1 imm)
%{
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_le(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Commuted form: min(1, src)
instruct minI_imm1_reg(iRegINoSp dst, immI_1 imm, iRegIorL2I src)
%{
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_le(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(src, -1): src < 0 ? src : -1
instruct minI_reg_immM1(iRegINoSp dst, iRegIorL2I src, immI_M1 imm)
%{
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Commuted form: min(-1, src)
instruct minI_immM1_reg(iRegINoSp dst, immI_M1 imm, iRegIorL2I src)
%{
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(src, 0): src > 0 ? src : 0
instruct maxI_reg_imm0(iRegINoSp dst, iRegIorL2I src, immI0 imm)
%{
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Commuted form: max(0, src)
instruct maxI_imm0_reg(iRegINoSp dst, immI0 imm, iRegIorL2I src)
%{
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(src, 1): src > 0 ? src : 1
instruct maxI_reg_imm1(iRegINoSp dst, iRegIorL2I src, immI_1 imm)
%{
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Commuted form: max(1, src)
instruct maxI_imm1_reg(iRegINoSp dst, immI_1 imm, iRegIorL2I src)
%{
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(src, -1): src >= 0 ? src : -1
instruct maxI_reg_immM1(iRegINoSp dst, iRegIorL2I src, immI_M1 imm)
%{
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_ge(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Commuted form: max(-1, src)
instruct maxI_immM1_reg(iRegINoSp dst, immI_M1 imm, iRegIorL2I src)
%{
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_ge(dst, src, cr);
  %}
%}
14008 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Reverse the bit order of a 32-bit value (single rbitw instruction).
instruct bits_reverse_I(iRegINoSp dst, iRegIorL2I src)
%{
  match(Set dst (ReverseI src));
  ins_cost(INSN_COST);
  format %{ "rbitw  $dst, $src" %}
  ins_encode %{
    __ rbitw($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Reverse the bit order of a 64-bit value (single rbit instruction).
instruct bits_reverse_L(iRegLNoSp dst, iRegL src)
%{
  match(Set dst (ReverseL src));
  ins_cost(INSN_COST);
  format %{ "rbit  $dst, $src" %}
  ins_encode %{
    __ rbit($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
14034 
14035 
14036 // END This section of the file is automatically generated. Do not edit --------------
14037 
14038 
14039 // ============================================================================
14040 // Floating Point Arithmetic Instructions
14041 
// Single-precision FP add: dst = src1 + src2.
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP add: dst = src1 + src2.
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Single-precision FP subtract: dst = src1 - src2.
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP subtract: dst = src1 - src2.
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Single-precision FP multiply: dst = src1 * src2.
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP multiply: dst = src1 * src2.
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
14131 
// Fused multiply-add family, matched only when UseFMA is enabled.
// AArch64 semantics used below (A64 ISA, 3-source FP data processing):
//   fmadd  dst = src3 + src1*src2
//   fmsub  dst = src3 - src1*src2
//   fnmadd dst = -src3 - src1*src2
//   fnmsub dst = src1*src2 - src3
//
// src1 * src2 + src3
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 + src3
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
// Two match rules: the negation may appear on either multiplicand.
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
// NOTE(review): the `zero` operand is referenced neither by the match rule
// nor by the encoding -- it looks vestigial; confirm whether it can be
// dropped (interface change, so left as is here).
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
// NOTE(review): `zero` unused here as well -- see mnsubF_reg_reg above.
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
  // n.b. insn name should be fnmsubd
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
14272 
14273 
// Math.max(FF)F
// Single fmaxs instruction; relied on to implement Java Math.max semantics
// for float arguments.
instruct maxF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxF src1 src2));

  format %{ "fmaxs   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.min(FF)F
instruct minF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinF src1 src2));

  format %{ "fmins   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.max(DD)D
instruct maxD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MaxD src1 src2));

  format %{ "fmaxd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Math.min(DD)D
instruct minD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MinD src1 src2));

  format %{ "fmind   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
14329 
14330 
// Single-precision FP divide: dst = src1 / src2.  Costed higher than the
// other FP binary ops to reflect divide latency.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// Double-precision FP divide: dst = src1 / src2 (higher cost than float).
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
14360 
// Single-precision FP negate: dst = -src.
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // Format fixed to "fnegs": the encoding emits the single-precision fnegs,
  // and the double variant below prints "fnegd" -- "fneg" was inconsistent.
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
14374 
// Double-precision FP negate: dst = -src.
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}

// Integer absolute value: compare against zero then conditionally negate.
// Clobbers the flags register (KILL cr).
instruct absI_reg(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (AbsI src));

  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $src, zr\n\t"
            "cnegw $dst, $src, Assembler::LT\t# int abs"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cnegw(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}

// Long absolute value: 64-bit compare + conditional negate.  Clobbers flags.
instruct absL_reg(iRegLNoSp dst, iRegL src, rFlagsReg cr)
%{
  match(Set dst (AbsL src));

  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmp  $src, zr\n\t"
            "cneg $dst, $src, Assembler::LT\t# long abs"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cneg(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}

// Single-precision FP absolute value (clears the sign bit).
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double-precision FP absolute value.
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}

// Fused |src1 - src2| (single): matches AbsF of a SubF and emits one fabds.
instruct absdF_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AbsF (SubF src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "fabds   $dst, $src1, $src2" %}
  ins_encode %{
    __ fabds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Fused |src1 - src2| (double): matches AbsD of a SubD and emits one fabdd.
instruct absdD_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AbsD (SubD src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "fabdd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fabdd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
14476 
// Double-precision square root: dst = sqrt(src).
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  // Pipeline class corrected: a double-precision op belongs to the
  // double divide/sqrt class (was fp_div_s, swapped with sqrtF_reg).
  ins_pipe(fp_div_d);
%}
14489 
// Single-precision square root: dst = sqrt(src).
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (SqrtF src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  // Pipeline class corrected: a single-precision op belongs to the
  // single divide/sqrt class (was fp_div_d, swapped with sqrtD_reg).
  ins_pipe(fp_div_s);
%}
14502 
// Math.rint, floor, ceil
// Rounds a double according to the compile-time rmode constant:
//   rmode_rint  -> frintn (round to nearest, ties to even)
//   rmode_floor -> frintm (round toward -infinity)
//   rmode_ceil  -> frintp (round toward +infinity)
// NOTE(review): the switch has no default; an unexpected rmode constant
// would silently emit nothing -- consider a default with an assert/
// ShouldNotReachHere().
instruct roundD_reg(vRegD dst, vRegD src, immI rmode) %{
  match(Set dst (RoundDoubleMode src rmode));
  format %{ "frint  $dst, $src, $rmode" %}
  ins_encode %{
    switch ($rmode$$constant) {
      case RoundDoubleModeNode::rmode_rint:
        __ frintnd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_floor:
        __ frintmd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_ceil:
        __ frintpd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
    }
  %}
  ins_pipe(fp_uop_d);
%}
14525 
// Double copySign via bit-select: fnegd of the zero input materializes a
// value whose only set bit is the sign bit (negating 0.0 gives -0.0, i.e.
// 0x8000000000000000 -- assumes `zero` holds +0.0; the match rule binds it
// to the node's zero input), then bsl copies that sign bit from src2 and
// all remaining bits from src1.
instruct copySignD_reg(vRegD dst, vRegD src1, vRegD src2, vRegD zero) %{
  match(Set dst (CopySignD src1 (Binary src2 zero)));
  effect(TEMP_DEF dst, USE src1, USE src2, USE zero);
  format %{ "CopySignD  $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg),
                  zero = as_FloatRegister($zero$$reg);
    __ fnegd(dst, zero);
    __ bsl(dst, __ T8B, src2, src1);
  %}
  ins_pipe(fp_uop_d);
%}

// Float copySign: movi builds the 0x80000000 sign-bit mask directly
// (0x80 shifted left 24), then bsl selects the sign bit from src2 and the
// remaining bits from src1.
// NOTE(review): ins_pipe is fp_uop_d for this single-precision pattern --
// confirm whether fp_uop_s was intended.
instruct copySignF_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (CopySignF src1 src2));
  effect(TEMP_DEF dst, USE src1, USE src2);
  format %{ "CopySignF  $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg);
    __ movi(dst, __ T2S, 0x80, 24);
    __ bsl(dst, __ T8B, src2, src1);
  %}
  ins_pipe(fp_uop_d);
%}

// Math.signum(double): returns +-1.0 for nonzero finite input, and the
// input itself for +-0.0 and NaN.  Implemented branch-free with a
// compare-absolute mask and a bit-select against the constant 1.0 in `one`.
instruct signumD_reg(vRegD dst, vRegD src, vRegD zero, vRegD one) %{
  match(Set dst (SignumD src (Binary zero one)));
  effect(TEMP_DEF dst, USE src, USE zero, USE one);
  format %{ "signumD  $dst, $src" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg),
                  dst = as_FloatRegister($dst$$reg),
                  zero = as_FloatRegister($zero$$reg),
                  one = as_FloatRegister($one$$reg);
    __ facgtd(dst, src, zero); // dst=0 for +-0.0 and NaN. 0xFFF..F otherwise
    __ ushrd(dst, dst, 1);     // dst=0 for +-0.0 and NaN. 0x7FF..F otherwise
    // Bit selection instruction gets bit from "one" for each enabled bit in
    // "dst", otherwise gets a bit from "src". For "src" that contains +-0.0 or
    // NaN the whole "src" will be copied because "dst" is zero. For all other
    // "src" values dst is 0x7FF..F, which means only the sign bit is copied
    // from "src", and all other bits are copied from 1.0.
    __ bsl(dst, __ T8B, one, src);
  %}
  ins_pipe(fp_uop_d);
%}

// Math.signum(float): same mask-and-select scheme as signumD above, using
// the single-precision compare (facgts) and a vector shift for the mask.
instruct signumF_reg(vRegF dst, vRegF src, vRegF zero, vRegF one) %{
  match(Set dst (SignumF src (Binary zero one)));
  effect(TEMP_DEF dst, USE src, USE zero, USE one);
  format %{ "signumF  $dst, $src" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg),
                  dst = as_FloatRegister($dst$$reg),
                  zero = as_FloatRegister($zero$$reg),
                  one = as_FloatRegister($one$$reg);
    __ facgts(dst, src, zero);    // dst=0 for +-0.0 and NaN. 0xFFF..F otherwise
    __ ushr(dst, __ T2S, dst, 1); // dst=0 for +-0.0 and NaN. 0x7FF..F otherwise
    // Bit selection instruction gets bit from "one" for each enabled bit in
    // "dst", otherwise gets a bit from "src". For "src" that contains +-0.0 or
    // NaN the whole "src" will be copied because "dst" is zero. For all other
    // "src" values dst is 0x7FF..F, which means only the sign bit is copied
    // from "src", and all other bits are copied from 1.0.
    __ bsl(dst, __ T8B, one, src);
  %}
  ins_pipe(fp_uop_d);
%}

// Thread.onSpinWait() intrinsic; expands to the macro assembler's
// spin_wait() hint sequence.
instruct onspinwait() %{
  match(OnSpinWait);
  ins_cost(INSN_COST);

  format %{ "onspinwait" %}

  ins_encode %{
    __ spin_wait();
  %}
  ins_pipe(pipe_class_empty);
%}
14608 
14609 // ============================================================================
14610 // Logical Instructions
14611 
14612 // Integer Logical Instructions
14613 
14614 // And Instructions
14615 
14616 
// Integer bitwise AND, register-register: dst = src1 & src2 (32-bit).
// NOTE(review): the rFlagsReg cr operand is neither read nor killed by the
// encoding -- presumably a leftover; confirm whether it can be removed.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14631 
// Integer bitwise AND with a logical-immediate: dst = src1 & src2 (32-bit).
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Format fixed: the encoding emits the non-flag-setting andw, not the
  // flag-setting "andsw" the format previously claimed.
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14646 
// Or Instructions

// Integer bitwise OR, register-register: dst = src1 | src2 (32-bit).
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Integer bitwise OR with a logical-immediate.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// Integer bitwise XOR, register-register: dst = src1 ^ src2 (32-bit).
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Integer bitwise XOR with a logical-immediate.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14710 
// Long Logical Instructions
// All format annotations below corrected from "# int" to "# long": these
// are the 64-bit (AndL/OrL/XorL) patterns, and the old comments made the
// -XX:+PrintOptoAssembly output indistinguishable from the int forms.

// Long bitwise AND, register-register: dst = src1 & src2 (64-bit).
// NOTE(review): the rFlagsReg cr operand is unused by the encoding, as in
// andI_reg_reg -- kept for interface compatibility.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Long bitwise AND with a logical-immediate.
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Or Instructions

// Long bitwise OR, register-register: dst = src1 | src2 (64-bit).
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Long bitwise OR with a logical-immediate.
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// Long bitwise XOR, register-register: dst = src1 ^ src2 (64-bit).
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Long bitwise XOR with a logical-immediate.
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14807 
// Sign-extend int to long: sbfm with immr=0, imms=31 is the sxtw alias.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// Zero-extend int to long: matches (AndL (ConvI2L src) 0xFFFFFFFF) and
// collapses the sign-extend + mask into a single ubfm (uxtw alias).
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14833 
// ============================================================================
// Scalar conversions between int/long and float/double (and float<->half).
// fcvt*  = FP precision change; fcvtzs* = FP -> signed int, round toward zero;
// scvtf* = signed int -> FP.  The 'w' forms operate on 32-bit registers.

// Truncate long to int: movw writes the low 32 bits and zeroes the rest.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}

// Float -> IEEE 754 half precision; result delivered in a GP register.
instruct convF2HF_reg_reg(iRegINoSp dst, vRegF src, vRegF tmp) %{
  match(Set dst (ConvF2HF src));
  format %{ "fcvt $tmp, $src\t# convert single to half precision\n\t"
            "smov $dst, $tmp\t# move result from $tmp to $dst"
  %}
  effect(TEMP tmp);
  ins_encode %{
      __ flt_to_flt16($dst$$Register, $src$$FloatRegister, $tmp$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}

// IEEE 754 half precision (in a GP register) -> float.
instruct convHF2F_reg_reg(vRegF dst, iRegINoSp src, vRegF tmp) %{
  match(Set dst (ConvHF2F src));
  format %{ "mov $tmp, $src\t# move source from $src to $tmp\n\t"
            "fcvt $dst, $tmp\t# convert half to single precision"
  %}
  effect(TEMP tmp);
  ins_encode %{
      __ flt16_to_flt($dst$$FloatRegister, $src$$Register, $tmp$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}

instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
15000 
// Math.round(double): delegates to the macro-assembler helper; needs an FP
// temp and clobbers flags.
instruct round_double_reg(iRegLNoSp dst, vRegD src, vRegD ftmp, rFlagsReg cr)
%{
  match(Set dst (RoundD src));
  effect(TEMP_DEF dst, TEMP ftmp, KILL cr);
  format %{ "java_round_double $dst,$src"%}
  ins_encode %{
    __ java_round_double($dst$$Register, as_FloatRegister($src$$reg),
                         as_FloatRegister($ftmp$$reg));
  %}
  ins_pipe(pipe_slow);
%}

// Math.round(float): same shape as the double variant above.
instruct round_float_reg(iRegINoSp dst, vRegF src, vRegF ftmp, rFlagsReg cr)
%{
  match(Set dst (RoundF src));
  effect(TEMP_DEF dst, TEMP ftmp, KILL cr);
  format %{ "java_round_float $dst,$src"%}
  ins_encode %{
    __ java_round_float($dst$$Register, as_FloatRegister($src$$reg),
                        as_FloatRegister($ftmp$$reg));
  %}
  ins_pipe(pipe_slow);
%}
15024 
15025 // stack <-> reg and reg <-> reg shuffles with no conversion
15026 
// Raw bit moves between FP and integer values via a stack slot.  No value
// conversion is performed; only the bit pattern moves.  Loads from the slot
// address sp + displacement.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Register -> stack-slot direction of the same raw bit moves.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
15134 
// Raw bit move double -> long via a stack slot (store direction).
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Format operand order fixed ($src, $dst): the FP source register is
  // stored to the stack slot, matching the encoding and all sibling rules.
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
15152 
// Raw bit move long -> double via a stack slot (store direction).
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}

// Direct register-to-register bit moves (fmov) -- no memory round trip.
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
15242 
15243 // ============================================================================
15244 // clearing of an array
15245 
// Zero 'cnt' HeapWords starting at 'base' (runtime-variable count).
// zero_words may emit a call to a stub and can fail if the code cache is
// full; that failure is recorded so the compile is bailed out.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base, KILL cr);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    address tpc = __ zero_words($base$$Register, $cnt$$Register);
    // nullptr (HotSpot style) instead of NULL.
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
15264 
// Zero a compile-time-constant number of HeapWords; only selected when the
// constant count is below the block-zeroing threshold.
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, iRegL_R11 temp, Universe dummy, rFlagsReg cr)
%{
  predicate((uint64_t)n->in(2)->get_long()
            < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
  match(Set dummy (ClearArray cnt base));
  effect(TEMP temp, USE_KILL base, KILL cr);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    address tpc = __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
    // nullptr (HotSpot style) instead of NULL.
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
15285 
15286 // ============================================================================
15287 // Overflow Math Instructions
15288 
// Overflow checks for add/sub/neg.  Each rule performs the flag-setting form
// of the operation (cmn = adds-to-zr, cmp = subs-to-zr) and leaves the
// result in the condition flags; the consumer tests the V flag
// (BoolTest::overflow / no_overflow).
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  // adds zr, ... is the explicit spelling of cmn for the immediate form.
  format %{ "adds  zr, $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ adds(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ subs(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Negation overflow: matched as (OverflowSub 0 op1).
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
15418 
// Multiply overflow checks.  There is no flag-setting multiply on AArch64,
// so overflow is detected by comparing the full-width product against the
// sign-extension of its low half; the non-branch forms then synthesize a V
// flag (via the 0x80000000 - 1 trick) so a generic cmpOp can consume it.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused multiply-overflow + branch: skips the V-flag synthesis and branches
// directly on EQ/NE.  Only legal when the Bool test is overflow/no_overflow.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}

instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
15508 
15509 // ============================================================================
15510 // Compare Instructions
15511 
// Signed int compares.  Immediate forms are split by encodability: addsub
// immediates cost one insn, arbitrary immediates need a scratch move (2x).
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15567 
// Unsigned compare Instructions; really, same as signed compare
// except it should only be used to feed an If or a CMovI which takes a
// cmpOpU.

instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15627 
// Signed long compares; same immediate-encodability split as the int forms.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15683 
// Unsigned long compares: identical encodings to CmpL, but produce
// rFlagsRegU so only cmpOpU consumers can use the result.
instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15739 
// Pointer and compressed-pointer compares; null checks use the test forms.
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Null check of a pointer.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Null check of a compressed pointer.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
15795 
// FP comparisons
//
// n.b. CmpF/CmpD set a normal flags reg which then gets compared
// using normal cmpOp. See declaration of rFlagsReg for details.

instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Compare against literal +0.0 uses the immediate-zero form of fcmp.
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}

instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
15857 
// Three-way float compare (CmpF3): $dst := -1 if src1 < src2 or unordered,
// 0 if equal, 1 if greater.  The csinv/csneg pair builds the result without
// branches.  Removed a dead 'Label done' that was only ever bound, never
// branched to; also fixed the unbalanced paren in the format string.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
15885 
// Three-way double compare (CmpD3): $dst := -1 if src1 < src2 or unordered,
// 0 if equal, 1 if greater.  Removed the dead 'Label done' (bound but never
// branched to) and fixed the unbalanced paren in the format string.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
15912 
// Three-way float compare against the constant 0.0 (CmpF3 with zero operand):
// materializes -1, 0 or +1 in $dst using the zero-immediate form of FCMP.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
15939 
// Three-way double compare against the constant 0.0 (CmpD3 with zero operand):
// materializes -1, 0 or +1 in $dst using the zero-immediate form of FCMP.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
15965 
// CmpLTMask: $dst = ($p < $q) ? -1 : 0, i.e. an all-ones mask when p < q.
// Implemented as compare; set 0/1 on LT; then negate to turn 1 into -1.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    // $dst = 1 if p < q, else 0.
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    // $dst = 0 - $dst, mapping 1 -> -1 (all ones) and 0 -> 0.
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
15986 
// CmpLTMask against zero: $dst = ($src < 0) ? -1 : 0. A single arithmetic
// shift right by 31 replicates the sign bit across the whole word.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
16002 
16003 // ============================================================================
16004 // Max and Min
16005 
16006 // Like compI_reg_reg or compI_reg_immI0 but without match rule and second zero parameter.
16007 
// Helper rule (no match rule): compare an int register against 0, setting the
// flags. Intended for use inside expand rules such as the Min/Max patterns
// below, per the comment above.
instruct compI_reg_imm0(rFlagsReg cr, iRegI src)
%{
  effect(DEF cr, USE src);
  ins_cost(INSN_COST);
  format %{ "cmpw $src, 0" %}

  ins_encode %{
    __ cmpw($src$$Register, 0);
  %}
  ins_pipe(icmp_reg_imm);
%}
16019 
// MinI: expands into a compare followed by a conditional move that selects
// $src1 when $src1 < $src2 (cmovI_reg_reg_lt is defined elsewhere in this file).
instruct minI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2)
%{
  match(Set dst (MinI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_lt(dst, src1, src2, cr);
  %}
%}
16031 
// MaxI: expands into a compare followed by a conditional move that selects
// $src1 when $src1 > $src2 (cmovI_reg_reg_gt is defined elsewhere in this file).
instruct maxI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2)
%{
  match(Set dst (MaxI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_gt(dst, src1, src2, cr);
  %}
%}
16043 
16044 
16045 // ============================================================================
16046 // Branch Instructions
16047 
16048 // Direct Branch.
// Unconditional direct branch (Goto) to a label.
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
16062 
16063 // Conditional Near Branch
// Conditional near branch on the signed flags register.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
16083 
16084 // Conditional Near Branch Unsigned
// Conditional near branch on the unsigned flags register.
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
16104 
16105 // Make use of CBZ and CBNZ.  These instructions, as well as being
16106 // shorter than (cmp; branch), have the additional benefit of not
16107 // killing the flags.
16108 
// Branch on int ==/!= 0 using CBZW/CBNZW; fuses the compare into the branch
// and leaves the flags untouched (see comment above).
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16125 
// Branch on long ==/!= 0 using the 64-bit CBZ/CBNZ forms.
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16142 
// Branch on pointer ==/!= null using the 64-bit CBZ/CBNZ forms.
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16159 
// Branch on narrow oop ==/!= 0 using the 32-bit CBZW/CBNZW forms.
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16176 
// Null check of a decoded narrow oop: test the compressed form directly
// (a compressed oop is zero iff the decoded oop is null), avoiding the decode.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16193 
// Branch on unsigned int compare against zero. For an unsigned value x,
// both EQ and LS ("unsigned <=") against 0 hold exactly when x == 0, so
// those conditions map to CBZW and the remaining ones to CBNZW.
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16210 
// Branch on unsigned long compare against zero; same condition mapping as
// cmpUI_imm0_branch (EQ/LS reduce to "is zero"), using the 64-bit CBZ/CBNZ.
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16227 
16228 // Test bit and Branch
16229 
16230 // Patterns for short (< 32KiB) variants
// Sign test of a long via TBNZ/TBZ on bit 63: "< 0" branches when the sign
// bit is set (NE), ">= 0" when it is clear (EQ). Short (< 32KiB) variant.
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    // LT -> branch if bit 63 set (NE); GE -> branch if clear (EQ).
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16246 
// Sign test of an int via TBNZ/TBZ on bit 31 (the int sign bit).
// Short (< 32KiB) variant.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    // LT -> branch if bit 31 set (NE); GE -> branch if clear (EQ).
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16262 
// Single-bit test of a long: (op1 & (1 << k)) ==/!= 0 becomes TBZ/TBNZ on
// bit k. The predicate restricts the mask to a power of two so exactly one
// bit is tested. Short (< 32KiB) variant.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // Bit index of the single set bit in the mask.
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16279 
// Single-bit test of an int: (op1 & (1 << k)) ==/!= 0 becomes TBZ/TBNZ on
// bit k; mask constrained to a power of two. Short (< 32KiB) variant.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // Bit index of the single set bit in the mask.
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16296 
16297 // And far variants
// Far variant of cmpL_branch_sign: same bit-63 sign test, but the tbr
// helper is told the target may be out of TBZ/TBNZ range (far == true).
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16312 
// Far variant of cmpI_branch_sign: same bit-31 sign test with a far target.
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16327 
// Far variant of cmpL_branch_bit: single-bit test of a long with a far target.
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16343 
// Far variant of cmpI_branch_bit: single-bit test of an int with a far target.
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16359 
16360 // Test bits
16361 
// Set flags from (op1 & imm) vs 0 with a single TST; the predicate only
// accepts masks encodable as a 64-bit logical immediate.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
16374 
// Set flags from (op1 & imm) vs 0 with a single TSTW; the predicate only
// accepts masks encodable as a 32-bit logical immediate.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
16387 
// Register-register form: set flags from (op1 & op2) vs 0 with TST.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
16398 
// Register-register form: set flags from (op1 & op2) vs 0 with TSTW (32-bit).
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
16409 
16410 
16411 // Conditional Far Branch
16412 // Conditional Far Branch Unsigned
16413 // TODO: fixme
16414 
16415 // counted loop end branch near
// Conditional back-branch closing a counted loop; shares the conditional
// branch encoding with branchCon.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
16431 
16432 // counted loop end branch far
16433 // TODO: fixme
16434 
16435 // ============================================================================
16436 // inlined locking and unlocking
16437 
// Inlined fast-path monitor enter: delegates to MacroAssembler::fast_lock,
// which sets the flags consumed by the matched (FastLock object box) result.
// All three temps are clobbered, so the format lists them all.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2, TEMP tmp3);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2,$tmp3" %}

  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_serial);
%}
16454 
// Inlined fast-path monitor exit: delegates to MacroAssembler::fast_unlock,
// which sets the flags consumed by the matched (FastUnlock object box) result.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register);
  %}

  ins_pipe(pipe_serial);
%}
16469 
16470 
16471 // ============================================================================
16472 // Safepoint Instructions
16473 
16474 // TODO
16475 // provide a near and far version of this code
16476 
// Safepoint poll: a load from the polling page (discarded into zr per the
// format) that faults when a safepoint is pending, with poll_type relocation
// so the VM can recognize the trapping PC.
instruct safePoint(rFlagsReg cr, iRegP poll)
%{
  match(SafePoint poll);
  effect(KILL cr);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
16490 
16491 
16492 // ============================================================================
16493 // Procedure Call/Return Instructions
16494 
16495 // Call Java Static Instruction
16496 
// Direct call to a statically-bound Java method, followed by the shared
// call epilog encoding.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode(aarch64_enc_java_static_call(meth),
             aarch64_enc_call_epilog);

  ins_pipe(pipe_class_call);
%}
16512 
16513 // TO HERE
16514 
16515 // Call Java Dynamic Instruction
// Dynamically-dispatched Java call (inline-cache based), followed by the
// shared call epilog encoding.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode(aarch64_enc_java_dynamic_call(meth),
             aarch64_enc_call_epilog);

  ins_pipe(pipe_class_call);
%}
16531 
16532 // Call Runtime Instruction
16533 
// Call from compiled Java code into the VM runtime.
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16548 
16549 // Call Runtime Instruction
16550 
// Call to a runtime leaf routine (no safepoint/stack-walk interaction);
// shares the java_to_runtime call encoding.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16565 
16566 // Call Runtime Instruction
16567 
// Call to a runtime leaf routine that does not use floating point
// (CallLeafNoFP); same encoding as the other runtime calls.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16582 
16583 // Tail Call; Jump from runtime stub to Java code.
16584 // Also known as an 'interprocedural jump'.
16585 // Target of jump will eventually return to caller.
16586 // TailJump below removes the return address.
16587 // Don't use rfp for 'jump_target' because a MachEpilogNode has already been
16588 // emitted just above the TailCall which has reset rfp to the caller state.
// Tail call: indirect jump to Java code with the method pointer held in the
// inline-cache register (see the interprocedural-jump comment above).
instruct TailCalljmpInd(iRegPNoSpNoRfp jump_target, inline_cache_RegP method_ptr)
%{
  match(TailCall jump_target method_ptr);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_ptr holds method" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
16601 
// Tail jump: indirect jump carrying an exception oop in r0; unlike TailCall
// this removes the return address (see comment above).
instruct TailjmpInd(iRegPNoSpNoRfp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
16614 
16615 // Create exception oop: created by stack-crawling runtime code.
16616 // Created exception is now available to this handler, and is setup
16617 // just prior to jumping to this handler. No code emitted.
16618 // TODO check
16619 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Pseudo-instruction marking the exception oop as live in r0 on entry to an
// exception handler; emits no code (size(0)).
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
16632 
16633 // Rethrow exception: The exception oop will come in the first
16634 // argument position. Then JUMP (not call) to the rethrow stub code.
// Rethrow: jump (not call) to the rethrow stub; the exception oop is in the
// first argument position per the comment above.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
16645 
16646 
16647 // Return Instruction
16648 // epilog node loads ret address into lr as part of frame pop
// Method return; the epilog has already loaded the return address into lr
// (see comment above), so this is a bare ret.
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
16659 
16660 // Die now.
// Halt node: emit a stop with the halt reason, but only when the block is
// actually reachable — unreachable Halts emit nothing.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    if (is_reachable()) {
      __ stop(_halt_reason);
    }
  %}

  ins_pipe(pipe_class_default);
%}
16675 
16676 // ============================================================================
16677 // Partial Subtype Check
16678 //
// Search the subclass's secondary supers (superklass) array for an instance
// of the superklass.  Set a hidden
16680 // internal cache on a hit (cache is checked with exposed code in
16681 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
16682 // encoding ALSO sets flags.
16683 
// Partial subtype check producing a result register: per the section comment
// above, returns non-zero for a miss and zero for a hit, and also sets flags.
// opcode(0x1) tells the encoding to zero the result register on a hit.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
16698 
// Flags-only variant: matches a subtype-check result compared against null,
// so only the flags are needed and the result register need not be zeroed on
// a hit (opcode(0x0)); result and temp are clobbered as scratch.
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
16713 
// Intrinsics for String.compareTo()
16715 
// String.compareTo intrinsic, UTF-16/UTF-16 encoding, NEON-only path
// (UseSVE == 0). No vector or predicate temps are needed, so fnoreg/pnoreg
// placeholders are passed. Format now lists both killed temps, matching the
// UL/LU variants below.
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, pnoreg, pnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16733 
// String.compareTo intrinsic, Latin-1/Latin-1 encoding, NEON-only path
// (UseSVE == 0). Format now lists both killed temps, matching the UL/LU
// variants below.
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, pnoreg, pnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16750 
// String.compareTo intrinsic, mixed UTF-16 vs Latin-1 encoding, NEON-only
// path (UseSVE == 0); needs three vector temps for widening the Latin-1 side.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, pnoreg, pnoreg, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16770 
// String.compareTo intrinsic, mixed Latin-1 vs UTF-16 encoding (mirror of
// string_compareUL), NEON-only path (UseSVE == 0).
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, pnoreg, pnoreg, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
16790 
16791 // Note that Z registers alias the corresponding NEON registers, we declare the vector operands of
16792 // these string_compare variants as NEON register type for convenience so that the prototype of
16793 // string_compare can be shared with all variants.
16794 
// SVE variant of the Latin-1/Latin-1 compare (UseSVE > 0): uses two vector
// temps plus two governing predicate temps; see the note above about Z
// registers aliasing the NEON registers.
instruct string_compareLL_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16817 
// SVE variant of the Latin-1 vs UTF-16 compare (UseSVE > 0); same temp
// layout as string_compareLL_sve.
instruct string_compareLU_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
16840 
// SVE flavour of StrComp for a UTF-16 first string vs a Latin1 second string
// (UL); selected only when UseSVE > 0.
instruct string_compareUL_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    // Third vector-temp slot is unused by the SVE stub, hence fnoreg.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16863 
// SVE flavour of StrComp for UTF-16/UTF-16 (UU) strings; selected only when
// UseSVE > 0.
instruct string_compareUU_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    // Third vector-temp slot is unused by the SVE stub, hence fnoreg.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16886 
// StrIndexOf, UTF-16 haystack / UTF-16 needle (UU), needle length only known
// at run time: constant count -1 selects the general-case stub.
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                          iRegINoSp tmp3, iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
         TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    // icnt2 == -1: needle length is not a compile-time constant.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16910 
// StrIndexOf, Latin1 haystack / Latin1 needle (LL), needle length only known
// at run time: constant count -1 selects the general-case stub.
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
         TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    // icnt2 == -1: needle length is not a compile-time constant.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16934 
// StrIndexOf, UTF-16 haystack / Latin1 needle (UL), needle length only known
// at run time: constant count -1 selects the general-case stub.
// Fix vs. siblings (UU/LL): the KILL list in the format string was missing
// the '$' on cnt1, so the printed debug format showed the literal text
// "cnt1" instead of the operand; also normalized spacing in the operand list.
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5,
         TEMP tmp6, TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    // icnt2 == -1: needle length is not a compile-time constant.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16958 
// StrIndexOf (UU) with a small compile-time-constant needle length
// (immI_le_4); the constant is passed to the stub and the cnt2 register
// slots become zr.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16980 
// StrIndexOf (LL) with a small compile-time-constant needle length
// (immI_le_4); the constant is passed to the stub and the cnt2 register
// slots become zr.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
17002 
// StrIndexOf (UL) with a compile-time-constant needle length; note the
// constant operand is immI_1, so this rule only matches a needle of length
// exactly 1 (unlike the UU/LL variants which allow lengths up to 4).
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
17024 
// StrIndexOfChar on a UTF-16 string, NEON path (only when UseSVE == 0;
// the SVE variant below takes over otherwise).
instruct string_indexof_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                             iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                             iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  predicate((UseSVE == 0) && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "StringUTF16 IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
17043 
// StrIndexOfChar on a Latin1 string, NEON path (only when UseSVE == 0;
// the SVE variant below takes over otherwise).
instruct stringL_indexof_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  predicate((UseSVE == 0) && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "StringLatin1 IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ stringL_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                            $result$$Register, $tmp1$$Register, $tmp2$$Register,
                            $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
17062 
// StrIndexOfChar on a Latin1 string, SVE path (UseSVE > 0).  A single stub
// serves both encodings; isL=true selects the Latin1 flavour.
instruct stringL_indexof_char_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                                  iRegI_R0 result, vecA ztmp1, vecA ztmp2,
                                  pRegGov pgtmp, pReg ptmp, rFlagsReg cr) %{
  predicate(UseSVE > 0 && ((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L);
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(TEMP ztmp1, TEMP ztmp2, TEMP pgtmp, TEMP ptmp, KILL cr);
  format %{ "StringLatin1 IndexOf char[] $str1,$cnt1,$ch -> $result # use sve" %}
  ins_encode %{
    __ string_indexof_char_sve($str1$$Register, $cnt1$$Register, $ch$$Register,
                               $result$$Register, $ztmp1$$FloatRegister,
                               $ztmp2$$FloatRegister, $pgtmp$$PRegister,
                               $ptmp$$PRegister, true /* isL */);
  %}
  ins_pipe(pipe_class_memory);
%}
17078 
// StrIndexOfChar on a UTF-16 string, SVE path (UseSVE > 0); isL=false
// selects the UTF-16 flavour of the shared stub.
instruct stringU_indexof_char_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                                  iRegI_R0 result, vecA ztmp1, vecA ztmp2,
                                  pRegGov pgtmp, pReg ptmp, rFlagsReg cr) %{
  predicate(UseSVE > 0 && ((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U);
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(TEMP ztmp1, TEMP ztmp2, TEMP pgtmp, TEMP ptmp, KILL cr);
  format %{ "StringUTF16 IndexOf char[] $str1,$cnt1,$ch -> $result # use sve" %}
  ins_encode %{
    __ string_indexof_char_sve($str1$$Register, $cnt1$$Register, $ch$$Register,
                               $result$$Register, $ztmp1$$FloatRegister,
                               $ztmp2$$FloatRegister, $pgtmp$$PRegister,
                               $ptmp$$PRegister, false /* isL */);
  %}
  ins_pipe(pipe_class_memory);
%}
17094 
// StrEquals for two Latin1 (LL) strings.  The trailing literal 1 passed to
// the stub selects this flavour (the UU rule below passes 2 -- presumably
// the element size in bytes; confirm against string_equals).
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
17110 
// StrEquals for two UTF-16 (UU) strings.  The trailing literal 2 selects
// this flavour of the shared stub (the LL rule above passes 1).
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
17126 
// AryEq for byte arrays (LL encoding); trailing literal 1 selects the byte
// flavour of the shared stub.  arrays_equals() may emit a call to a shared
// stub and returns NULL when the code cache has no room, in which case we
// bail out of the compile rather than emit broken code.
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                       vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3,
         TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         TEMP vtmp6, TEMP vtmp7, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result # KILL $ary1 $ary2 $tmp $tmp1 $tmp2 $tmp3 V0-V7 cr" %}
  ins_encode %{
    address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register,
                                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                                   $result$$Register, $tmp$$Register, 1);
    if (tpc == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
17151 
// AryEq for char arrays (UU encoding); trailing literal 2 selects the char
// flavour of the shared stub.  As in array_equalsB, a NULL return from
// arrays_equals() means the code cache is full and the compile is abandoned.
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                       vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3,
         TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         TEMP vtmp6, TEMP vtmp7, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result # KILL $ary1 $ary2 $tmp $tmp1 $tmp2 $tmp3 V0-V7 cr" %}
  ins_encode %{
    address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register,
                                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                                   $result$$Register, $tmp$$Register, 2);
    if (tpc == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
17176 
// CountPositives over a byte array.  The macro-assembler helper may emit a
// stub call; a NULL return means the code cache is full and the compile is
// abandoned.
instruct count_positives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (CountPositives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "count positives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    address tpc = __ count_positives($ary1$$Register, $len$$Register, $result$$Register);
    if (tpc == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe( pipe_slow );
%}
17191 
17192 // fast char[] to byte[] compression
// fast char[] to byte[] compression
// StrCompressedCopy: narrows UTF-16 chars in src to Latin1 bytes in dst.
// len is read but not clobbered (USE, not USE_KILL); result reports the
// outcome of the stub.
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                         vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         USE_KILL src, USE_KILL dst, USE len, KILL cr);

  format %{ "String Compress $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $result$$Register, $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                           $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                           $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
17211 
17212 // fast byte[] to char[] inflation
// fast byte[] to char[] inflation
// StrInflatedCopy: widens Latin1 bytes in src to UTF-16 chars in dst.
// Only vtmp0-vtmp2 are handed to byte_array_inflate; vtmp3-vtmp6 are
// nevertheless declared TEMP -- presumably clobbered inside the stub path,
// verify against byte_array_inflate before narrowing the effect list.
// A NULL return means the code cache is full and the compile is abandoned.
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len, iRegP_R3 tmp,
                        vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                        vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3,
         TEMP vtmp4, TEMP vtmp5, TEMP vtmp6, TEMP tmp,
         USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst # KILL $tmp $src $dst $len V0-V6 cr" %}
  ins_encode %{
    address tpc = __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                                        $vtmp2$$FloatRegister, $tmp$$Register);
    if (tpc == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
17234 
17235 // encode char[] to byte[] in ISO_8859_1
// encode char[] to byte[] in ISO_8859_1
// EncodeISOArray, ISO-8859-1 flavour: the 'false' passed to the stub is the
// ascii flag (cf. encode_ascii_array below, guarded by is_ascii()).
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                          vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                          iRegI_R0 result, rFlagsReg cr)
%{
  predicate(!((EncodeISOArrayNode*)n)->is_ascii());
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE len, KILL vtmp0, KILL vtmp1,
         KILL vtmp2, KILL vtmp3, KILL vtmp4, KILL vtmp5, KILL cr);

  format %{ "Encode ISO array $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
                        $result$$Register, false,
                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                        $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                        $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_class_memory);
%}
17256 
// EncodeISOArray, ASCII flavour (is_ascii() true): same stub as
// encode_iso_array but with the ascii flag set to true.
instruct encode_ascii_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                            vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                            vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                            iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((EncodeISOArrayNode*)n)->is_ascii());
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE len, KILL vtmp0, KILL vtmp1,
         KILL vtmp2, KILL vtmp3, KILL vtmp4, KILL vtmp5, KILL cr);

  format %{ "Encode ASCII array $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
                        $result$$Register, true,
                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                        $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                        $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_class_memory);
%}
17277 
17278 //----------------------------- CompressBits/ExpandBits ------------------------
17279 
// CompressBits (int, register inputs).  There is no GPR bit-extract on
// AArch64, so the values are moved into S-lanes of vector registers,
// compressed with SVE2 BEXT, and the result moved back to a GPR.
instruct compressBitsI_reg(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ S, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ S, 0, $mask$$Register);
    __ sve_bext($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17297 
// CompressBits (int) fused with a memory load and a constant mask: the
// source is loaded straight into a vector register and the mask comes from
// the constant pool.
instruct compressBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits (LoadI mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrs   $tsrc, $mem\n\t"
            "ldrs   $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ ldrs($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bext($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17316 
// CompressBits (long, register inputs) -- same scheme as compressBitsI_reg
// but through D-lanes.
instruct compressBitsL_reg(iRegLNoSp dst, iRegL src, iRegL mask,
                           vRegD tdst, vRegD tsrc, vRegD tmask) %{
  match(Set dst (CompressBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ D, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ D, 0, $mask$$Register);
    __ sve_bext($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17334 
// CompressBits (long) fused with a memory load and a constant mask; the
// mask is materialized from the constant pool via ldrd.
instruct compressBitsL_memcon(iRegLNoSp dst, memory8 mem, immL mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits (LoadL mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrd   $tsrc, $mem\n\t"
            "ldrd   $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ ldrd($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bext($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17353 
// ExpandBits (int, register inputs) -- mirror of compressBitsI_reg using
// SVE2 BDEP (bit deposit) instead of BEXT.
instruct expandBitsI_reg(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask,
                         vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ S, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ S, 0, $mask$$Register);
    __ sve_bdep($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17371 
// ExpandBits (int) fused with a memory load and a constant mask, using
// SVE2 BDEP; mirror of compressBitsI_memcon.
instruct expandBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask,
                         vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits (LoadI mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrs   $tsrc, $mem\n\t"
            "ldrs   $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ ldrs($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bdep($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17390 
// ExpandBits (long, register inputs) -- mirror of compressBitsL_reg using
// SVE2 BDEP through D-lanes.
instruct expandBitsL_reg(iRegLNoSp dst, iRegL src, iRegL mask,
                         vRegD tdst, vRegD tsrc, vRegD tmask) %{
  match(Set dst (ExpandBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ D, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ D, 0, $mask$$Register);
    __ sve_bdep($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17408 
17409 
// ExpandBits (long) fused with a memory load and a constant mask, using
// SVE2 BDEP; mirror of compressBitsL_memcon.
// Fix: dst was declared iRegINoSp, but the matched node is the long-valued
// (ExpandBits (LoadL mem) mask) -- its result must live in a long register
// class, consistent with expandBitsL_reg and compressBitsL_memcon above.
instruct expandBitsL_memcon(iRegLNoSp dst, memory8 mem, immL mask,
                         vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits (LoadL mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrd   $tsrc, $mem\n\t"
            "ldrd   $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ ldrd($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bdep($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17428 
17429 // ============================================================================
17430 // This name is KNOWN by the ADLC and cannot be changed.
17431 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
17432 // for this guy.
// ThreadLocal: the current-thread pointer already lives in the dedicated
// thread register (thread_RegP), so this emits no code at all -- size(0),
// cost 0, empty encoding.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
17447 
17448 //----------PEEPHOLE RULES-----------------------------------------------------
17449 // These must follow all instruction definitions as they use the names
17450 // defined in the instructions definitions.
17451 //
17452 // peepmatch ( root_instr_name [preceding_instruction]* );
17453 //
17454 // peepconstraint %{
17455 // (instruction_number.operand_name relational_op instruction_number.operand_name
17456 //  [, ...] );
17457 // // instruction numbers are zero-based using left to right order in peepmatch
17458 //
17459 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
17460 // // provide an instruction_number.operand_name for each operand that appears
17461 // // in the replacement instruction's match rule
17462 //
17463 // ---------VM FLAGS---------------------------------------------------------
17464 //
17465 // All peephole optimizations can be turned off using -XX:-OptoPeephole
17466 //
17467 // Each peephole rule is given an identifying number starting with zero and
17468 // increasing by one in the order seen by the parser.  An individual peephole
17469 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
17470 // on the command-line.
17471 //
17472 // ---------CURRENT LIMITATIONS----------------------------------------------
17473 //
17474 // Only match adjacent instructions in same basic block
17475 // Only equality constraints
17476 // Only constraints between operands, not (0.dest_reg == RAX_enc)
17477 // Only one replacement instruction
17478 //
17479 // ---------EXAMPLE----------------------------------------------------------
17480 //
17481 // // pertinent parts of existing instructions in architecture description
17482 // instruct movI(iRegINoSp dst, iRegI src)
17483 // %{
17484 //   match(Set dst (CopyI src));
17485 // %}
17486 //
17487 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
17488 // %{
17489 //   match(Set dst (AddI dst src));
17490 //   effect(KILL cr);
17491 // %}
17492 //
17493 // // Change (inc mov) to lea
17494 // peephole %{
17495 //   // increment preceded by register-register move
17496 //   peepmatch ( incI_iReg movI );
17497 //   // require that the destination register of the increment
17498 //   // match the destination register of the move
17499 //   peepconstraint ( 0.dst == 1.dst );
17500 //   // construct a replacement instruction that sets
17501 //   // the destination to ( move's source register + one )
17502 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
17503 // %}
17504 //
17505 
17506 // Implementation no longer uses movX instructions since
17507 // machine-independent system no longer uses CopyX nodes.
17508 //
17509 // peephole
17510 // %{
17511 //   peepmatch (incI_iReg movI);
17512 //   peepconstraint (0.dst == 1.dst);
17513 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17514 // %}
17515 
17516 // peephole
17517 // %{
17518 //   peepmatch (decI_iReg movI);
17519 //   peepconstraint (0.dst == 1.dst);
17520 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17521 // %}
17522 
17523 // peephole
17524 // %{
17525 //   peepmatch (addI_iReg_imm movI);
17526 //   peepconstraint (0.dst == 1.dst);
17527 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17528 // %}
17529 
17530 // peephole
17531 // %{
17532 //   peepmatch (incL_iReg movL);
17533 //   peepconstraint (0.dst == 1.dst);
17534 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17535 // %}
17536 
17537 // peephole
17538 // %{
17539 //   peepmatch (decL_iReg movL);
17540 //   peepconstraint (0.dst == 1.dst);
17541 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17542 // %}
17543 
17544 // peephole
17545 // %{
17546 //   peepmatch (addL_iReg_imm movL);
17547 //   peepconstraint (0.dst == 1.dst);
17548 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17549 // %}
17550 
17551 // peephole
17552 // %{
17553 //   peepmatch (addP_iReg_imm movP);
17554 //   peepconstraint (0.dst == 1.dst);
17555 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
17556 // %}
17557 
17558 // // Change load of spilled value to only a spill
17559 // instruct storeI(memory mem, iRegI src)
17560 // %{
17561 //   match(Set mem (StoreI mem src));
17562 // %}
17563 //
17564 // instruct loadI(iRegINoSp dst, memory mem)
17565 // %{
17566 //   match(Set dst (LoadI mem));
17567 // %}
17568 //
17569 
17570 //----------SMARTSPILL RULES---------------------------------------------------
17571 // These must follow all instruction definitions as they use the names
17572 // defined in the instructions definitions.
17573 
17574 // Local Variables:
17575 // mode: c++
17576 // End: