1 //
    2 // Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
    3 // Copyright (c) 2014, 2021, Red Hat, Inc. All rights reserved.
    4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    5 //
    6 // This code is free software; you can redistribute it and/or modify it
    7 // under the terms of the GNU General Public License version 2 only, as
    8 // published by the Free Software Foundation.
    9 //
   10 // This code is distributed in the hope that it will be useful, but WITHOUT
   11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   13 // version 2 for more details (a copy is included in the LICENSE file that
   14 // accompanied this code).
   15 //
   16 // You should have received a copy of the GNU General Public License version
   17 // 2 along with this work; if not, write to the Free Software Foundation,
   18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   19 //
   20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   21 // or visit www.oracle.com if you need additional information or have any
   22 // questions.
   23 //
   24 //
   25 
   26 // AArch64 Architecture Description File
   27 
   28 //----------REGISTER DEFINITION BLOCK------------------------------------------
   29 // This information is used by the matcher and the register allocator to
   30 // describe individual registers and classes of registers within the target
   31 // architecture.
   32 
   33 register %{
   34 //----------Architecture Description Register Definitions----------------------
   35 // General Registers
   36 // "reg_def"  name ( register save type, C convention save type,
   37 //                   ideal register type, encoding );
   38 // Register Save Types:
   39 //
   40 // NS  = No-Save:       The register allocator assumes that these registers
   41 //                      can be used without saving upon entry to the method, &
   42 //                      that they do not need to be saved at call sites.
   43 //
   44 // SOC = Save-On-Call:  The register allocator assumes that these registers
   45 //                      can be used without saving upon entry to the method,
   46 //                      but that they must be saved at call sites.
   47 //
   48 // SOE = Save-On-Entry: The register allocator assumes that these registers
   49 //                      must be saved before using them upon entry to the
   50 //                      method, but they do not need to be saved at call
   51 //                      sites.
   52 //
   53 // AS  = Always-Save:   The register allocator assumes that these registers
   54 //                      must be saved before using them upon entry to the
   55 //                      method, & that they must be saved at call sites.
   56 //
   57 // Ideal Register Type is used to determine how to save & restore a
   58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
   59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
   60 //
   61 // The encoding number is the actual bit-pattern placed into the opcodes.
   62 
   63 // We must define the 64 bit int registers in two 32 bit halves, the
   64 // real lower register and a virtual upper half register. upper halves
   65 // are used by the register allocator but are not actually supplied as
   66 // operands to memory ops.
   67 //
   68 // follow the C1 compiler in making registers
   69 //
   70 //   r0-r7,r10-r26 volatile (caller save)
   71 //   r27-r32 system (no save, no allocate)
   72 //   r8-r9 non-allocatable (so we can use them as scratch regs)
   73 //
// As regards Java usage, we don't use any callee-save registers
// because this makes it difficult to de-optimise a frame (see comment
// in x86 implementation of Deoptimization::unwind_callee_save_values).
   77 //
   78 
   79 // General Registers
   80 
// r0-r7: argument/result registers; caller-saved (SOC) under both the
// Java and C conventions.
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
// r8/r9 are kept out of the allocator (NS for Java) so they remain
// usable as scratch registers at any point.
reg_def R8      ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()         ); // rscratch1, non-allocatable
reg_def R8_H    ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()->next() );
reg_def R9      ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()         ); // rscratch2, non-allocatable
reg_def R9_H    ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()->next() );
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
// r18 (r18_tls) is the platform/TLS register on some ABIs; when
// R18_RESERVED is defined it is excluded via the non_allocatable
// register classes below.
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18_tls->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18_tls->as_VMReg()->next());
// r19-r28: callee-saved (SOE) under the C convention, but treated as
// caller-saved (SOC) for Java code -- see the note above about frame
// de-optimisation.
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
// r27-r31 have fixed roles and are listed last in the allocation order
// (see alloc_class chunk0).
reg_def R27     ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
  145 
  146 // ----------------------------
  147 // Float/Double/Vector Registers
  148 // ----------------------------
  149 
  150 // Double Registers
  151 
  152 // The rules of ADL require that double registers be defined in pairs.
  153 // Each pair must be two 32-bit values, but not necessarily a pair of
  154 // single float registers. In each pair, ADLC-assigned register numbers
  155 // must be adjacent, with the lower number even. Finally, when the
  156 // CPU stores such a register pair to memory, the word associated with
  157 // the lower ADLC-assigned number must be stored to the lower address.
  158 
  159 // AArch64 has 32 floating-point registers. Each can store a vector of
  160 // single or double precision floating-point values up to 8 * 32
  161 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
  162 // use the first float or double element of the vector.
  163 
// For Java use, float registers v0-v15 are always save-on-call, whereas
// the platform ABI treats v8-v15 as callee save. Float registers
// v16-v31 are SOC as per the platform spec.
  167 
  168 // For SVE vector registers, we simply extend vector register size to 8
  169 // 'logical' slots. This is nominally 256 bits but it actually covers
  170 // all possible 'physical' SVE vector register lengths from 128 ~ 2048
  171 // bits. The 'physical' SVE vector register length is detected during
  172 // startup, so the register allocator is able to identify the correct
  173 // number of bytes needed for an SVE spill/unspill.
  174 // Note that a vector register with 4 slots denotes a 128-bit NEON
  175 // register allowing it to be distinguished from the corresponding SVE
  176 // vector register when the SVE vector length is 128 bits.
  177 
  // Each FP/SIMD register Vn is described by four 32-bit logical slots:
  // Vn (bits 0-31), Vn_H (32-63), Vn_J (64-95), Vn_K (96-127).
  // For v8-v15 only the low 64 bits (Vn, Vn_H) are marked SOE for the
  // C convention; the upper slots (Vn_J, Vn_K) stay SOC, matching the
  // ABI which preserves only the bottom 64 bits of these registers.
  reg_def V0   ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(3) );

  // v8-v15: low 64 bits callee-saved for C calls (SOE on Vn/Vn_H only).
  reg_def V8   ( SOC, SOE, Op_RegF, 8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOE, Op_RegF, 8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOE, Op_RegF, 9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOE, Op_RegF, 9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(3) );

  reg_def V10   ( SOC, SOE, Op_RegF, 10, v10->as_VMReg()          );
  reg_def V10_H ( SOC, SOE, Op_RegF, 10, v10->as_VMReg()->next()  );
  reg_def V10_J ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2) );
  reg_def V10_K ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3) );

  reg_def V11   ( SOC, SOE, Op_RegF, 11, v11->as_VMReg()          );
  reg_def V11_H ( SOC, SOE, Op_RegF, 11, v11->as_VMReg()->next()  );
  reg_def V11_J ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2) );
  reg_def V11_K ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3) );

  reg_def V12   ( SOC, SOE, Op_RegF, 12, v12->as_VMReg()          );
  reg_def V12_H ( SOC, SOE, Op_RegF, 12, v12->as_VMReg()->next()  );
  reg_def V12_J ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2) );
  reg_def V12_K ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3) );

  reg_def V13   ( SOC, SOE, Op_RegF, 13, v13->as_VMReg()          );
  reg_def V13_H ( SOC, SOE, Op_RegF, 13, v13->as_VMReg()->next()  );
  reg_def V13_J ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2) );
  reg_def V13_K ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3) );

  reg_def V14   ( SOC, SOE, Op_RegF, 14, v14->as_VMReg()          );
  reg_def V14_H ( SOC, SOE, Op_RegF, 14, v14->as_VMReg()->next()  );
  reg_def V14_J ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2) );
  reg_def V14_K ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3) );

  reg_def V15   ( SOC, SOE, Op_RegF, 15, v15->as_VMReg()          );
  reg_def V15_H ( SOC, SOE, Op_RegF, 15, v15->as_VMReg()->next()  );
  reg_def V15_J ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2) );
  reg_def V15_K ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3) );

  // v16-v31: fully caller-saved (SOC) per the platform ABI.
  reg_def V16   ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()          );
  reg_def V16_H ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next()  );
  reg_def V16_J ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2) );
  reg_def V16_K ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3) );

  reg_def V17   ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()          );
  reg_def V17_H ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next()  );
  reg_def V17_J ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2) );
  reg_def V17_K ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3) );

  reg_def V18   ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()          );
  reg_def V18_H ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next()  );
  reg_def V18_J ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2) );
  reg_def V18_K ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3) );

  reg_def V19   ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()          );
  reg_def V19_H ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next()  );
  reg_def V19_J ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2) );
  reg_def V19_K ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3) );

  reg_def V20   ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()          );
  reg_def V20_H ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next()  );
  reg_def V20_J ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2) );
  reg_def V20_K ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3) );

  reg_def V21   ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()          );
  reg_def V21_H ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next()  );
  reg_def V21_J ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2) );
  reg_def V21_K ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3) );

  reg_def V22   ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()          );
  reg_def V22_H ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next()  );
  reg_def V22_J ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2) );
  reg_def V22_K ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3) );

  reg_def V23   ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()          );
  reg_def V23_H ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next()  );
  reg_def V23_J ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2) );
  reg_def V23_K ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3) );

  reg_def V24   ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()          );
  reg_def V24_H ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next()  );
  reg_def V24_J ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2) );
  reg_def V24_K ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3) );

  reg_def V25   ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()          );
  reg_def V25_H ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next()  );
  reg_def V25_J ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2) );
  reg_def V25_K ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3) );

  reg_def V26   ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()          );
  reg_def V26_H ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next()  );
  reg_def V26_J ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2) );
  reg_def V26_K ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3) );

  reg_def V27   ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()          );
  reg_def V27_H ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next()  );
  reg_def V27_J ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2) );
  reg_def V27_K ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3) );

  reg_def V28   ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()          );
  reg_def V28_H ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next()  );
  reg_def V28_J ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2) );
  reg_def V28_K ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3) );

  reg_def V29   ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()          );
  reg_def V29_H ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next()  );
  reg_def V29_J ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2) );
  reg_def V29_K ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3) );

  reg_def V30   ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()          );
  reg_def V30_H ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next()  );
  reg_def V30_J ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2) );
  reg_def V30_K ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3) );

  reg_def V31   ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()          );
  reg_def V31_H ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next()  );
  reg_def V31_J ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2) );
  reg_def V31_K ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3) );
  337 
  338 // ----------------------------
  339 // SVE Predicate Registers
  340 // ----------------------------
  // All sixteen SVE predicate registers are caller-saved (SOC).  p7 is
  // allocated last so it can be kept holding an all-true predicate (see
  // alloc_class chunk2 below).
  reg_def P0 (SOC, SOC, Op_RegVectMask, 0, p0->as_VMReg());
  reg_def P1 (SOC, SOC, Op_RegVectMask, 1, p1->as_VMReg());
  reg_def P2 (SOC, SOC, Op_RegVectMask, 2, p2->as_VMReg());
  reg_def P3 (SOC, SOC, Op_RegVectMask, 3, p3->as_VMReg());
  reg_def P4 (SOC, SOC, Op_RegVectMask, 4, p4->as_VMReg());
  reg_def P5 (SOC, SOC, Op_RegVectMask, 5, p5->as_VMReg());
  reg_def P6 (SOC, SOC, Op_RegVectMask, 6, p6->as_VMReg());
  reg_def P7 (SOC, SOC, Op_RegVectMask, 7, p7->as_VMReg());
  reg_def P8 (SOC, SOC, Op_RegVectMask, 8, p8->as_VMReg());
  reg_def P9 (SOC, SOC, Op_RegVectMask, 9, p9->as_VMReg());
  reg_def P10 (SOC, SOC, Op_RegVectMask, 10, p10->as_VMReg());
  reg_def P11 (SOC, SOC, Op_RegVectMask, 11, p11->as_VMReg());
  reg_def P12 (SOC, SOC, Op_RegVectMask, 12, p12->as_VMReg());
  reg_def P13 (SOC, SOC, Op_RegVectMask, 13, p13->as_VMReg());
  reg_def P14 (SOC, SOC, Op_RegVectMask, 14, p14->as_VMReg());
  reg_def P15 (SOC, SOC, Op_RegVectMask, 15, p15->as_VMReg());
  357 
  358 // ----------------------------
  359 // Special Registers
  360 // ----------------------------
  361 
// the AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. the FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
  367 
  368 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
  369 
  370 // Specify priority of register selection within phases of register
  371 // allocation.  Highest priority is first.  A useful heuristic is to
  372 // give registers a low priority when they are required by machine
  373 // instructions, like EAX and EDX on I486, and choose no-save registers
  374 // before save-on-call, & save-on-call before save-on-entry.  Registers
  375 // which participate in fixed calling sequences should come last.
  376 // Registers which are used as pairs must fall on an even boundary.
  377 
alloc_class chunk0(
    // Highest allocation priority first: plain volatiles, then the Java
    // argument registers, then callee-saved registers; registers with
    // fixed roles come last so they are only chosen when nothing else
    // is free.

    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
    R8, R8_H,   // rscratch1
    R9, R9_H,   // rscratch2
);
  420 
alloc_class chunk1(

    // volatiles (v16-v31 are caller-saved and not used for argument
    // passing, so they are the preferred allocation candidates)
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles (low halves SOE for the C convention)
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);
  461 
alloc_class chunk2 (
    // SVE predicate registers.  p7 is listed last so the allocator only
    // touches it when everything else is in use, keeping it free to hold
    // an all-true predicate.

    // Governing predicates for load/store and arithmetic
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,

    // Extra predicates
    P8,
    P9,
    P10,
    P11,
    P12,
    P13,
    P14,
    P15,

    // Preserved for all-true predicate
    P7,
);
  485 
  486 alloc_class chunk3(RFLAGS);
  487 
  488 //----------Architecture Description Register Classes--------------------------
  489 // Several register classes are automatically defined based upon information in
  490 // this architecture description.
  491 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
  492 // 2) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
  493 //
  494 
  495 // Class for all 32 bit general purpose registers
// Note: r8/r9 (rscratch1/rscratch2) are deliberately omitted so they
// remain available as scratch registers.
reg_class all_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30,
    R31
);
  528 
  529 
  530 // Class for all 32 bit integer registers (excluding SP which
  531 // will never be used as an integer register)
reg_class any_reg32 %{
  // Register mask computed at runtime; _ANY_REG32_mask is defined in
  // the C++ source section of this file (not visible in this chunk).
  return _ANY_REG32_mask;
%}

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);

// Singleton class for R31 int register
reg_class int_r31_reg(R31);
  550 
  551 // Class for all 64 bit general purpose registers
// 64-bit counterpart of all_reg32: each register paired with its
// virtual high half.  r8/r9 (scratch) are again omitted.
reg_class all_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
  584 
  585 // Class for all long integer registers (including SP)
reg_class any_reg %{
  // Runtime-computed mask; _ANY_REG_mask is defined in the C++ source
  // section of this file (not visible in this chunk).
  return _ANY_REG_mask;
%}

// Class for non-allocatable 32 bit registers
reg_class non_allocatable_reg32(
#ifdef R18_RESERVED
    // See comment in register_aarch64.hpp
    R18,                        // tls on Windows, platform register on macOS
#endif
    R28,                        // thread
    R30,                        // lr
    R31                         // sp
);

// Class for non-allocatable 64 bit registers
reg_class non_allocatable_reg(
#ifdef R18_RESERVED
    // See comment in register_aarch64.hpp
    R18, R18_H,                 // tls on Windows, platform register on macOS
#endif
    R28, R28_H,                 // thread
    R30, R30_H,                 // lr
    R31, R31_H                  // sp
);
  611 
  612 // Class for all non-special integer registers
reg_class no_special_reg32 %{
  // Runtime-computed mask; the _NO_SPECIAL_* masks are defined in the
  // C++ source section of this file (not visible in this chunk).
  return _NO_SPECIAL_REG32_mask;
%}

// Class for all non-special long integer registers
reg_class no_special_reg %{
  return _NO_SPECIAL_REG_mask;
%}

// Singleton 64-bit classes below pin a value to one specific register,
// typically to satisfy a fixed calling or runtime-stub convention.

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register (r12)
reg_class method_reg(
    R12, R12_H
);

// Class for thread register (r28)
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register (r29)
reg_class fp_reg(
    R29, R29_H
);

// Class for link register (r30)
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register (r31)
reg_class sp_reg(
  R31, R31_H
);

// Class for all pointer registers
reg_class ptr_reg %{
  return _PTR_REG_mask;
%}

// Class for all non_special pointer registers
reg_class no_special_ptr_reg %{
  return _NO_SPECIAL_PTR_REG_mask;
%}

// Class for all non_special pointer registers (excluding rfp)
reg_class no_special_no_rfp_ptr_reg %{
  return _NO_SPECIAL_NO_RFP_PTR_REG_mask;
%}
  701 
  702 // Class for all float registers
// Single-precision floats occupy only the first 32-bit slot (Vn) of
// each vector register.
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);
  737 
  738 // Double precision float registers have virtual `high halves' that
  739 // are needed by the allocator.
  740 // Class for all double registers
// Each double pairs Vn with its virtual high half Vn_H (see the
// "double registers must be defined in pairs" note above).
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
  775 
  776 // Class for all SVE vector registers.
// SVE vector registers use all four logical slots per register; the
// actual spill/unspill size is determined at startup from the detected
// physical vector length (see the SVE note above).
reg_class vectora_reg (
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,
);
  811 
// Class for all 64bit vector registers
// Two 32-bit slots per register (V<n>, V<n>_H), i.e. the D-register
// view of each SIMD register.
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
  847 
// Class for all 128bit vector registers
// Four 32-bit slots per register (V<n>, _H, _J, _K), i.e. the
// Q-register view of each SIMD register.
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
  883 
// Singleton register classes for the individual vector registers
// v0..v31, for instructions that pin a specific register.
// NOTE(review): each class lists only the V<n>/V<n>_H slot pair even
// though the per-class comments say "128 bit" — confirm this matches
// the allocator's expectations for these singletons.

// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Class for 128 bit register v4
reg_class v4_reg(
    V4, V4_H
);

// Class for 128 bit register v5
reg_class v5_reg(
    V5, V5_H
);

// Class for 128 bit register v6
reg_class v6_reg(
    V6, V6_H
);

// Class for 128 bit register v7
reg_class v7_reg(
    V7, V7_H
);

// Class for 128 bit register v8
reg_class v8_reg(
    V8, V8_H
);

// Class for 128 bit register v9
reg_class v9_reg(
    V9, V9_H
);

// Class for 128 bit register v10
reg_class v10_reg(
    V10, V10_H
);

// Class for 128 bit register v11
reg_class v11_reg(
    V11, V11_H
);

// Class for 128 bit register v12
reg_class v12_reg(
    V12, V12_H
);

// Class for 128 bit register v13
reg_class v13_reg(
    V13, V13_H
);

// Class for 128 bit register v14
reg_class v14_reg(
    V14, V14_H
);

// Class for 128 bit register v15
reg_class v15_reg(
    V15, V15_H
);

// Class for 128 bit register v16
reg_class v16_reg(
    V16, V16_H
);

// Class for 128 bit register v17
reg_class v17_reg(
    V17, V17_H
);

// Class for 128 bit register v18
reg_class v18_reg(
    V18, V18_H
);

// Class for 128 bit register v19
reg_class v19_reg(
    V19, V19_H
);

// Class for 128 bit register v20
reg_class v20_reg(
    V20, V20_H
);

// Class for 128 bit register v21
reg_class v21_reg(
    V21, V21_H
);

// Class for 128 bit register v22
reg_class v22_reg(
    V22, V22_H
);

// Class for 128 bit register v23
reg_class v23_reg(
    V23, V23_H
);

// Class for 128 bit register v24
reg_class v24_reg(
    V24, V24_H
);

// Class for 128 bit register v25
reg_class v25_reg(
    V25, V25_H
);

// Class for 128 bit register v26
reg_class v26_reg(
    V26, V26_H
);

// Class for 128 bit register v27
reg_class v27_reg(
    V27, V27_H
);

// Class for 128 bit register v28
reg_class v28_reg(
    V28, V28_H
);

// Class for 128 bit register v29
reg_class v29_reg(
    V29, V29_H
);

// Class for 128 bit register v30
reg_class v30_reg(
    V30, V30_H
);

// Class for 128 bit register v31
reg_class v31_reg(
    V31, V31_H
);
 1043 
// Class for all SVE predicate registers.
// p7 is excluded from allocation; see the comment in the list below.
reg_class pr_reg (
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    // P7, non-allocatable, preserved with all elements preset to TRUE.
    P8,
    P9,
    P10,
    P11,
    P12,
    P13,
    P14,
    P15
);
 1063 
// Class for SVE governing predicate registers, which are used
// to determine the active elements of a predicated instruction.
// Only the low predicates p0-p6 may govern; p7 is reserved (see below).
reg_class gov_pr (
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    // P7, non-allocatable, preserved with all elements preset to TRUE.
);

// Singleton classes for the individual predicate registers p0 and p1.
reg_class p0_reg(P0);
reg_class p1_reg(P1);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);
 1082 
 1083 %}
 1084 
 1085 //----------DEFINITION BLOCK---------------------------------------------------
 1086 // Define name --> value mappings to inform the ADLC of an integer valued name
 1087 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 1088 // Format:
 1089 //        int_def  <name>         ( <int_value>, <expression>);
 1090 // Generated Code in ad_<arch>.hpp
 1091 //        #define  <name>   (<expression>)
 1092 //        // value == <int_value>
 1093 // Generated code in ad_<arch>.cpp adlc_verification()
 1094 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 1095 //
 1096 
// We follow the ppc-aix port in using a simple cost model which ranks
// register operations as cheap, memory operations as more expensive
// and branches as most expensive. The first two have a low as well as
// a normal cost; huge cost appears to be a way of saying "don't do
// something".
 1102 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls are ranked twice as expensive as a plain insn.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile references (loads/stores with ordering semantics) are an
  // order of magnitude more expensive than a plain insn.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 1110 
 1111 
 1112 //----------SOURCE BLOCK-------------------------------------------------------
 1113 // This is a block of C++ code which provides values, functions, and
 1114 // definitions necessary in the rest of the architecture description
 1115 
 1116 source_hpp %{
 1117 
 1118 #include "asm/macroAssembler.hpp"
 1119 #include "gc/shared/barrierSetAssembler.hpp"
 1120 #include "gc/shared/cardTable.hpp"
 1121 #include "gc/shared/cardTableBarrierSet.hpp"
 1122 #include "gc/shared/collectedHeap.hpp"
 1123 #include "opto/addnode.hpp"
 1124 #include "opto/convertnode.hpp"
 1125 #include "runtime/objectMonitor.hpp"
 1126 
// Register masks defined and initialised in the source block below
// (see reg_mask_init()); referenced by the dynamic register classes.
extern RegMask _ANY_REG32_mask;
extern RegMask _ANY_REG_mask;
extern RegMask _PTR_REG_mask;
extern RegMask _NO_SPECIAL_REG32_mask;
extern RegMask _NO_SPECIAL_REG_mask;
extern RegMask _NO_SPECIAL_PTR_REG_mask;
extern RegMask _NO_SPECIAL_NO_RFP_PTR_REG_mask;

class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  // Always zero: this platform emits no call trampolines here.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  // Always zero for the same reason as size_call_trampoline().
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
 1152 
// Sizes and emitters for the exception and deopt handler stubs
// expected by the generic output code.
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // The exception handler is a single far branch to a code stub.
  static uint size_exception_handler() {
    return MacroAssembler::far_codestub_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    return NativeInstruction::instruction_size + MacroAssembler::far_codestub_branch_size();
  }
};
 1169 
// Platform-dependent node flags: AArch64 defines no flags beyond the
// shared ones, so _last_flag simply aliases Node::_last_flag.
class Node::PD {
public:
  enum NodeFlags {
    _last_flag = Node::_last_flag
  };
};
 1176 
  // Opcode classifier shared by the predicates below; see the
  // definition in the source block for the exact opcode sets.
  bool is_CAS(int opcode, bool maybe_volatile);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);

  // Convert BootTest condition to Assembler condition.
  // Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
  Assembler::Condition to_assembler_cond(BoolTest::mask cond);
 1199 %}
 1200 
 1201 source %{
 1202 
  // Derived RegMask with conditionally allocatable registers

  // No platform-specific mach node analysis is needed on AArch64.
  void PhaseOutput::pd_perform_mach_node_analysis() {
  }

  // Machine nodes need no special alignment on this platform.
  int MachNode::pd_alignment_required() const {
    return 1;
  }

  // No padding is ever inserted before a machine node.
  int MachNode::compute_padding(int current_offset) const {
    return 0;
  }
 1215 
  // Definitions of the runtime-derived register masks declared extern
  // in the source_hpp block; populated once by reg_mask_init().
  RegMask _ANY_REG32_mask;
  RegMask _ANY_REG_mask;
  RegMask _PTR_REG_mask;
  RegMask _NO_SPECIAL_REG32_mask;
  RegMask _NO_SPECIAL_REG_mask;
  RegMask _NO_SPECIAL_PTR_REG_mask;
  RegMask _NO_SPECIAL_NO_RFP_PTR_REG_mask;
 1223 
  // One-time initialisation of the derived register masks above.
  // Starts from the adlc-generated _ALL_REG*/_NON_ALLOCATABLE_REG*
  // masks and conditionally removes r27 (heapbase) and r29 (rfp).
  void reg_mask_init() {
    // We derive below RegMask(s) from the ones which are auto-generated from
    // adlc register classes to make AArch64 rheapbase (r27) and rfp (r29)
    // registers conditionally reserved.

    // All 32-bit registers except the stack pointer alias r31_sp.
    _ANY_REG32_mask = _ALL_REG32_mask;
    _ANY_REG32_mask.Remove(OptoReg::as_OptoReg(r31_sp->as_VMReg()));

    _ANY_REG_mask = _ALL_REG_mask;

    _PTR_REG_mask = _ALL_REG_mask;

    _NO_SPECIAL_REG32_mask = _ALL_REG32_mask;
    _NO_SPECIAL_REG32_mask.SUBTRACT(_NON_ALLOCATABLE_REG32_mask);

    _NO_SPECIAL_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    _NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    // r27 is not allocatable when compressed oops is on and heapbase is not
    // zero, compressed klass pointers doesn't use r27 after JDK-8234794
    if (UseCompressedOops && (CompressedOops::ptrs_base() != NULL)) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
      _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
      _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
    }

    // r29 is not allocatable when PreserveFramePointer is on
    if (PreserveFramePointer) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
      _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
      _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
    }

    // rfp (r29) is unconditionally excluded from this mask, on top of
    // whatever was removed above.
    _NO_SPECIAL_NO_RFP_PTR_REG_mask = _NO_SPECIAL_PTR_REG_mask;
    _NO_SPECIAL_NO_RFP_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
  }
 1263 
  // Optimization of volatile gets and puts
 1265   // -------------------------------------
 1266   //
 1267   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
 1268   // use to implement volatile reads and writes. For a volatile read
 1269   // we simply need
 1270   //
 1271   //   ldar<x>
 1272   //
 1273   // and for a volatile write we need
 1274   //
 1275   //   stlr<x>
 1276   //
 1277   // Alternatively, we can implement them by pairing a normal
 1278   // load/store with a memory barrier. For a volatile read we need
 1279   //
 1280   //   ldr<x>
 1281   //   dmb ishld
 1282   //
 1283   // for a volatile write
 1284   //
 1285   //   dmb ish
 1286   //   str<x>
 1287   //   dmb ish
 1288   //
 1289   // We can also use ldaxr and stlxr to implement compare and swap CAS
 1290   // sequences. These are normally translated to an instruction
 1291   // sequence like the following
 1292   //
 1293   //   dmb      ish
 1294   // retry:
 1295   //   ldxr<x>   rval raddr
 1296   //   cmp       rval rold
 1297   //   b.ne done
 1298   //   stlxr<x>  rval, rnew, rold
 1299   //   cbnz      rval retry
 1300   // done:
 1301   //   cset      r0, eq
 1302   //   dmb ishld
 1303   //
 1304   // Note that the exclusive store is already using an stlxr
 1305   // instruction. That is required to ensure visibility to other
 1306   // threads of the exclusive write (assuming it succeeds) before that
 1307   // of any subsequent writes.
 1308   //
 1309   // The following instruction sequence is an improvement on the above
 1310   //
 1311   // retry:
 1312   //   ldaxr<x>  rval raddr
 1313   //   cmp       rval rold
 1314   //   b.ne done
 1315   //   stlxr<x>  rval, rnew, rold
 1316   //   cbnz      rval retry
 1317   // done:
 1318   //   cset      r0, eq
 1319   //
 1320   // We don't need the leading dmb ish since the stlxr guarantees
 1321   // visibility of prior writes in the case that the swap is
 1322   // successful. Crucially we don't have to worry about the case where
 1323   // the swap is not successful since no valid program should be
 1324   // relying on visibility of prior changes by the attempting thread
 1325   // in the case where the CAS fails.
 1326   //
 1327   // Similarly, we don't need the trailing dmb ishld if we substitute
 1328   // an ldaxr instruction since that will provide all the guarantees we
 1329   // require regarding observation of changes made by other threads
 1330   // before any change to the CAS address observed by the load.
 1331   //
 1332   // In order to generate the desired instruction sequence we need to
 1333   // be able to identify specific 'signature' ideal graph node
  // sequences which i) occur as a translation of volatile reads or
 1335   // writes or CAS operations and ii) do not occur through any other
 1336   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
 1338   // sequences to the desired machine code sequences. Selection of the
 1339   // alternative rules can be implemented by predicates which identify
 1340   // the relevant node sequences.
 1341   //
 1342   // The ideal graph generator translates a volatile read to the node
 1343   // sequence
 1344   //
 1345   //   LoadX[mo_acquire]
 1346   //   MemBarAcquire
 1347   //
 1348   // As a special case when using the compressed oops optimization we
 1349   // may also see this variant
 1350   //
 1351   //   LoadN[mo_acquire]
 1352   //   DecodeN
 1353   //   MemBarAcquire
 1354   //
 1355   // A volatile write is translated to the node sequence
 1356   //
 1357   //   MemBarRelease
 1358   //   StoreX[mo_release] {CardMark}-optional
 1359   //   MemBarVolatile
 1360   //
 1361   // n.b. the above node patterns are generated with a strict
 1362   // 'signature' configuration of input and output dependencies (see
 1363   // the predicates below for exact details). The card mark may be as
 1364   // simple as a few extra nodes or, in a few GC configurations, may
 1365   // include more complex control flow between the leading and
 1366   // trailing memory barriers. However, whatever the card mark
 1367   // configuration these signatures are unique to translated volatile
 1368   // reads/stores -- they will not appear as a result of any other
 1369   // bytecode translation or inlining nor as a consequence of
 1370   // optimizing transforms.
 1371   //
 1372   // We also want to catch inlined unsafe volatile gets and puts and
 1373   // be able to implement them using either ldar<x>/stlr<x> or some
 1374   // combination of ldr<x>/stlr<x> and dmb instructions.
 1375   //
 1376   // Inlined unsafe volatiles puts manifest as a minor variant of the
 1377   // normal volatile put node sequence containing an extra cpuorder
 1378   // membar
 1379   //
 1380   //   MemBarRelease
 1381   //   MemBarCPUOrder
 1382   //   StoreX[mo_release] {CardMark}-optional
 1383   //   MemBarCPUOrder
 1384   //   MemBarVolatile
 1385   //
 1386   // n.b. as an aside, a cpuorder membar is not itself subject to
 1387   // matching and translation by adlc rules.  However, the rule
 1388   // predicates need to detect its presence in order to correctly
 1389   // select the desired adlc rules.
 1390   //
 1391   // Inlined unsafe volatile gets manifest as a slightly different
 1392   // node sequence to a normal volatile get because of the
 1393   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
 1395   // MemBarAcquire, possibly through an optional DecodeN, is still
 1396   // present
 1397   //
 1398   //   MemBarCPUOrder
 1399   //        ||       \\
 1400   //   MemBarCPUOrder LoadX[mo_acquire]
 1401   //        ||            |
 1402   //        ||       {DecodeN} optional
 1403   //        ||       /
 1404   //     MemBarAcquire
 1405   //
 1406   // In this case the acquire membar does not directly depend on the
 1407   // load. However, we can be sure that the load is generated from an
 1408   // inlined unsafe volatile get if we see it dependent on this unique
 1409   // sequence of membar nodes. Similarly, given an acquire membar we
 1410   // can know that it was added because of an inlined unsafe volatile
 1411   // get if it is fed and feeds a cpuorder membar and if its feed
 1412   // membar also feeds an acquiring load.
 1413   //
 1414   // Finally an inlined (Unsafe) CAS operation is translated to the
 1415   // following ideal graph
 1416   //
 1417   //   MemBarRelease
 1418   //   MemBarCPUOrder
 1419   //   CompareAndSwapX {CardMark}-optional
 1420   //   MemBarCPUOrder
 1421   //   MemBarAcquire
 1422   //
 1423   // So, where we can identify these volatile read and write
 1424   // signatures we can choose to plant either of the above two code
 1425   // sequences. For a volatile read we can simply plant a normal
 1426   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
 1427   // also choose to inhibit translation of the MemBarAcquire and
 1428   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
 1429   //
 1430   // When we recognise a volatile store signature we can choose to
  // plant a dmb ish as a translation for the MemBarRelease, a
 1432   // normal str<x> and then a dmb ish for the MemBarVolatile.
 1433   // Alternatively, we can inhibit translation of the MemBarRelease
 1434   // and MemBarVolatile and instead plant a simple stlr<x>
 1435   // instruction.
 1436   //
 1437   // when we recognise a CAS signature we can choose to plant a dmb
 1438   // ish as a translation for the MemBarRelease, the conventional
 1439   // macro-instruction sequence for the CompareAndSwap node (which
 1440   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
 1441   // Alternatively, we can elide generation of the dmb instructions
 1442   // and plant the alternative CompareAndSwap macro-instruction
 1443   // sequence (which uses ldaxr<x>).
 1444   //
 1445   // Of course, the above only applies when we see these signature
 1446   // configurations. We still want to plant dmb instructions in any
 1447   // other cases where we may see a MemBarAcquire, MemBarRelease or
 1448   // MemBarVolatile. For example, at the end of a constructor which
 1449   // writes final/volatile fields we will see a MemBarRelease
 1450   // instruction and this needs a 'dmb ish' lest we risk the
 1451   // constructed object being visible without making the
 1452   // final/volatile field writes visible.
 1453   //
 1454   // n.b. the translation rules below which rely on detection of the
 1455   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
 1456   // If we see anything other than the signature configurations we
 1457   // always just translate the loads and stores to ldr<x> and str<x>
 1458   // and translate acquire, release and volatile membars to the
 1459   // relevant dmb instructions.
 1460   //
 1461 
  // is_CAS(int opcode, bool maybe_volatile)
  //
  // return true if opcode is one of the possible CompareAndSwapX
  // values otherwise false. For the CompareAndExchange / weak-CAS
  // variants in the second group the result is the caller-supplied
  // maybe_volatile flag instead of an unconditional true.

  bool is_CAS(int opcode, bool maybe_volatile)
  {
    switch(opcode) {
      // We handle these
      // Strong CAS and atomic get/set/add forms: always treated as CAS.
    case Op_CompareAndSwapI:
    case Op_CompareAndSwapL:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_CompareAndSwapB:
    case Op_CompareAndSwapS:
    case Op_GetAndSetI:
    case Op_GetAndSetL:
    case Op_GetAndSetP:
    case Op_GetAndSetN:
    case Op_GetAndAddI:
    case Op_GetAndAddL:
      return true;
      // Exchange and weak forms: treated as CAS only when the caller
      // allows it (maybe_volatile == true).
    case Op_CompareAndExchangeI:
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeB:
    case Op_CompareAndExchangeS:
    case Op_CompareAndExchangeL:
    case Op_CompareAndExchangeP:
    case Op_WeakCompareAndSwapB:
    case Op_WeakCompareAndSwapS:
    case Op_WeakCompareAndSwapI:
    case Op_WeakCompareAndSwapL:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      return maybe_volatile;
    default:
      return false;
    }
  }
 1507 
 1508   // helper to determine the maximum number of Phi nodes we may need to
 1509   // traverse when searching from a card mark membar for the merge mem
 1510   // feeding a trailing membar or vice versa
 1511 
 1512 // predicates controlling emit of ldr<x>/ldar<x>
 1513 
 1514 bool unnecessary_acquire(const Node *barrier)
 1515 {
 1516   assert(barrier->is_MemBar(), "expecting a membar");
 1517 
 1518   MemBarNode* mb = barrier->as_MemBar();
 1519 
 1520   if (mb->trailing_load()) {
 1521     return true;
 1522   }
 1523 
 1524   if (mb->trailing_load_store()) {
 1525     Node* load_store = mb->in(MemBarNode::Precedent);
 1526     assert(load_store->is_LoadStore(), "unexpected graph shape");
 1527     return is_CAS(load_store->Opcode(), true);
 1528   }
 1529 
 1530   return false;
 1531 }
 1532 
 1533 bool needs_acquiring_load(const Node *n)
 1534 {
 1535   assert(n->is_Load(), "expecting a load");
 1536   LoadNode *ld = n->as_Load();
 1537   return ld->is_acquire();
 1538 }
 1539 
 1540 bool unnecessary_release(const Node *n)
 1541 {
 1542   assert((n->is_MemBar() &&
 1543           n->Opcode() == Op_MemBarRelease),
 1544          "expecting a release membar");
 1545 
 1546   MemBarNode *barrier = n->as_MemBar();
 1547   if (!barrier->leading()) {
 1548     return false;
 1549   } else {
 1550     Node* trailing = barrier->trailing_membar();
 1551     MemBarNode* trailing_mb = trailing->as_MemBar();
 1552     assert(trailing_mb->trailing(), "Not a trailing membar?");
 1553     assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
 1554 
 1555     Node* mem = trailing_mb->in(MemBarNode::Precedent);
 1556     if (mem->is_Store()) {
 1557       assert(mem->as_Store()->is_release(), "");
 1558       assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
 1559       return true;
 1560     } else {
 1561       assert(mem->is_LoadStore(), "");
 1562       assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
 1563       return is_CAS(mem->Opcode(), true);
 1564     }
 1565   }
 1566   return false;
 1567 }
 1568 
// Returns true when a volatile membar can be elided because it trails
// a releasing store that will be emitted as stlr<x>.
bool unnecessary_volatile(const Node *n)
{
  // assert n->is_MemBar();
  MemBarNode *mbvol = n->as_MemBar();

  // Elidable exactly when this membar trails a (releasing) store.
  bool release = mbvol->trailing_store();
  assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
#ifdef ASSERT
  // Cross-check the leading/trailing membar pairing in debug builds.
  if (release) {
    Node* leading = mbvol->leading_membar();
    assert(leading->Opcode() == Op_MemBarRelease, "");
    assert(leading->as_MemBar()->leading_store(), "");
    assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
  }
#endif

  return release;
}
 1587 
 1588 // predicates controlling emit of str<x>/stlr<x>
 1589 
 1590 bool needs_releasing_store(const Node *n)
 1591 {
 1592   // assert n->is_Store();
 1593   StoreNode *st = n->as_Store();
 1594   return st->trailing_membar() != NULL;
 1595 }
 1596 
 1597 // predicate controlling translation of CAS
 1598 //
 1599 // returns true if CAS needs to use an acquiring load otherwise false
 1600 
 1601 bool needs_acquiring_load_exclusive(const Node *n)
 1602 {
 1603   assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
 1604   LoadStoreNode* ldst = n->as_LoadStore();
 1605   if (is_CAS(n->Opcode(), false)) {
 1606     assert(ldst->trailing_membar() != NULL, "expected trailing membar");
 1607   } else {
 1608     return ldst->trailing_membar() != NULL;
 1609   }
 1610 
 1611   // so we can just return true here
 1612   return true;
 1613 }
 1614 
 1615 #define __ _masm.
 1616 
// forward declarations for helper functions to convert register
// indices to register objects
 1619 
 1620 // the ad file has to provide implementations of certain methods
 1621 // expected by the generic code
 1622 //
 1623 // REQUIRED FUNCTIONALITY
 1624 
 1625 //=============================================================================
 1626 
 1627 // !!!!! Special hack to get all types of calls to specify the byte offset
 1628 //       from the start of the call to the point where the return address
 1629 //       will point.
 1630 
 1631 int MachCallStaticJavaNode::ret_addr_offset()
 1632 {
 1633   // call should be a simple bl
 1634   int off = 4;
 1635   return off;
 1636 }
 1637 
// Byte offset from the start of the call to the return address:
// four instructions for a dynamic Java call.
int MachCallDynamicJavaNode::ret_addr_offset()
{
  return 16; // movz, movk, movk, bl
}
 1642 
 1643 int MachCallRuntimeNode::ret_addr_offset() {
 1644   // for generated stubs the call will be
 1645   //   bl(addr)
 1646   // or with far branches
 1647   //   bl(trampoline_stub)
 1648   // for real runtime callouts it will be six instructions
 1649   // see aarch64_enc_java_to_runtime
 1650   //   adr(rscratch2, retaddr)
 1651   //   lea(rscratch1, RuntimeAddress(addr)
 1652   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
 1653   //   blr(rscratch1)
 1654   CodeBlob *cb = CodeCache::find_blob(_entry_point);
 1655   if (cb) {
 1656     return 1 * NativeInstruction::instruction_size;
 1657   } else {
 1658     return 6 * NativeInstruction::instruction_size;
 1659   }
 1660 }
 1661 
 1662 //=============================================================================
 1663 
#ifndef PRODUCT
// Debug-only textual representation of a breakpoint node.
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif

// Emit a single brk instruction for a breakpoint node.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  C2_MacroAssembler _masm(&cbuf);
  __ brk(0);
}

// Size in bytes of the emitted breakpoint; defer to the generic
// MachNode size computation.
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 1678 
 1679 //=============================================================================
 1680 
#ifndef PRODUCT
  // Debug-only textual representation of a nop-padding node.
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif

  // Emit _count nop instructions as padding.
  void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
    C2_MacroAssembler _masm(&cbuf);
    for (int i = 0; i < _count; i++) {
      __ nop();
    }
  }

  // Size in bytes: one instruction per requested nop.
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
 1697 
 1698 //=============================================================================
// The constant table base needs no register on AArch64 (absolute
// addressing is used), so the node produces nothing and emits nothing.
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;

int ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}

// No post-register-allocation expansion is ever required here, so
// postalloc_expand() must never be reached.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}

void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}

// Zero bytes emitted, matching the empty encoding above.
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}

#ifndef PRODUCT
// Debug-only textual representation; nothing is emitted for this node.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
 1723 
#ifndef PRODUCT
// Debug-build listing of the method prolog.  Prints pseudo-assembly that
// mirrors the code generated by MachPrologNode::emit() below.
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  if (C->output()->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // ROP protection: probe lr and sign the return address on entry.
  if (VM_Version::use_rop_protection()) {
    st->print("ldr  zr, [lr]\n\t");
    st->print("paciaz\n\t");
  }
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    // Small frame: one SP adjustment, fp/lr stored at the top of the frame.
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    // Large frame: push fp/lr first, then drop SP via a scratch register.
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
  // nmethod entry barrier listing (only for compiled Java methods, not stubs).
  if (C->stub_function() == NULL && BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) {
    st->print("\n\t");
    st->print("ldr  rscratch1, [guard]\n\t");
    st->print("dmb ishld\n\t");
    st->print("ldr  rscratch2, [rthread, #thread_disarmed_guard_value_offset]\n\t");
    st->print("cmp  rscratch1, rscratch2\n\t");
    st->print("b.eq skip");
    st->print("\n\t");
    st->print("blr #nmethod_entry_barrier_stub\n\t");
    st->print("b skip\n\t");
    st->print("guard: int\n\t");
    st->print("\n\t");
    st->print("skip:\n\t");
  }
}
#endif
 1763 
// Emit the method prolog: patchable nop, optional class-initialization
// barrier, stack-bang check, frame construction, and optional nmethod
// entry barrier.  See MachPrologNode::format() above for the listing.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const int framesize = C->output()->frame_size_in_bytes();

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  if (C->clinit_barrier_on_entry()) {
    assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");

    Label L_skip_barrier;

    // Check the holder's initialization state; bail to the wrong-method
    // stub when the barrier does not pass (see MacroAssembler::clinit_barrier).
    __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
    __ bind(L_skip_barrier);
  }

  if (C->max_vector_size() > 0) {
    // NOTE(review): presumably re-establishes the all-true SVE predicate
    // register on entry — see reinitialize_ptrue(); confirm against callers.
    __ reinitialize_ptrue();
  }

  int bangsize = C->output()->bang_size_in_bytes();
  if (C->output()->need_stack_bang(bangsize))
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  // Emit the nmethod entry barrier for compiled Java methods (not stubs).
  if (C->stub_function() == NULL) {
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
    if (BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) {
      // Dummy labels for just measuring the code size
      Label dummy_slow_path;
      Label dummy_continuation;
      Label dummy_guard;
      Label* slow_path = &dummy_slow_path;
      Label* continuation = &dummy_continuation;
      Label* guard = &dummy_guard;
      if (!Compile::current()->output()->in_scratch_emit_size()) {
        // Use real labels from actual stub when not emitting code for the purpose of measuring its size
        C2EntryBarrierStub* stub = new (Compile::current()->comp_arena()) C2EntryBarrierStub();
        Compile::current()->output()->add_stub(stub);
        slow_path = &stub->entry();
        continuation = &stub->continuation();
        guard = &stub->guard();
      }
      // In the C2 code, we move the non-hot part of nmethod entry barriers out-of-line to a stub.
      bs->nmethod_entry_barrier(&_masm, slow_path, continuation, guard);
    }
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->output()->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    ConstantTable& constant_table = C->output()->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
 1832 
// Size of the prolog; too variable to compute statically, so delegate to
// the generic MachNode computation.
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

// Number of relocatable values in the prolog: none.
int MachPrologNode::reloc() const
{
  return 0;
}
 1843 
 1844 //=============================================================================
 1845 
#ifndef PRODUCT
// Debug-build listing of the method epilog; mirrors MachEpilogNode::emit().
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    // Small frame: restore fp/lr from the top, then one SP adjustment.
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    // Large frame: raise SP via a scratch register, then pop fp/lr.
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }
  // ROP protection: authenticate the return address before returning.
  if (VM_Version::use_rop_protection()) {
    st->print("autiaz\n\t");
    st->print("ldr  zr, [lr]\n\t");
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# test polling word\n\t");
    st->print("ldr  rscratch1, [rthread],#%d\n\t", in_bytes(JavaThread::polling_word_offset()));
    st->print("cmp  sp, rscratch1\n\t");
    st->print("bhi #slow_path");
  }
}
#endif
 1876 
// Emit the method epilog: tear down the frame, optionally run the reserved
// stack check, and emit the return safepoint poll for method compilations.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    Label dummy_label;
    Label* code_stub = &dummy_label;
    // Use a real out-of-line stub unless we are only measuring code size.
    if (!C->output()->in_scratch_emit_size()) {
      C2SafepointPollStub* stub = new (C->comp_arena()) C2SafepointPollStub(__ offset());
      C->output()->add_stub(stub);
      code_stub = &stub->entry();
    }
    __ relocate(relocInfo::poll_return_type);
    __ safepoint_poll(*code_stub, true /* at_return */, false /* acquire */, true /* in_nmethod */);
  }
}
 1900 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

// Use the default pipeline description for the epilog.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
 1914 
 1915 //=============================================================================
 1916 
// Figure out which register class each belongs in: rc_int, rc_float or
// rc_stack.
// (rc_predicate covers the SVE predicate registers; rc_bad means "no register".)
enum RC { rc_bad, rc_int, rc_float, rc_predicate, rc_stack };
 1920 
 1921 static enum RC rc_class(OptoReg::Name reg) {
 1922 
 1923   if (reg == OptoReg::Bad) {
 1924     return rc_bad;
 1925   }
 1926 
 1927   // we have 32 int registers * 2 halves
 1928   int slots_of_int_registers = Register::number_of_registers * Register::max_slots_per_register;
 1929 
 1930   if (reg < slots_of_int_registers) {
 1931     return rc_int;
 1932   }
 1933 
 1934   // we have 32 float register * 8 halves
 1935   int slots_of_float_registers = FloatRegister::number_of_registers * FloatRegister::max_slots_per_register;
 1936   if (reg < slots_of_int_registers + slots_of_float_registers) {
 1937     return rc_float;
 1938   }
 1939 
 1940   int slots_of_predicate_registers = PRegister::number_of_registers * PRegister::max_slots_per_register;
 1941   if (reg < slots_of_int_registers + slots_of_float_registers + slots_of_predicate_registers) {
 1942     return rc_predicate;
 1943   }
 1944 
 1945   // Between predicate regs & stack is the flags.
 1946   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
 1947 
 1948   return rc_stack;
 1949 }
 1950 
 1951 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
 1952   Compile* C = ra_->C;
 1953 
 1954   // Get registers to move.
 1955   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
 1956   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
 1957   OptoReg::Name dst_hi = ra_->get_reg_second(this);
 1958   OptoReg::Name dst_lo = ra_->get_reg_first(this);
 1959 
 1960   enum RC src_hi_rc = rc_class(src_hi);
 1961   enum RC src_lo_rc = rc_class(src_lo);
 1962   enum RC dst_hi_rc = rc_class(dst_hi);
 1963   enum RC dst_lo_rc = rc_class(dst_lo);
 1964 
 1965   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
 1966 
 1967   if (src_hi != OptoReg::Bad && !bottom_type()->isa_vectmask()) {
 1968     assert((src_lo&1)==0 && src_lo+1==src_hi &&
 1969            (dst_lo&1)==0 && dst_lo+1==dst_hi,
 1970            "expected aligned-adjacent pairs");
 1971   }
 1972 
 1973   if (src_lo == dst_lo && src_hi == dst_hi) {
 1974     return 0;            // Self copy, no move.
 1975   }
 1976 
 1977   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
 1978               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
 1979   int src_offset = ra_->reg2offset(src_lo);
 1980   int dst_offset = ra_->reg2offset(dst_lo);
 1981 
 1982   if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
 1983     uint ireg = ideal_reg();
 1984     if (ireg == Op_VecA && cbuf) {
 1985       C2_MacroAssembler _masm(cbuf);
 1986       int sve_vector_reg_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
 1987       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
 1988         // stack->stack
 1989         __ spill_copy_sve_vector_stack_to_stack(src_offset, dst_offset,
 1990                                                 sve_vector_reg_size_in_bytes);
 1991       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
 1992         __ spill_sve_vector(as_FloatRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo),
 1993                             sve_vector_reg_size_in_bytes);
 1994       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
 1995         __ unspill_sve_vector(as_FloatRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo),
 1996                               sve_vector_reg_size_in_bytes);
 1997       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
 1998         __ sve_orr(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 1999                    as_FloatRegister(Matcher::_regEncode[src_lo]),
 2000                    as_FloatRegister(Matcher::_regEncode[src_lo]));
 2001       } else {
 2002         ShouldNotReachHere();
 2003       }
 2004     } else if (cbuf) {
 2005       assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
 2006       C2_MacroAssembler _masm(cbuf);
 2007       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
 2008       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
 2009         // stack->stack
 2010         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
 2011         if (ireg == Op_VecD) {
 2012           __ unspill(rscratch1, true, src_offset);
 2013           __ spill(rscratch1, true, dst_offset);
 2014         } else {
 2015           __ spill_copy128(src_offset, dst_offset);
 2016         }
 2017       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
 2018         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2019                ireg == Op_VecD ? __ T8B : __ T16B,
 2020                as_FloatRegister(Matcher::_regEncode[src_lo]));
 2021       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
 2022         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
 2023                  ireg == Op_VecD ? __ D : __ Q,
 2024                  ra_->reg2offset(dst_lo));
 2025       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
 2026         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2027                    ireg == Op_VecD ? __ D : __ Q,
 2028                    ra_->reg2offset(src_lo));
 2029       } else {
 2030         ShouldNotReachHere();
 2031       }
 2032     }
 2033   } else if (cbuf) {
 2034     C2_MacroAssembler _masm(cbuf);
 2035     switch (src_lo_rc) {
 2036     case rc_int:
 2037       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
 2038         if (is64) {
 2039             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
 2040                    as_Register(Matcher::_regEncode[src_lo]));
 2041         } else {
 2042             C2_MacroAssembler _masm(cbuf);
 2043             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
 2044                     as_Register(Matcher::_regEncode[src_lo]));
 2045         }
 2046       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
 2047         if (is64) {
 2048             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2049                      as_Register(Matcher::_regEncode[src_lo]));
 2050         } else {
 2051             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2052                      as_Register(Matcher::_regEncode[src_lo]));
 2053         }
 2054       } else {                    // gpr --> stack spill
 2055         assert(dst_lo_rc == rc_stack, "spill to bad register class");
 2056         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
 2057       }
 2058       break;
 2059     case rc_float:
 2060       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
 2061         if (is64) {
 2062             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
 2063                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2064         } else {
 2065             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
 2066                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2067         }
 2068       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
 2069         if (is64) {
 2070             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2071                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2072         } else {
 2073             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2074                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2075         }
 2076       } else {                    // fpr --> stack spill
 2077         assert(dst_lo_rc == rc_stack, "spill to bad register class");
 2078         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
 2079                  is64 ? __ D : __ S, dst_offset);
 2080       }
 2081       break;
 2082     case rc_stack:
 2083       if (dst_lo_rc == rc_int) {  // stack --> gpr load
 2084         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
 2085       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
 2086         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2087                    is64 ? __ D : __ S, src_offset);
 2088       } else if (dst_lo_rc == rc_predicate) {
 2089         __ unspill_sve_predicate(as_PRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo),
 2090                                  Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
 2091       } else {                    // stack --> stack copy
 2092         assert(dst_lo_rc == rc_stack, "spill to bad register class");
 2093         if (ideal_reg() == Op_RegVectMask) {
 2094           __ spill_copy_sve_predicate_stack_to_stack(src_offset, dst_offset,
 2095                                                      Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
 2096         } else {
 2097           __ unspill(rscratch1, is64, src_offset);
 2098           __ spill(rscratch1, is64, dst_offset);
 2099         }
 2100       }
 2101       break;
 2102     case rc_predicate:
 2103       if (dst_lo_rc == rc_predicate) {
 2104         __ sve_mov(as_PRegister(Matcher::_regEncode[dst_lo]), as_PRegister(Matcher::_regEncode[src_lo]));
 2105       } else if (dst_lo_rc == rc_stack) {
 2106         __ spill_sve_predicate(as_PRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo),
 2107                                Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
 2108       } else {
 2109         assert(false, "bad src and dst rc_class combination.");
 2110         ShouldNotReachHere();
 2111       }
 2112       break;
 2113     default:
 2114       assert(false, "bad rc_class for spill");
 2115       ShouldNotReachHere();
 2116     }
 2117   }
 2118 
 2119   if (st) {
 2120     st->print("spill ");
 2121     if (src_lo_rc == rc_stack) {
 2122       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
 2123     } else {
 2124       st->print("%s -> ", Matcher::regName[src_lo]);
 2125     }
 2126     if (dst_lo_rc == rc_stack) {
 2127       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
 2128     } else {
 2129       st->print("%s", Matcher::regName[dst_lo]);
 2130     }
 2131     if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
 2132       int vsize = 0;
 2133       switch (ideal_reg()) {
 2134       case Op_VecD:
 2135         vsize = 64;
 2136         break;
 2137       case Op_VecX:
 2138         vsize = 128;
 2139         break;
 2140       case Op_VecA:
 2141         vsize = Matcher::scalable_vector_reg_size(T_BYTE) * 8;
 2142         break;
 2143       default:
 2144         assert(false, "bad register type for spill");
 2145         ShouldNotReachHere();
 2146       }
 2147       st->print("\t# vector spill size = %d", vsize);
 2148     } else if (ideal_reg() == Op_RegVectMask) {
 2149       assert(Matcher::supports_scalable_vector(), "bad register type for spill");
 2150       int vsize = Matcher::scalable_predicate_reg_slots() * 32;
 2151       st->print("\t# predicate spill size = %d", vsize);
 2152     } else {
 2153       st->print("\t# spill size = %d", is64 ? 64 : 32);
 2154     }
 2155   }
 2156 
 2157   return 0;
 2158 
 2159 }
 2160 
#ifndef PRODUCT
// Debug listing of a spill copy; reuses implementation() in format-only mode.
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    implementation(NULL, ra_, false, st);
}
#endif

// Emit the spill copy; reuses implementation() in emit-only mode.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}

// Variable size; delegate to the generic MachNode computation.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 2177 
 2178 //=============================================================================
 2179 
 2180 #ifndef PRODUCT
 2181 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 2182   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 2183   int reg = ra_->get_reg_first(this);
 2184   st->print("add %s, rsp, #%d]\t# box lock",
 2185             Matcher::regName[reg], offset);
 2186 }
 2187 #endif
 2188 
// Materialize the address of the stack-allocated lock box into a register.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  C2_MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  // This add will handle any 24-bit signed offset. 24 bits allows an
  // 8 megabyte stack frame.
  __ add(as_Register(reg), sp, offset);
}

// One instruction when the offset fits an add/sub immediate, otherwise two.
// NOTE(review): assumes the large-offset form of MacroAssembler::add (used
// in emit() above) expands to exactly two instructions — confirm.
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());

  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    return NativeInstruction::instruction_size;
  } else {
    return 2 * NativeInstruction::instruction_size;
  }
}
 2210 
 2211 //=============================================================================
 2212 
 2213 #ifndef PRODUCT
 2214 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 2215 {
 2216   st->print_cr("# MachUEPNode");
 2217   if (UseCompressedClassPointers) {
 2218     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 2219     if (CompressedKlassPointers::shift() != 0) {
 2220       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
 2221     }
 2222   } else {
 2223    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 2224   }
 2225   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
 2226   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
 2227 }
 2228 #endif
 2229 
// Emit the unverified entry point: check the receiver's klass against the
// inline-cache expectation and jump to the ic-miss stub on mismatch.
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  C2_MacroAssembler _masm(&cbuf);

  // Sets the condition flags -- see MacroAssembler::cmp_klass for the
  // exact receiver/expected-klass/temp register roles.
  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}

// Variable size; delegate to the generic MachNode computation.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
 2248 
 2249 // REQUIRED EMIT CODE
 2250 
 2251 //=============================================================================
 2252 
 2253 // Emit exception handler code.
 2254 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
 2255 {
 2256   // mov rscratch1 #exception_blob_entry_point
 2257   // br rscratch1
 2258   // Note that the code buffer's insts_mark is always relative to insts.
 2259   // That's why we must use the macroassembler to generate a handler.
 2260   C2_MacroAssembler _masm(&cbuf);
 2261   address base = __ start_a_stub(size_exception_handler());
 2262   if (base == NULL) {
 2263     ciEnv::current()->record_failure("CodeCache is full");
 2264     return 0;  // CodeBuffer::expand failed
 2265   }
 2266   int offset = __ offset();
 2267   __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
 2268   assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
 2269   __ end_a_stub();
 2270   return offset;
 2271 }
 2272 
 2273 // Emit deopt handler code.
 2274 int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
 2275 {
 2276   // Note that the code buffer's insts_mark is always relative to insts.
 2277   // That's why we must use the macroassembler to generate a handler.
 2278   C2_MacroAssembler _masm(&cbuf);
 2279   address base = __ start_a_stub(size_deopt_handler());
 2280   if (base == NULL) {
 2281     ciEnv::current()->record_failure("CodeCache is full");
 2282     return 0;  // CodeBuffer::expand failed
 2283   }
 2284   int offset = __ offset();
 2285 
 2286   __ adr(lr, __ pc());
 2287   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 2288 
 2289   assert(__ offset() - offset == (int) size_deopt_handler(), "overflow");
 2290   __ end_a_stub();
 2291   return offset;
 2292 }
 2293 
 2294 // REQUIRED MATCHER CODE
 2295 
 2296 //=============================================================================
 2297 
 2298 const bool Matcher::match_rule_supported(int opcode) {
 2299   if (!has_match_rule(opcode))
 2300     return false;
 2301 
 2302   bool ret_value = true;
 2303   switch (opcode) {
 2304     case Op_OnSpinWait:
 2305       return VM_Version::supports_on_spin_wait();
 2306     case Op_CacheWB:
 2307     case Op_CacheWBPreSync:
 2308     case Op_CacheWBPostSync:
 2309       if (!VM_Version::supports_data_cache_line_flush()) {
 2310         ret_value = false;
 2311       }
 2312       break;
 2313     case Op_ExpandBits:
 2314     case Op_CompressBits:
 2315       if (!(UseSVE > 1 && VM_Version::supports_svebitperm())) {
 2316         ret_value = false;
 2317       }
 2318       break;
 2319   }
 2320 
 2321   return ret_value; // Per default match rules are supported.
 2322 }
 2323 
// Register mask for SVE predicate (governing) registers.
const RegMask* Matcher::predicate_reg_mask(void) {
  return &_PR_REG_mask;
}

// Ideal type of a vector-mask value with the given element type and length.
const TypeVectMask* Matcher::predicate_reg_type(const Type* elemTy, int length) {
  return new TypeVectMask(elemTy, length);
}

// Vector calling convention not yet implemented.
const bool Matcher::supports_vector_calling_convention(void) {
  return false;
}

OptoRegPair Matcher::vector_return_value(uint ideal_reg) {
  // Unreachable while supports_vector_calling_convention() returns false.
  Unimplemented();
  return OptoRegPair(0, 0);
}
 2341 
 2342 // Is this branch offset short enough that a short branch can be used?
 2343 //
 2344 // NOTE: If the platform does not provide any short branch variants, then
 2345 //       this method should return false for offset 0.
 2346 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
 2347   // The passed offset is relative to address of the branch.
 2348 
 2349   return (-32768 <= offset && offset < 32768);
 2350 }
 2351 
// Vector width in bytes.
// Returns 0 when the type cannot be vectorized under the current settings.
const int Matcher::vector_width_in_bytes(BasicType bt) {
  // The MaxVectorSize should have been set by detecting SVE max vector register size.
  int size = MIN2((UseSVE > 0) ? 256 : 16, (int)MaxVectorSize);
  // Minimum 2 values in vector
  if (size < 2*type2aelembytes(bt)) size = 0;
  // But never < 4
  if (size < 4) size = 0;
  return size;
}

// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
 2367 
 2368 const int Matcher::min_vector_size(const BasicType bt) {
 2369   int max_size = max_vector_size(bt);
 2370   // Limit the min vector size to 8 bytes.
 2371   int size = 8 / type2aelembytes(bt);
 2372   if (bt == T_BYTE) {
 2373     // To support vector api shuffle/rearrange.
 2374     size = 4;
 2375   } else if (bt == T_BOOLEAN) {
 2376     // To support vector api load/store mask.
 2377     size = 2;
 2378   }
 2379   if (size < 2) size = 2;
 2380   return MIN2(size, max_size);
 2381 }
 2382 
// Superword (auto-vectorization) uses the same upper bound as the Vector API.
const int Matcher::superword_max_vector_size(const BasicType bt) {
  return Matcher::max_vector_size(bt);
}

// Actual max scalable vector register length.
const int Matcher::scalable_vector_reg_size(const BasicType bt) {
  return Matcher::max_vector_size(bt);
}
 2391 
 2392 // Vector ideal reg.
 2393 const uint Matcher::vector_ideal_reg(int len) {
 2394   if (UseSVE > 0 && 16 < len && len <= 256) {
 2395     return Op_VecA;
 2396   }
 2397   switch(len) {
 2398     // For 16-bit/32-bit mask vector, reuse VecD.
 2399     case  2:
 2400     case  4:
 2401     case  8: return Op_VecD;
 2402     case 16: return Op_VecX;
 2403   }
 2404   ShouldNotReachHere();
 2405   return 0;
 2406 }
 2407 
// Replace a generic vector operand with the concrete operand class that
// matches the given ideal register type.
MachOper* Matcher::pd_specialize_generic_vector_operand(MachOper* generic_opnd, uint ideal_reg, bool is_temp) {
  assert(Matcher::is_generic_vector(generic_opnd), "not generic");
  switch (ideal_reg) {
    case Op_VecA: return new vecAOper();
    case Op_VecD: return new vecDOper();
    case Op_VecX: return new vecXOper();
  }
  ShouldNotReachHere();
  return NULL;
}

// No machine nodes are treated as pure reg-to-reg moves on AArch64.
bool Matcher::is_reg2reg_move(MachNode* m) {
  return false;
}

// A generic vector operand is one declared with the VREG operand class.
bool Matcher::is_generic_vector(MachOper* opnd)  {
  return opnd->opcode() == VREG;
}
 2426 
 2427 // Return whether or not this register is ever used as an argument.
 2428 // This function is used on startup to build the trampoline stubs in
 2429 // generateOptoStub.  Registers not mentioned will be killed by the VM
 2430 // call in the trampoline, and arguments in those registers not be
 2431 // available to the callee.
 2432 bool Matcher::can_be_java_arg(int reg)
 2433 {
 2434   return
 2435     reg ==  R0_num || reg == R0_H_num ||
 2436     reg ==  R1_num || reg == R1_H_num ||
 2437     reg ==  R2_num || reg == R2_H_num ||
 2438     reg ==  R3_num || reg == R3_H_num ||
 2439     reg ==  R4_num || reg == R4_H_num ||
 2440     reg ==  R5_num || reg == R5_H_num ||
 2441     reg ==  R6_num || reg == R6_H_num ||
 2442     reg ==  R7_num || reg == R7_H_num ||
 2443     reg ==  V0_num || reg == V0_H_num ||
 2444     reg ==  V1_num || reg == V1_H_num ||
 2445     reg ==  V2_num || reg == V2_H_num ||
 2446     reg ==  V3_num || reg == V3_H_num ||
 2447     reg ==  V4_num || reg == V4_H_num ||
 2448     reg ==  V5_num || reg == V5_H_num ||
 2449     reg ==  V6_num || reg == V6_H_num ||
 2450     reg ==  V7_num || reg == V7_H_num;
 2451 }
 2452 
// Any register that can carry a Java argument can also be spilled.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}

// Integer register pressure threshold used by C2's register allocator.
uint Matcher::int_pressure_limit()
{
  // JDK-8183543: When taking the number of available registers as int
  // register pressure threshold, the jtreg test:
  // test/hotspot/jtreg/compiler/regalloc/TestC2IntPressure.java
  // failed due to C2 compilation failure with
  // "COMPILE SKIPPED: failed spill-split-recycle sanity check".
  //
  // A derived pointer is live at CallNode and then is flagged by RA
  // as a spilled LRG. Spilling heuristics(Spill-USE) explicitly skip
  // derived pointers and lastly fail to spill after reaching maximum
  // number of iterations. Lowering the default pressure threshold to
  // (_NO_SPECIAL_REG32_mask.Size() minus 1) forces CallNode to become
  // a high register pressure area of the code so that split_DEF can
  // generate DefinitionSpillCopy for the derived pointer.
  uint default_int_pressure_threshold = _NO_SPECIAL_REG32_mask.Size() - 1;
  if (!PreserveFramePointer) {
    // When PreserveFramePointer is off, frame pointer is allocatable,
    // but different from other SOC registers, it is excluded from
    // fatproj's mask because its save type is No-Save. Decrease 1 to
    // ensure high pressure at fatproj when PreserveFramePointer is off.
    // See check_pressure_at_fatproj().
    default_int_pressure_threshold--;
  }
  return (INTPRESSURE == -1) ? default_int_pressure_threshold : INTPRESSURE;
}

// Float register pressure threshold; overridable with -XX:FLOATPRESSURE.
uint Matcher::float_pressure_limit()
{
  // _FLOAT_REG_mask is generated by adlc from the float_reg register class.
  return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.Size() : FLOATPRESSURE;
}
 2490 
 2491 bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
 2492   return false;
 2493 }
 2494 
// Register for DIVI projection of divmodI.
// Unreachable on AArch64 — presumably divmodI is never matched here;
// the return only satisfies the signature.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
 2499 
// Register for MODI projection of divmodI.
// Unreachable on AArch64; the return only satisfies the signature.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
 2505 
// Register for DIVL projection of divmodL.
// Unreachable on AArch64; the return only satisfies the signature.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
 2511 
// Register for MODL projection of divmodL.
// Unreachable on AArch64; the return only satisfies the signature.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
 2517 
// Mask for the register that preserves SP across a method handle
// invoke: the frame pointer on AArch64.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
 2521 
 2522 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
 2523   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
 2524     Node* u = addp->fast_out(i);
 2525     if (u->is_LoadStore()) {
 2526       // On AArch64, LoadStoreNodes (i.e. compare and swap
 2527       // instructions) only take register indirect as an operand, so
 2528       // any attempt to use an AddPNode as an input to a LoadStoreNode
 2529       // must fail.
 2530       return false;
 2531     }
 2532     if (u->is_Mem()) {
 2533       int opsize = u->as_Mem()->memory_size();
 2534       assert(opsize > 0, "unexpected memory operand size");
 2535       if (u->as_Mem()->memory_size() != (1<<shift)) {
 2536         return false;
 2537       }
 2538     }
 2539   }
 2540   return true;
 2541 }
 2542 
 2543 // Convert BootTest condition to Assembler condition.
 2544 // Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
 2545 Assembler::Condition to_assembler_cond(BoolTest::mask cond) {
 2546   Assembler::Condition result;
 2547   switch(cond) {
 2548     case BoolTest::eq:
 2549       result = Assembler::EQ; break;
 2550     case BoolTest::ne:
 2551       result = Assembler::NE; break;
 2552     case BoolTest::le:
 2553       result = Assembler::LE; break;
 2554     case BoolTest::ge:
 2555       result = Assembler::GE; break;
 2556     case BoolTest::lt:
 2557       result = Assembler::LT; break;
 2558     case BoolTest::gt:
 2559       result = Assembler::GT; break;
 2560     case BoolTest::ule:
 2561       result = Assembler::LS; break;
 2562     case BoolTest::uge:
 2563       result = Assembler::HS; break;
 2564     case BoolTest::ult:
 2565       result = Assembler::LO; break;
 2566     case BoolTest::ugt:
 2567       result = Assembler::HI; break;
 2568     case BoolTest::overflow:
 2569       result = Assembler::VS; break;
 2570     case BoolTest::no_overflow:
 2571       result = Assembler::VC; break;
 2572     default:
 2573       ShouldNotReachHere();
 2574       return Assembler::Condition(-1);
 2575   }
 2576 
 2577   // Check conversion
 2578   if (cond & BoolTest::unsigned_compare) {
 2579     assert(cmpOpUOper((BoolTest::mask)((int)cond & ~(BoolTest::unsigned_compare))).ccode() == result, "Invalid conversion");
 2580   } else {
 2581     assert(cmpOpOper(cond).ccode() == result, "Invalid conversion");
 2582   }
 2583 
 2584   return result;
 2585 }
 2586 
 2587 // Binary src (Replicate con)
 2588 bool is_valid_sve_arith_imm_pattern(Node* n, Node* m) {
 2589   if (n == NULL || m == NULL) {
 2590     return false;
 2591   }
 2592 
 2593   if (UseSVE == 0 || !VectorNode::is_invariant_vector(m)) {
 2594     return false;
 2595   }
 2596 
 2597   Node* imm_node = m->in(1);
 2598   if (!imm_node->is_Con()) {
 2599     return false;
 2600   }
 2601 
 2602   const Type* t = imm_node->bottom_type();
 2603   if (!(t->isa_int() || t->isa_long())) {
 2604     return false;
 2605   }
 2606 
 2607   switch (n->Opcode()) {
 2608   case Op_AndV:
 2609   case Op_OrV:
 2610   case Op_XorV: {
 2611     Assembler::SIMD_RegVariant T = Assembler::elemType_to_regVariant(Matcher::vector_element_basic_type(n));
 2612     uint64_t value = t->isa_long() ? (uint64_t)imm_node->get_long() : (uint64_t)imm_node->get_int();
 2613     return Assembler::operand_valid_for_sve_logical_immediate(Assembler::regVariant_to_elemBits(T), value);
 2614   }
 2615   case Op_AddVB:
 2616     return (imm_node->get_int() <= 255 && imm_node->get_int() >= -255);
 2617   case Op_AddVS:
 2618   case Op_AddVI:
 2619     return Assembler::operand_valid_for_sve_add_sub_immediate((int64_t)imm_node->get_int());
 2620   case Op_AddVL:
 2621     return Assembler::operand_valid_for_sve_add_sub_immediate(imm_node->get_long());
 2622   default:
 2623     return false;
 2624   }
 2625 }
 2626 
 2627 // (XorV src (Replicate m1))
 2628 // (XorVMask src (MaskAll m1))
 2629 bool is_vector_bitwise_not_pattern(Node* n, Node* m) {
 2630   if (n != NULL && m != NULL) {
 2631     return (n->Opcode() == Op_XorV || n->Opcode() == Op_XorVMask) &&
 2632            VectorNode::is_all_ones_vector(m);
 2633   }
 2634   return false;
 2635 }
 2636 
 2637 // Should the matcher clone input 'm' of node 'n'?
 2638 bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
 2639   if (is_vshift_con_pattern(n, m) ||
 2640       is_vector_bitwise_not_pattern(n, m) ||
 2641       is_valid_sve_arith_imm_pattern(n, m)) {
 2642     mstack.push(m, Visit);
 2643     return true;
 2644   }
 2645   return false;
 2646 }
 2647 
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  // Plain base + constant offset is handled by the shared helper.
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  // Case 1: (AddP base (LShiftL idx con)).  Cloneable only if every
  // memory use of this AddP can encode the scaled index — see
  // size_fits_all_mem_uses() — and the shift is used solely for
  // addressing.
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    // Also fold a ConvI2L feeding the shift into the addressing mode
    // when it too has no other uses.
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    // Case 2: (AddP base (ConvI2L idx)) — unscaled sign-extended index.
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
 2688 
// Emit the volatile load/store INSN of REG at register-indirect
// [BASE].  Volatile accesses permit only register-indirect addressing,
// so INDEX/SCALE/DISP must all be empty — enforced by the guarantees.
// Note: the macro declares `_masm` in the enclosing scope; enc_classes
// that follow the macro with further `__` statements rely on that.
// SCRATCH is accepted for the callers' convenience but unused here.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  C2_MacroAssembler _masm(&cbuf);                                       \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
 2697 
 2698 
 2699 static Address mem2address(int opcode, Register base, int index, int size, int disp)
 2700   {
 2701     Address::extend scale;
 2702 
 2703     // Hooboy, this is fugly.  We need a way to communicate to the
 2704     // encoder that the index needs to be sign extended, so we have to
 2705     // enumerate all the cases.
 2706     switch (opcode) {
 2707     case INDINDEXSCALEDI2L:
 2708     case INDINDEXSCALEDI2LN:
 2709     case INDINDEXI2L:
 2710     case INDINDEXI2LN:
 2711       scale = Address::sxtw(size);
 2712       break;
 2713     default:
 2714       scale = Address::lsl(size);
 2715     }
 2716 
 2717     if (index == -1) {
 2718       return Address(base, disp);
 2719     } else {
 2720       assert(disp == 0, "unsupported address mode: disp = %d", disp);
 2721       return Address(base, as_Register(index), scale);
 2722     }
 2723   }
 2724 
 2725 
// Member-function-pointer types for the loadStore() helpers below:
// integer access taking an Address,
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
// integer access taking a bare base register,
typedef void (MacroAssembler::* mem_insn2)(Register Rt, Register adr);
// float/double access,
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
// and SIMD/vector access with an explicit register variant.
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
 2731 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  //
  // Emits 'insn' accessing size_in_memory bytes for integer register
  // 'reg' at the operand described by (opcode, base, index, scale,
  // disp).
  static void loadStore(C2_MacroAssembler masm, mem_insn insn,
                        Register reg, int opcode,
                        Register base, int index, int scale, int disp,
                        int size_in_memory)
  {
    // Turn the decomposed operand into an Address, honouring any
    // sign extension of the index implied by the memory opcode.
    Address addr = mem2address(opcode, base, index, scale, disp);
    if (addr.getMode() == Address::base_plus_offset) {
      /* If we get an out-of-range offset it is a bug in the compiler,
         so we assert here. */
      assert(Address::offset_ok_for_immed(addr.offset(), exact_log2(size_in_memory)),
             "c2 compiler bug");
      /* Fix up any out-of-range offsets. */
      assert_different_registers(rscratch1, base);
      assert_different_registers(rscratch1, reg);
      addr = masm.legitimize_address(addr, size_in_memory, rscratch1);
    }
    (masm.*insn)(reg, addr);
  }
 2753 
  // Float/double variant of loadStore().  Unlike the integer variant
  // it decodes the sign-extension cases inline rather than via
  // mem2address(); note that only the scaled I2L opcodes are treated
  // as sign-extending here.
  static void loadStore(C2_MacroAssembler masm, mem_float_insn insn,
                        FloatRegister reg, int opcode,
                        Register base, int index, int size, int disp,
                        int size_in_memory)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      /* If we get an out-of-range offset it is a bug in the compiler,
         so we assert here. */
      assert(Address::offset_ok_for_immed(disp, exact_log2(size_in_memory)), "c2 compiler bug");
      /* Fix up any out-of-range offsets. */
      assert_different_registers(rscratch1, base);
      Address addr = Address(base, disp);
      addr = masm.legitimize_address(addr, size_in_memory, rscratch1);
      (masm.*insn)(reg, addr);
    } else {
      // With an index register a displacement cannot be encoded.
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
 2784 
 2785   static void loadStore(C2_MacroAssembler masm, mem_vector_insn insn,
 2786                         FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
 2787                         int opcode, Register base, int index, int size, int disp)
 2788   {
 2789     if (index == -1) {
 2790       (masm.*insn)(reg, T, Address(base, disp));
 2791     } else {
 2792       assert(disp == 0, "unsupported address mode");
 2793       (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
 2794     }
 2795   }
 2796 
 2797 %}
 2798 
 2799 
 2800 
 2801 //----------ENCODING BLOCK-----------------------------------------------------
 2802 // This block specifies the encoding classes used by the compiler to
 2803 // output byte streams.  Encoding classes are parameterized macros
 2804 // used by Machine Instruction Nodes in order to generate the bit
 2805 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  Four interfaces are
// currently supported: REG_INTER, CONST_INTER, MEMORY_INTER, and
// COND_INTER.  REG_INTER causes an operand to generate a function
 2809 // which returns its register number when queried.  CONST_INTER causes
 2810 // an operand to generate a function which returns the value of the
 2811 // constant when queried.  MEMORY_INTER causes an operand to generate
 2812 // four functions which return the Base Register, the Index Register,
 2813 // the Scale Value, and the Offset Value of the operand when queried.
 2814 // COND_INTER causes an operand to generate six functions which return
 2815 // the encoding code (ie - encoding bits for the instruction)
 2816 // associated with each basic boolean condition for a conditional
 2817 // instruction.
 2818 //
 2819 // Instructions specify two basic values for encoding.  Again, a
 2820 // function is available to check if the constant displacement is an
 2821 // oop. They use the ins_encode keyword to specify their encoding
 2822 // classes (which must be a sequence of enc_class names, and their
 2823 // parameters, specified in the encoding block), and they use the
 2824 // opcode keyword to specify, in order, their primary, secondary, and
 2825 // tertiary opcode.  Only the opcode sections which a particular
 2826 // instruction needs for encoding need to be specified.
 2827 encode %{
 2828   // Build emit functions for each basic byte or larger field in the
 2829   // intel encoding scheme (opcode, rm, sib, immediate), and call them
 2830   // from C++ code in the enc_class source block.  Emit functions will
 2831   // live in the main source block for now.  In future, we can
 2832   // generalize this by adding a syntax that specifies the sizes of
 2833   // fields in an order, so that the adlc can build the emit functions
 2834   // automagically
 2835 
  // catch all for unimplemented encodings: delegates to
  // MacroAssembler::unimplemented so a missing enc_class is reported
  // at run time instead of silently emitting nothing.
  enc_class enc_unimplemented %{
    C2_MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
 2841 
  // BEGIN Non-volatile memory access
  //
  // NOTE: the encoding classes in this section are generated from
  // ad_encode.m4; change the m4 source and regenerate rather than
  // hand-editing them.  Each one simply forwards to a loadStore()
  // helper, passing the access size in bytes as the final argument.

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsb(iRegI dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrb(iRegI dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrb(iRegL dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrshw(iRegI dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsh(iRegI dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrh(iRegI dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrh(iRegL dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrw(iRegI dst, memory4 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrw(iRegL dst, memory4 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsw(iRegL dst, memory4 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldr(iRegL dst, memory8 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrs(vRegF dst, memory4 mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrd(vRegD dst, memory8 mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strb(iRegI src, memory1 mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strb0(memory1 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strh(iRegI src, memory2 mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strh0(memory2 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strw(iRegI src, memory4 mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strw0(memory4 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_str(iRegL src, memory8 mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      C2_MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_str0(memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strs(vRegF src, memory4 mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strd(vRegD src, memory8 mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strb0_ordered(memory4 mem) %{
      C2_MacroAssembler _masm(&cbuf);
      __ membar(Assembler::StoreStore);
      loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // END Non-volatile memory access
 3054 
  // Vector loads and stores
  //
  // The H/S/D/Q suffix selects the MacroAssembler::SIMD_RegVariant
  // passed to the vector loadStore() helper, and with it the access
  // width.
  enc_class aarch64_enc_ldrvH(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvS(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvH(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvS(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
 3103 
 3104   // volatile loads and stores
 3105 
 3106   enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
 3107     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3108                  rscratch1, stlrb);
 3109   %}
 3110 
 3111   enc_class aarch64_enc_stlrb0(memory mem) %{
 3112     MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3113                  rscratch1, stlrb);
 3114   %}
 3115 
 3116   enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
 3117     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3118                  rscratch1, stlrh);
 3119   %}
 3120 
 3121   enc_class aarch64_enc_stlrh0(memory mem) %{
 3122     MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3123                  rscratch1, stlrh);
 3124   %}
 3125 
 3126   enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
 3127     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3128                  rscratch1, stlrw);
 3129   %}
 3130 
 3131   enc_class aarch64_enc_stlrw0(memory mem) %{
 3132     MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3133                  rscratch1, stlrw);
 3134   %}
 3135 
 3136   enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
 3137     Register dst_reg = as_Register($dst$$reg);
 3138     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3139              rscratch1, ldarb);
 3140     __ sxtbw(dst_reg, dst_reg);
 3141   %}
 3142 
 3143   enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
 3144     Register dst_reg = as_Register($dst$$reg);
 3145     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3146              rscratch1, ldarb);
 3147     __ sxtb(dst_reg, dst_reg);
 3148   %}
 3149 
 3150   enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
 3151     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3152              rscratch1, ldarb);
 3153   %}
 3154 
 3155   enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
 3156     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3157              rscratch1, ldarb);
 3158   %}
 3159 
 3160   enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
 3161     Register dst_reg = as_Register($dst$$reg);
 3162     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3163              rscratch1, ldarh);
 3164     __ sxthw(dst_reg, dst_reg);
 3165   %}
 3166 
 3167   enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
 3168     Register dst_reg = as_Register($dst$$reg);
 3169     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3170              rscratch1, ldarh);
 3171     __ sxth(dst_reg, dst_reg);
 3172   %}
 3173 
 3174   enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
 3175     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3176              rscratch1, ldarh);
 3177   %}
 3178 
  // Volatile load of an unsigned halfword into a 64-bit register.
  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}
 3183 
  // Volatile 32-bit load (ldarw) into an int register.
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}
 3188 
  // Volatile 32-bit load (ldarw) into a long register (zero-extended).
  // NOTE(review): this reuses the name aarch64_enc_ldarw with an iRegL
  // operand (the previous definition takes iRegI); confirm ADLC resolves
  // the duplicate name as intended.
  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}
 3193 
  // Volatile 64-bit load (ldar) into a long register.
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}
 3198 
  // Volatile float load: ldarw into rscratch1, then move the bits into
  // the destination FP register (there is no FP load-acquire form).
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}
 3204 
  // Volatile double load: ldar into rscratch1, then move the bits into
  // the destination FP register.
  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
 3210 
  // Volatile 64-bit store (store-release, stlr).
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    // (sp is not a valid source for stlr), so copy sp into rscratch2
    // first and store that instead.
    if (src_reg == r31_sp) {
      C2_MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
 3224 
  // Volatile store of the constant zero (store-release of zr).
  enc_class aarch64_enc_stlr0(memory mem) %{
    MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
 3229 
  // Volatile float store: move the FP bits into rscratch2, then
  // store-release word (there is no FP store-release form).
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      // Inner scope: MOV_VOLATILE declares its own assembler, so this
      // _masm must go out of scope first.
      C2_MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}
 3239 
  // Volatile double store: move the FP bits into rscratch2, then
  // store-release doubleword.
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      C2_MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
 3249 
 3250   // synchronized read/update encodings
 3251 
  // Load-acquire-exclusive of a 64-bit value.  ldaxr only accepts a bare
  // base register, so any index/displacement is folded into rscratch1
  // with lea first.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // base + disp and the scaled index cannot be combined in one
        // addressing mode, so compute the address in two steps.
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
 3280 
  // Store-release-exclusive of a 64-bit value.  Like ldaxr above, the
  // address must be a bare register, so rscratch2 holds the computed
  // address; rscratch1 receives the exclusive-store status (0 = success).
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    // Set flags so that EQ means the exclusive store succeeded.
    __ cmpw(rscratch1, zr);
  %}
 3310 
  // 64-bit compare-and-swap; release-only ordering (see the _acq variants
  // below for acquiring forms).  The address must be a plain base register.
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3318 
  // 32-bit compare-and-swap; release-only ordering.
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3326 
  // 16-bit (halfword) compare-and-swap; release-only ordering.
  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3334 
  // 8-bit (byte) compare-and-swap; release-only ordering.
  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3342 
 3343 
 3344   // The only difference between aarch64_enc_cmpxchg and
 3345   // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
 3346   // CompareAndSwap sequence to serve as a barrier on acquiring a
 3347   // lock.
  // 64-bit compare-and-swap with acquire semantics (acquire + release).
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3355 
  // 32-bit compare-and-swap with acquire semantics.
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3363 
  // 16-bit compare-and-swap with acquire semantics.
  enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3371 
  // 8-bit compare-and-swap with acquire semantics.
  enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3379 
 3380   // auxiliary used for CompareAndSwapX to set result register
 3381   enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
 3382     C2_MacroAssembler _masm(&cbuf);
 3383     Register res_reg = as_Register($res$$reg);
 3384     __ cset(res_reg, Assembler::EQ);
 3385   %}
 3386 
 3387   // prefetch encodings
 3388 
 3389   enc_class aarch64_enc_prefetchw(memory mem) %{
 3390     C2_MacroAssembler _masm(&cbuf);
 3391     Register base = as_Register($mem$$base);
 3392     int index = $mem$$index;
 3393     int scale = $mem$$scale;
 3394     int disp = $mem$$disp;
 3395     if (index == -1) {
 3396       __ prfm(Address(base, disp), PSTL1KEEP);
 3397     } else {
 3398       Register index_reg = as_Register(index);
 3399       if (disp == 0) {
 3400         __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
 3401       } else {
 3402         __ lea(rscratch1, Address(base, disp));
 3403 	__ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
 3404       }
 3405     }
 3406   %}
 3407 
  /// mov encodings
 3409 
 3410   enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
 3411     C2_MacroAssembler _masm(&cbuf);
 3412     uint32_t con = (uint32_t)$src$$constant;
 3413     Register dst_reg = as_Register($dst$$reg);
 3414     if (con == 0) {
 3415       __ movw(dst_reg, zr);
 3416     } else {
 3417       __ movw(dst_reg, con);
 3418     }
 3419   %}
 3420 
 3421   enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
 3422     C2_MacroAssembler _masm(&cbuf);
 3423     Register dst_reg = as_Register($dst$$reg);
 3424     uint64_t con = (uint64_t)$src$$constant;
 3425     if (con == 0) {
 3426       __ mov(dst_reg, zr);
 3427     } else {
 3428       __ mov(dst_reg, con);
 3429     }
 3430   %}
 3431 
  // Load a pointer constant, dispatching on its relocation type:
  // oops and metadata get relocatable moves; plain addresses use either
  // a literal mov or adrp+add depending on reachability.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      // NULL and the marker value 1 are handled by dedicated encodings
      // (mov_p0 / mov_p1 below); they must not reach this one.
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (! __ is_valid_AArch64_address(con) ||
            con < (address)(uintptr_t)os::vm_page_size()) {
          // Unreachable via adrp, or inside the protected first page:
          // fall back to a full literal move.
          __ mov(dst_reg, con);
        } else {
          uint64_t offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
 3457 
 3458   enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
 3459     C2_MacroAssembler _masm(&cbuf);
 3460     Register dst_reg = as_Register($dst$$reg);
 3461     __ mov(dst_reg, zr);
 3462   %}
 3463 
 3464   enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
 3465     C2_MacroAssembler _masm(&cbuf);
 3466     Register dst_reg = as_Register($dst$$reg);
 3467     __ mov(dst_reg, (uint64_t)1);
 3468   %}
 3469 
  // Load the GC card table's byte map base address into the destination.
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    C2_MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}
 3474 
  // Load a narrow (compressed) oop constant; must carry an oop relocation.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      // Narrow null is handled by mov_n0 below.
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}
 3487 
 3488   enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
 3489     C2_MacroAssembler _masm(&cbuf);
 3490     Register dst_reg = as_Register($dst$$reg);
 3491     __ mov(dst_reg, zr);
 3492   %}
 3493 
  // Load a narrow (compressed) klass constant; must carry a metadata
  // relocation.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
 3506 
 3507   // arithmetic encodings
 3508 
  // 32-bit add/subtract of an immediate.  One encoding serves both ops:
  // $primary (set by the matching instruct) selects subtract by negating
  // the constant, and the sign of the result picks addw vs subw so the
  // emitted immediate is always non-negative.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}
 3522 
  // 64-bit add/subtract of an immediate; same $primary trick as the
  // 32-bit version above.  The constant is narrowed to int32_t --
  // immLAddSub presumably restricts it to that range; confirm against
  // the operand definition.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
 3536 
 3537   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
 3538     C2_MacroAssembler _masm(&cbuf);
 3539    Register dst_reg = as_Register($dst$$reg);
 3540    Register src1_reg = as_Register($src1$$reg);
 3541    Register src2_reg = as_Register($src2$$reg);
 3542     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
 3543   %}
 3544 
 3545   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
 3546     C2_MacroAssembler _masm(&cbuf);
 3547    Register dst_reg = as_Register($dst$$reg);
 3548    Register src1_reg = as_Register($src1$$reg);
 3549    Register src2_reg = as_Register($src2$$reg);
 3550     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
 3551   %}
 3552 
 3553   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
 3554     C2_MacroAssembler _masm(&cbuf);
 3555    Register dst_reg = as_Register($dst$$reg);
 3556    Register src1_reg = as_Register($src1$$reg);
 3557    Register src2_reg = as_Register($src2$$reg);
 3558     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
 3559   %}
 3560 
 3561   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
 3562     C2_MacroAssembler _masm(&cbuf);
 3563    Register dst_reg = as_Register($dst$$reg);
 3564    Register src1_reg = as_Register($src1$$reg);
 3565    Register src2_reg = as_Register($src2$$reg);
 3566     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
 3567   %}
 3568 
 3569   // compare instruction encodings
 3570 
 3571   enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
 3572     C2_MacroAssembler _masm(&cbuf);
 3573     Register reg1 = as_Register($src1$$reg);
 3574     Register reg2 = as_Register($src2$$reg);
 3575     __ cmpw(reg1, reg2);
 3576   %}
 3577 
  // 32-bit compare against an add/sub-encodable immediate: implemented
  // as a flag-setting subtract (or add for a negative constant) with the
  // result discarded into zr.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      // Negative constants are compared by adding the magnitude;
      // immIAddSub presumably keeps -val in range -- confirm.
      __ addsw(zr, reg, -val);
    }
  %}
 3588 
 3589   enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
 3590     C2_MacroAssembler _masm(&cbuf);
 3591     Register reg1 = as_Register($src1$$reg);
 3592     uint32_t val = (uint32_t)$src2$$constant;
 3593     __ movw(rscratch1, val);
 3594     __ cmpw(reg1, rscratch1);
 3595   %}
 3596 
 3597   enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
 3598     C2_MacroAssembler _masm(&cbuf);
 3599     Register reg1 = as_Register($src1$$reg);
 3600     Register reg2 = as_Register($src2$$reg);
 3601     __ cmp(reg1, reg2);
 3602   %}
 3603 
  // 64-bit compare against a 12-bit add/sub immediate.  Non-negative
  // values use subs directly; negative values add the magnitude.  The
  // 'val != -val' test is false only for 0 (handled above) and
  // Long.MIN_VALUE, whose magnitude cannot be negated -- that one goes
  // through a register.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      __ orr(rscratch1, zr, (uint64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}
 3618 
 3619   enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
 3620     C2_MacroAssembler _masm(&cbuf);
 3621     Register reg1 = as_Register($src1$$reg);
 3622     uint64_t val = (uint64_t)$src2$$constant;
 3623     __ mov(rscratch1, val);
 3624     __ cmp(reg1, rscratch1);
 3625   %}
 3626 
 3627   enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
 3628     C2_MacroAssembler _masm(&cbuf);
 3629     Register reg1 = as_Register($src1$$reg);
 3630     Register reg2 = as_Register($src2$$reg);
 3631     __ cmp(reg1, reg2);
 3632   %}
 3633 
 3634   enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
 3635     C2_MacroAssembler _masm(&cbuf);
 3636     Register reg1 = as_Register($src1$$reg);
 3637     Register reg2 = as_Register($src2$$reg);
 3638     __ cmpw(reg1, reg2);
 3639   %}
 3640 
 3641   enc_class aarch64_enc_testp(iRegP src) %{
 3642     C2_MacroAssembler _masm(&cbuf);
 3643     Register reg = as_Register($src$$reg);
 3644     __ cmp(reg, zr);
 3645   %}
 3646 
 3647   enc_class aarch64_enc_testn(iRegN src) %{
 3648     C2_MacroAssembler _masm(&cbuf);
 3649     Register reg = as_Register($src$$reg);
 3650     __ cmpw(reg, zr);
 3651   %}
 3652 
 3653   enc_class aarch64_enc_b(label lbl) %{
 3654     C2_MacroAssembler _masm(&cbuf);
 3655     Label *L = $lbl$$label;
 3656     __ b(*L);
 3657   %}
 3658 
 3659   enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
 3660     C2_MacroAssembler _masm(&cbuf);
 3661     Label *L = $lbl$$label;
 3662     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
 3663   %}
 3664 
 3665   enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
 3666     C2_MacroAssembler _masm(&cbuf);
 3667     Label *L = $lbl$$label;
 3668     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
 3669   %}
 3670 
  // Slow-path subtype check: walks the secondary supers.  On a miss,
  // control falls through to the 'miss' label; when $primary is set the
  // hit path additionally zeroes the result register.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     C2_MacroAssembler _masm(&cbuf);
     // NULL success label: on a hit, execution continues after the call
     // rather than branching away.
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
 3688 
  // Static (or opt-virtual) Java call.  Distinguishes three cases:
  // runtime-stub calls, the elided ensureMaterializedForStackWalk
  // intrinsic, and real Java methods (which also get an interpreter stub).
  enc_class aarch64_enc_java_static_call(method meth) %{
    C2_MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else if (_method->intrinsic_id() == vmIntrinsicID::_ensureMaterializedForStackWalk) {
      // The NOP here is purely to ensure that eliding a call to
      // JVM_EnsureMaterializedForStackWalk doesn't change the code size.
      __ nop();
      __ block_comment("call JVM_EnsureMaterializedForStackWalk (elided)");
    } else {
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
      if (CodeBuffer::supports_shared_stubs() && _method->can_be_statically_bound()) {
        // Calls of the same statically bound method can share
        // a stub to the interpreter.
        cbuf.shared_stub_to_interp_for(_method, call - cbuf.insts_begin());
      } else {
        // Emit stub for static call
        address stub = CompiledStaticCall::emit_to_interp_stub(cbuf, call);
        if (stub == NULL) {
          ciEnv::current()->record_failure("CodeCache is full");
          return;
        }
      }
    }

    __ post_call_nop();

    // Only non uncommon_trap calls need to reinitialize ptrue.
    if (Compile::current()->max_vector_size() > 0 && uncommon_trap_request() == 0) {
      __ reinitialize_ptrue();
    }
  %}
 3736 
  // Virtual/interface Java call through the inline-cache machinery.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    C2_MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      // ic_call reports NULL when stub emission fails for lack of space.
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
    __ post_call_nop();
    // SVE predicate register ptrue may have been clobbered by the callee.
    if (Compile::current()->max_vector_size() > 0) {
      __ reinitialize_ptrue();
    }
  %}
 3750 
  // Post-call epilogue; the stack-depth verification is not implemented
  // on AArch64 (call_Unimplemented traps if VerifyStackAtCalls is set).
  enc_class aarch64_enc_call_epilog() %{
    C2_MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
 3758 
  // Call from compiled Java code into the runtime / generated stubs.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    C2_MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blr
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target lives in the code cache: reachable via a trampoline call.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
      __ post_call_nop();
    } else {
      // Arbitrary address: push a (zr, return-pc) pair as a breadcrumb,
      // then call indirectly through rscratch1.
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blr(rscratch1);
      __ bind(retaddr);
      __ post_call_nop();
      // Pop the breadcrumb pair pushed above.
      __ add(sp, sp, 2 * wordSize);
    }
    if (Compile::current()->max_vector_size() > 0) {
      __ reinitialize_ptrue();
    }
  %}
 3790 
  // Jump to the shared rethrow stub (never returns here).
  enc_class aarch64_enc_rethrow() %{
    C2_MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}
 3795 
  // Method return; in debug builds, first verify the SVE ptrue predicate
  // register still holds its expected all-true value.
  enc_class aarch64_enc_ret() %{
    C2_MacroAssembler _masm(&cbuf);
#ifdef ASSERT
    if (Compile::current()->max_vector_size() > 0) {
      __ verify_ptrue();
    }
#endif
    __ ret(lr);
  %}
 3805 
 3806   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
 3807     C2_MacroAssembler _masm(&cbuf);
 3808     Register target_reg = as_Register($jump_target$$reg);
 3809     __ br(target_reg);
 3810   %}
 3811 
  // Tail jump used for exception forwarding: pass the return address to
  // the handler in r3, then jump to the target.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    C2_MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
 3821 
 3822 %}
 3823 
 3824 //----------FRAME--------------------------------------------------------------
 3825 // Definition of frame structure and management information.
 3826 //
 3827 //  S T A C K   L A Y O U T    Allocators stack-slot number
 3828 //                             |   (to get allocators register number
 3829 //  G  Owned by    |        |  v    add OptoReg::stack0())
 3830 //  r   CALLER     |        |
 3831 //  o     |        +--------+      pad to even-align allocators stack-slot
 3832 //  w     V        |  pad0  |        numbers; owned by CALLER
 3833 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
 3834 //  h     ^        |   in   |  5
 3835 //        |        |  args  |  4   Holes in incoming args owned by SELF
 3836 //  |     |        |        |  3
 3837 //  |     |        +--------+
 3838 //  V     |        | old out|      Empty on Intel, window on Sparc
 3839 //        |    old |preserve|      Must be even aligned.
 3840 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
 3841 //        |        |   in   |  3   area for Intel ret address
 3842 //     Owned by    |preserve|      Empty on Sparc.
 3843 //       SELF      +--------+
 3844 //        |        |  pad2  |  2   pad to align old SP
 3845 //        |        +--------+  1
 3846 //        |        | locks  |  0
 3847 //        |        +--------+----> OptoReg::stack0(), even aligned
 3848 //        |        |  pad1  | 11   pad to align new SP
 3849 //        |        +--------+
 3850 //        |        |        | 10
 3851 //        |        | spills |  9   spills
 3852 //        V        |        |  8   (pad0 slot for callee)
 3853 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
 3854 //        ^        |  out   |  7
 3855 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
 3856 //     Owned by    +--------+
 3857 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
 3858 //        |    new |preserve|      Must be even-aligned.
 3859 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
 3860 //        |        |        |
 3861 //
 3862 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
 3863 //         known from SELF's arguments and the Java calling convention.
 3864 //         Region 6-7 is determined per call site.
 3865 // Note 2: If the calling convention leaves holes in the incoming argument
 3866 //         area, those holes are owned by SELF.  Holes in the outgoing area
 3867 //         are owned by the CALLEE.  Holes should not be necessary in the
 3868 //         incoming area, as the Java calling convention is completely under
 3869 //         the control of the AD file.  Doubles can be sorted and packed to
 3870 //         avoid holes.  Holes in the outgoing arguments may be necessary for
 3871 //         varargs C calling conventions.
 3872 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
 3873 //         even aligned with pad0 as needed.
 3874 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
 3875 //           (the latter is true on Intel but is it false on AArch64?)
 3876 //         region 6-11 is even aligned; it may be padded out more so that
 3877 //         the region from SP to FP meets the minimum stack alignment.
 3878 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
 3879 //         alignment.  Region 11, pad1, may be dynamically extended so that
 3880 //         SP meets the minimum alignment.
 3881 
frame %{
  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or Method for I2C.
  inline_cache_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  // NOTE(review): R31 names the stack pointer in this file's register
  // definitions -- confirm against the register block at the top.
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Tables indexed by ideal register opcode; lo/hi are the first and
    // second allocator registers of the returned value pair.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                      // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
 3953 
 3954 //----------ATTRIBUTES---------------------------------------------------------
 3955 //----------Operand Attributes-------------------------------------------------
// Default operand cost; individual operand definitions below override
// this with op_cost(0) where matching the operand should be free.
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
// Defaults applied to every instruct definition unless overridden.
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
 3971 
 3972 //----------OPERANDS-----------------------------------------------------------
 3973 // Operand definitions must precede instruction definitions for correct parsing
 3974 // in the ADLC because operands constitute user defined types which are used in
 3975 // instruction definitions.
 3976 
 3977 //----------Simple Operands----------------------------------------------------
 3978 
 3979 // Integer operands 32 bit
 3980 // 32 bit immediate
// Matches any 32-bit integer constant (no predicate restriction).
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4022 
// Shift values for add/sub extension shift
// The AArch64 extended-register forms of add/sub allow a left shift
// of 0..4 applied to the extended operand.
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Any 32-bit constant strictly greater than one.
operand immI_gt_1()
%{
  predicate(n->get_int() > 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Any 32-bit constant less than or equal to four.
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4053 
// Specific 32-bit constants, matched mostly as shift amounts or
// small masks in rules below.
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 63 == maximum shift amount for a 64-bit value.
operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 0xff — low-byte mask.
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 0xffff — low-halfword mask.
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Any strictly positive 32-bit constant.
operand immI_positive()
%{
  predicate(n->get_int() > 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4153 
// BoolTest condition for signed compare
// Matches a BoolTest::mask constant that is NOT one of the unsigned
// predicates (see Matcher::is_unsigned_booltest_pred).
operand immI_cmp_cond()
%{
  predicate(!Matcher::is_unsigned_booltest_pred(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// BoolTest condition for unsigned compare
operand immI_cmpU_cond()
%{
  predicate(Matcher::is_unsigned_booltest_pred(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4175 
// Long constant 0xff — low-byte mask.
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long constant 0xffff — low-halfword mask.
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long constant 0xffffffff — low-word mask.
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4205 
 4206 operand immL_bitmask()
 4207 %{
 4208   predicate((n->get_long() != 0)
 4209             && ((n->get_long() & 0xc000000000000000l) == 0)
 4210             && is_power_of_2(n->get_long() + 1));
 4211   match(ConL);
 4212 
 4213   op_cost(0);
 4214   format %{ %}
 4215   interface(CONST_INTER);
 4216 %}
 4217 
// Int immediate of the form 2^k - 1: non-zero contiguous low-order
// mask with bits 31:30 clear (so k <= 30).
operand immI_bitmask()
%{
  predicate((n->get_int() != 0)
            && ((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long immediate that is a non-zero 2^k - 1 mask fitting in the
// positive int range (value < 2^31).
operand immL_positive_bitmaskI()
%{
  predicate((n->get_long() != 0)
            && ((julong)n->get_long() < 0x80000000ULL)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4241 
// Scale values for scaled offset addressing modes (up to long but not quad)
// 0..3 corresponds to a left shift by log2 of the access size (1/2/4/8 bytes).
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
// Range is [-2^25, 2^25), i.e. the signed field of B/BL.
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
// Range is [-2^18, 2^18), i.e. the signed field of LDR (literal).
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4274 
// 5 bit signed integer
operand immI5()
%{
  predicate(Assembler::is_simm(n->get_int(), 5));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 7 bit unsigned integer
operand immIU7()
%{
  predicate(Assembler::is_uimm(n->get_int(), 7));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
// Range is [0, 2^12), the unsigned immediate field of LDR/STR.
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Same as immIU12 but for a long-typed constant.
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4317 
// Offset for scaled or unscaled immediate loads and stores
// In this family the second argument to Address::offset_ok_for_immed
// is log2 of the access size in bytes: 0 = byte, 1 = halfword,
// 2 = word, 3 = doubleword, 4 = 16-byte (quad) access.
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 1-byte access (same predicate as immIOffset:
// log2(1) == 0).
operand immIOffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 2-byte access.
operand immIOffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 4-byte access.
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an 8-byte access.
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 16-byte access.
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long-typed variants of the offset operands above.
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immLoffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immLoffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4438 
// 5 bit signed long integer
operand immL5()
%{
  predicate(Assembler::is_simm(n->get_long(), 5));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 7 bit unsigned long integer
operand immLU7()
%{
  predicate(Assembler::is_uimm(n->get_long(), 7));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4460 
// 8 bit signed value.
operand immI8()
%{
  predicate(n->get_int() <= 127 && n->get_int() >= -128);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 8 bit signed value (simm8), or #simm8 LSL 8.
// The second range is simm8 shifted left by 8: [-32768, 32512] with
// the low byte zero (32512 == 127 << 8).
operand immI8_shift8()
%{
  predicate((n->get_int() <= 127 && n->get_int() >= -128) ||
            (n->get_int() <= 32512 && n->get_int() >= -32768 && (n->get_int() & 0xff) == 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 8 bit signed value (simm8), or #simm8 LSL 8.
operand immL8_shift8()
%{
  predicate((n->get_long() <= 127 && n->get_long() >= -128) ||
            (n->get_long() <= 32512 && n->get_long() >= -32768 && (n->get_long() & 0xff) == 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4495 
// 8 bit integer valid for vector add sub immediate
operand immBAddSubV()
%{
  predicate(n->get_int() <= 255 && n->get_int() >= -255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for add sub immediate
// (i.e. encodable in the 12-bit, optionally-shifted ADD/SUB immediate field)
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((int64_t)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for vector add sub immediate
operand immIAddSubV()
%{
  predicate(Assembler::operand_valid_for_sve_add_sub_immediate((int64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4527 
// 32 bit unsigned integer valid for logical immediate

// Byte-sized logical immediate for SVE logical ops.
operand immBLog()
%{
  predicate(Assembler::operand_valid_for_sve_logical_immediate(BitsPerByte, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Short-sized logical immediate for SVE logical ops.
operand immSLog()
%{
  predicate(Assembler::operand_valid_for_sve_logical_immediate(BitsPerShort, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32-bit constant encodable as an AArch64 logical immediate (AND/ORR/EOR).
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4559 
// Integer operands 64 bit
// 64 bit immediate — matches any long constant.
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4603 
// 32 bit offset of pc in thread anchor

// Matches exactly the byte offset of last_Java_pc within the thread's
// frame anchor, used when writing the current PC to the anchor.
operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for addv subv immediate
operand immLAddSubV()
%{
  predicate(Assembler::operand_valid_for_sve_add_sub_immediate(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (uint64_t)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4657 
// Pointer operands
// Pointer Immediate — matches any pointer constant.
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            (CardTable::CardValue*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4728 
// Float and Double operands
// Double Immediate — matches any double constant.
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
// Bit-pattern compare, so -0.0d does NOT match.
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double constant encodable in the 8-bit FMOV immediate form.
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate — matches any float constant.
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
// Bit-pattern compare, so -0.0f does NOT match.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float constant encodable in the 8-bit FMOV immediate form.
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4789 
// Narrow pointer operands
// Narrow Pointer Immediate — any compressed-oop constant.
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Compressed klass pointer constant.
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4820 
// Integer 32 bit Register Operands
// Integer 32 bitRegister (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
// NOTE(review): unlike iRegINoSp/iRegL this operand declares no
// op_cost(0), so the file-wide op_attrib default applies — confirm
// whether the omission is intentional.
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  format %{ %}
  interface(REG_INTER);
%}
 4864 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// This operand is not allowed to use rfp even if
// rfp is not used to hold the frame pointer.
operand iRegPNoSpNoRfp()
%{
  constraint(ALLOC_IN_RC(no_special_no_rfp_ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4909 
// Pointer operands pinned to a single general register, used where a
// calling convention or runtime stub requires a specific register.

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4993 
// Long operands pinned to a single general register.

// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R2 only
operand iRegL_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R3 only
operand iRegL_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5048 
// Int operands pinned to a single general register.

// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5093 
 5094 
// Pointer Register Operands
// Narrow Pointer Register (compressed oops)
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow pointer pinned to R0.
operand iRegN_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow pointer pinned to R2.
operand iRegN_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow pointer pinned to R3.
operand iRegN_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5143 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Generic vector class. This will be used for
// all vector operands, including NEON and SVE.
operand vReg()
%{
  constraint(ALLOC_IN_RC(dynamic));
  match(VecA);
  match(VecD);
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Scalable (SVE) vector register.
operand vecA()
%{
  constraint(ALLOC_IN_RC(vectora_reg));
  match(VecA);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64-bit (D) NEON vector register.
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128-bit (X/Q) NEON vector register.
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5211 
// Double operands pinned to a specific V register, used where stub or
// intrinsic code requires fixed FP/SIMD registers.
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V4()
%{
  constraint(ALLOC_IN_RC(v4_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V5()
%{
  constraint(ALLOC_IN_RC(v5_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V6()
%{
  constraint(ALLOC_IN_RC(v6_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V7()
%{
  constraint(ALLOC_IN_RC(v7_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V8()
%{
  constraint(ALLOC_IN_RC(v8_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V9()
%{
  constraint(ALLOC_IN_RC(v9_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V10()
%{
  constraint(ALLOC_IN_RC(v10_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V11()
%{
  constraint(ALLOC_IN_RC(v11_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V12()
%{
  constraint(ALLOC_IN_RC(v12_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V13()
%{
  constraint(ALLOC_IN_RC(v13_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V14()
%{
  constraint(ALLOC_IN_RC(v14_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V15()
%{
  constraint(ALLOC_IN_RC(v15_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V16()
%{
  constraint(ALLOC_IN_RC(v16_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V17()
%{
  constraint(ALLOC_IN_RC(v17_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V18()
%{
  constraint(ALLOC_IN_RC(v18_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5382 
 5383 operand vRegD_V19()
 5384 %{
 5385   constraint(ALLOC_IN_RC(v19_reg));
 5386   match(RegD);
 5387   op_cost(0);
 5388   format %{ %}
 5389   interface(REG_INTER);
 5390 %}
 5391 
 5392 operand vRegD_V20()
 5393 %{
 5394   constraint(ALLOC_IN_RC(v20_reg));
 5395   match(RegD);
 5396   op_cost(0);
 5397   format %{ %}
 5398   interface(REG_INTER);
 5399 %}
 5400 
 5401 operand vRegD_V21()
 5402 %{
 5403   constraint(ALLOC_IN_RC(v21_reg));
 5404   match(RegD);
 5405   op_cost(0);
 5406   format %{ %}
 5407   interface(REG_INTER);
 5408 %}
 5409 
 5410 operand vRegD_V22()
 5411 %{
 5412   constraint(ALLOC_IN_RC(v22_reg));
 5413   match(RegD);
 5414   op_cost(0);
 5415   format %{ %}
 5416   interface(REG_INTER);
 5417 %}
 5418 
 5419 operand vRegD_V23()
 5420 %{
 5421   constraint(ALLOC_IN_RC(v23_reg));
 5422   match(RegD);
 5423   op_cost(0);
 5424   format %{ %}
 5425   interface(REG_INTER);
 5426 %}
 5427 
 5428 operand vRegD_V24()
 5429 %{
 5430   constraint(ALLOC_IN_RC(v24_reg));
 5431   match(RegD);
 5432   op_cost(0);
 5433   format %{ %}
 5434   interface(REG_INTER);
 5435 %}
 5436 
 5437 operand vRegD_V25()
 5438 %{
 5439   constraint(ALLOC_IN_RC(v25_reg));
 5440   match(RegD);
 5441   op_cost(0);
 5442   format %{ %}
 5443   interface(REG_INTER);
 5444 %}
 5445 
 5446 operand vRegD_V26()
 5447 %{
 5448   constraint(ALLOC_IN_RC(v26_reg));
 5449   match(RegD);
 5450   op_cost(0);
 5451   format %{ %}
 5452   interface(REG_INTER);
 5453 %}
 5454 
 5455 operand vRegD_V27()
 5456 %{
 5457   constraint(ALLOC_IN_RC(v27_reg));
 5458   match(RegD);
 5459   op_cost(0);
 5460   format %{ %}
 5461   interface(REG_INTER);
 5462 %}
 5463 
 5464 operand vRegD_V28()
 5465 %{
 5466   constraint(ALLOC_IN_RC(v28_reg));
 5467   match(RegD);
 5468   op_cost(0);
 5469   format %{ %}
 5470   interface(REG_INTER);
 5471 %}
 5472 
 5473 operand vRegD_V29()
 5474 %{
 5475   constraint(ALLOC_IN_RC(v29_reg));
 5476   match(RegD);
 5477   op_cost(0);
 5478   format %{ %}
 5479   interface(REG_INTER);
 5480 %}
 5481 
 5482 operand vRegD_V30()
 5483 %{
 5484   constraint(ALLOC_IN_RC(v30_reg));
 5485   match(RegD);
 5486   op_cost(0);
 5487   format %{ %}
 5488   interface(REG_INTER);
 5489 %}
 5490 
 5491 operand vRegD_V31()
 5492 %{
 5493   constraint(ALLOC_IN_RC(v31_reg));
 5494   match(RegD);
 5495   op_cost(0);
 5496   format %{ %}
 5497   interface(REG_INTER);
 5498 %}
 5499 
// SVE predicate register operand (pr_reg class).
operand pReg()
%{
  constraint(ALLOC_IN_RC(pr_reg));
  match(RegVectMask);
  match(pRegGov);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// SVE governing predicate register operand (gov_pr class); a governing
// predicate controls which lanes of a predicated vector operation are
// active.  pReg and pRegGov cross-match so either can satisfy the other.
operand pRegGov()
%{
  constraint(ALLOC_IN_RC(gov_pr));
  match(RegVectMask);
  match(pReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Governing predicate pinned to p0, for instructs that require that
// specific register.
operand pRegGov_P0()
%{
  constraint(ALLOC_IN_RC(p0_reg));
  match(RegVectMask);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Governing predicate pinned to p1.
operand pRegGov_P1()
%{
  constraint(ALLOC_IN_RC(p1_reg));
  match(RegVectMask);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5537 
 5538 // Flags register, used as output of signed compare instructions
 5539 
// note that on AArch64 we also use this register as the output for
// floating point compare instructions (CmpF CmpD). this ensures
 5542 // that ordered inequality tests use GT, GE, LT or LE none of which
 5543 // pass through cases where the result is unordered i.e. one or both
 5544 // inputs to the compare is a NaN. this means that the ideal code can
 5545 // replace e.g. a GT with an LE and not end up capturing the NaN case
 5546 // (where the comparison should always fail). EQ and NE tests are
 5547 // always generated in ideal code so that unordered folds into the NE
 5548 // case, matching the behaviour of AArch64 NE.
 5549 //
 5550 // This differs from x86 where the outputs of FP compares use a
 5551 // special FP flags registers and where compares based on this
 5552 // register are distinguished into ordered inequalities (cmpOpUCF) and
 5553 // EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
 5554 // to explicitly handle the unordered case in branches. x86 also has
 5555 // to include extra CMoveX rules to accept a cmpOpUCF input.
 5556 
// Condition flags operand used by signed integer compares (and, per the
// note above, FP compares as well).
operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}
 5566 
// Flags register, used as output of unsigned compare instructions.
// Same register class as rFlagsReg; the distinct operand type steers
// consumers to the unsigned condition codes (see cmpOpU below).
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
 5577 
 5578 // Special Registers
 5579 
// Method Register: pointer operand pinned to the inline-cache register class.
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5590 
// Thread Register: pointer operand pinned to the JavaThread register class.
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5600 
// Link-register operand: pointer operand pinned to lr.
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5609 
 5610 //----------Memory Operands----------------------------------------------------
 5611 
// Memory operand [$reg]: plain register-indirect, no index, no displacement.
// index(0xffffffff) is the ADLC convention for "no index register".
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}
 5625 
// Memory operand: base + (sign-extended int index << scale).  The
// predicate only accepts the AddP when the scaled offset encoding suits
// every memory use of the address.
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// Memory operand: base + (long index << scale), same predicate as above.
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}
 5655 
// Memory operand: base + sign-extended int index (no scaling).
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Memory operand: base + long index (no scaling).
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
 5683 
// Memory operands [$reg, $off] with an immediate int offset.  The numeric
// suffix names the access size in bytes for which the immIOffsetN
// immediate has been validated; plain indOffI takes any immIOffset.
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI1(iRegP reg, immIOffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI2(iRegP reg, immIOffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5767 
// Memory operands [$reg, $off] with an immediate long offset; same sizing
// convention as the indOffI* family above.
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL1(iRegP reg, immLoffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL2(iRegP reg, immLoffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5851 
// Narrow-oop addressing forms: the base is a compressed oop (DecodeN).
// All are guarded by CompressedOops::shift() == 0 so the narrow value can
// be used directly as an address.  Otherwise these mirror the wide forms
// above.
operand indirectN(iRegN reg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5956 
 5957 
 5958 
// AArch64 opto stubs need to write to the pc slot in the thread anchor:
// address of the pc field, formed as thread register + fixed pc offset.
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5973 
 5974 //----------Special Memory Operands--------------------------------------------
 5975 // Stack Slot Operand - This operand is used for loading and storing temporary
 5976 //                      values on the stack where a match requires a value to
 5977 //                      flow through memory.
// Stack-slot operands: address a spilled value as sp-relative memory.
// base(0x1e) encodes the stack pointer; the sReg number becomes the
// displacement.  Only stackSlotP carries an explicit op_cost.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
 6048 
 6049 // Operands for expressing Control Flow
 6050 // NOTE: Label is a predefined operand which should not be redefined in
 6051 //       the AD file. It is generically handled within the ADLC.
 6052 
 6053 //----------Conditional Branch Operands----------------------------------------
 6054 // Comparison Op  - This is the operation of the comparison, and is limited to
 6055 //                  the following set of codes:
 6056 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
 6057 //
 6058 // Other attributes of the comparison, such as unsignedness, are specified
 6059 // by the comparison instruction that sets a condition code flags register.
 6060 // That result is represented by a flags operand whose subtype is appropriate
 6061 // to the unsignedness (etc.) of the comparison.
 6062 //
 6063 // Later, the instruction which matches both the Comparison Op (a Bool) and
 6064 // the flags (produced by the Cmp) specifies the coding of the comparison op
 6065 // by matching a specific subtype of Bool operand below, such as cmpOpU.
 6066 
 6067 // used for signed integral comparisons and fp comparisons
 6068 
// Signed comparison op.  The numeric encodings are the AArch64 condition
// code values (eq=0x0, ne=0x1, ge=0xa, lt=0xb, gt=0xc, le=0xd, vs/vc for
// overflow), emitted directly into conditional instructions.
operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 6085 
 6086 // used for unsigned integral comparisons
 6087 
// Unsigned comparison op: same structure as cmpOp but maps the ordering
// tests to the unsigned condition codes lo/hs/ls/hi.
operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 6104 
 6105 // used for certain integral comparisons which can be
 6106 // converted to cbxx or tbxx instructions
 6107 
// Comparison op restricted by predicate to eq/ne tests, so matching rules
// can select cbz/cbnz/tbz/tbnz style instructions.
operand cmpOpEqNe()
%{
  match(Bool);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 6127 
 6128 // used for certain integral comparisons which can be
 6129 // converted to cbxx or tbxx instructions
 6130 
// Comparison op restricted by predicate to lt/ge tests (sign-bit tests
// convertible to tbz/tbnz-style instructions).
operand cmpOpLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 6151 
 6152 // used for certain unsigned integral comparisons which can be
 6153 // converted to cbxx or tbxx instructions
 6154 
// Unsigned comparison op restricted by predicate to eq/ne/lt/ge tests.
operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 6177 
 6178 // Special operand allowing long args to int ops to be truncated for free
 6179 
 6180 operand iRegL2I(iRegL reg) %{
 6181 
 6182   op_cost(0);
 6183 
 6184   match(ConvL2I reg);
 6185 
 6186   format %{ "l2i($reg)" %}
 6187 
 6188   interface(REG_INTER)
 6189 %}
 6190 
// Vector memory opclasses grouped by access size in bytes: each accepts
// plain indirect, register+register index, or an immediate offset that
// has been validated for that access size.
opclass vmem2(indirect, indIndex, indOffI2, indOffL2);
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
 6195 
 6196 //----------OPERAND CLASSES----------------------------------------------------
 6197 // Operand Classes are groups of operands that are used as to simplify
 6198 // instruction definitions by not requiring the AD writer to specify
 6199 // separate instructions for every form of operand when the
 6200 // instruction accepts multiple operand types with the same basic
 6201 // encoding and format. The classic case of this is memory operands.
 6202 
 6203 // memory is used to define read/write location for load/store
 6204 // instruction defs. we can turn a memory op into an Address
 6205 
// Scalar memory opclasses by access size.  Note memory4/memory8 also
// accept the narrow-base immediate-offset forms (indOffIN/indOffLN),
// which memory1/memory2 do not.
opclass memory1(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI1, indOffL1,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN);

opclass memory2(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI2, indOffL2,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN);

opclass memory4(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI4, indOffL4,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

opclass memory8(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

// All of the memory operands. For the pipeline description.
opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex,
               indOffI1, indOffL1, indOffI2, indOffL2, indOffI4, indOffL4, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);
 6222 
 6223 
 6224 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
 6225 // operations. it allows the src to be either an iRegI or a (ConvL2I
 6226 // iRegL). in the latter case the l2i normally planted for a ConvL2I
 6227 // can be elided because the 32-bit instruction will just employ the
 6228 // lower 32 bits anyway.
 6229 //
 6230 // n.b. this does not elide all L2I conversions. if the truncated
 6231 // value is consumed by more than one operation then the ConvL2I
 6232 // cannot be bundled into the consuming nodes so an l2i gets planted
 6233 // (actually a movw $dst $src) and the downstream instructions consume
 6234 // the result of the l2i as an iRegI input. That's a shame since the
 6235 // movw is actually redundant but its not too costly.
 6236 
 6237 opclass iRegIorL2I(iRegI, iRegL2I);
 6238 
 6239 //----------PIPELINE-----------------------------------------------------------
 6240 // Rules which define the behavior of the target architectures pipeline.
 6241 
 6242 // For specific pipelines, eg A53, define the stages of that pipeline
 6243 //pipe_desc(ISS, EX1, EX2, WR);
// Alias the A53-style stage names used by the pipe_class definitions
// below onto the generic stages declared in pipe_desc(S0..S5).
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
 6248 
 6249 // Integer ALU reg operation
 6250 pipeline %{
 6251 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 4;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
 6264 
 6265 // We don't use an actual pipeline model so don't care about resources
 6266 // or description. we do use pipeline classes to introduce fixed
 6267 // latencies
 6268 
 6269 //----------RESOURCES----------------------------------------------------------
 6270 // Resources are the functional units available to the machine
 6271 
// Functional units modelled for scheduling: two issue slots (INS0/INS1,
// INS01 = either), two integer ALUs, multiply-accumulate, divide, branch,
// load/store and the NEON/FP unit.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);
 6279 
 6280 //----------PIPELINE DESCRIPTION-----------------------------------------------
 6281 // Pipeline Description specifies the stages in the machine's pipeline
 6282 
 6283 // Define the pipeline as a generic 6 stage pipeline
// Six generic stages S0..S5; ISS/EX1/EX2/WR above are #defined onto S0..S3.
pipe_desc(S0, S1, S2, S3, S4, S5);
 6285 
 6286 //----------PIPELINE CLASSES---------------------------------------------------
 6287 // Pipeline Classes describe the stages in which input and output are
 6288 // referenced by the hardware pipeline.
 6289 
 6290 pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
 6291 %{
 6292   single_instruction;
 6293   src1   : S1(read);
 6294   src2   : S2(read);
 6295   dst    : S5(write);
 6296   INS01  : ISS;
 6297   NEON_FP : S5;
 6298 %}
 6299 
 6300 pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
 6301 %{
 6302   single_instruction;
 6303   src1   : S1(read);
 6304   src2   : S2(read);
 6305   dst    : S5(write);
 6306   INS01  : ISS;
 6307   NEON_FP : S5;
 6308 %}
 6309 
 6310 pipe_class fp_uop_s(vRegF dst, vRegF src)
 6311 %{
 6312   single_instruction;
 6313   src    : S1(read);
 6314   dst    : S5(write);
 6315   INS01  : ISS;
 6316   NEON_FP : S5;
 6317 %}
 6318 
 6319 pipe_class fp_uop_d(vRegD dst, vRegD src)
 6320 %{
 6321   single_instruction;
 6322   src    : S1(read);
 6323   dst    : S5(write);
 6324   INS01  : ISS;
 6325   NEON_FP : S5;
 6326 %}
 6327 
// FP convert / move pipeline classes: the source is read in stage S1 and the
// result written in stage S5 on the NEON/FP pipe; all dual-issue as
// instruction 0 or 1 (INS01).

// Double -> float
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Float -> double
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Float -> int
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Float -> long
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Int -> float
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Long -> float
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Double -> int
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Double -> long
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Int -> double
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Long -> double
// NOTE(review): src is iRegIorL2I here while fp_l2f uses iRegL — confirm
// this asymmetry is intended.
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
 6417 
// FP divide: operands read in S1/S2, result written in S5.
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS; // Can only dual issue as instruction 0
  NEON_FP : S5;
%}

pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS; // Can only dual issue as instruction 0
  NEON_FP : S5;
%}

// FP conditional select: flags and both sources read in S1, result in S3.
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move-immediate: no source operands, result available in S3.
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP load from the constant table: result available in S4.
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
// NOTE(review): the ALU resource is held at EX1 while dst is written in
// EX2 — confirm this matches the intended pipeline model.
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
 6589 
//------- Compare operation -------------------------------

// Compare reg-reg
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-imm
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

//------- Conditional instructions ------------------------

// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// EG.  CSEL    X0, X1, ZR, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
 6654 
//------- Multiply pipeline operations --------------------

// Multiply reg-reg
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long multiply reg-reg
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long multiply accumulate
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

//------- Divide pipeline operations --------------------

// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
 6733 
//------- Load pipeline operations ------------------------

// Load - prefetch
// Eg.  PRFM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

//------- Store pipeline operations -----------------------

// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
// NOTE(review): 'dst' here names the address register; it is only read
// (at issue), never written.
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
 6801 
//------- Branch pipeline operations ----------------------

// Branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
 6830 
//------- Synchronisation operations ----------------------
// NOTE(review): the fixed latencies below appear to be conservative
// estimates rather than measured values — confirm before tuning.

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Empty pipeline class
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}

// Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty;
%}
 6894 
 6895 %}
 6896 //----------INSTRUCTIONS-------------------------------------------------------
 6897 //
 6898 // match      -- States which machine-independent subtree may be replaced
 6899 //               by this instruction.
 6900 // ins_cost   -- The estimated cost of this instruction is used by instruction
 6901 //               selection to identify a minimum cost tree of machine
 6902 //               instructions that matches a tree of machine-independent
 6903 //               instructions.
 6904 // format     -- A string providing the disassembly for this instruction.
 6905 //               The value of an instruction's operand may be inserted
 6906 //               by referring to it with a '$' prefix.
 6907 // opcode     -- Three instruction opcodes may be provided.  These are referred
 6908 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
 6910 //               indicate the type of machine instruction, while secondary
 6911 //               and tertiary are often used for prefix options or addressing
 6912 //               modes.
 6913 // ins_encode -- A list of encode classes with parameters. The encode class
 6914 //               name must have been defined in an 'enc_class' specification
 6915 //               in the encode section of the architecture description.
 6916 
 6917 // ============================================================================
 6918 // Memory (Load/Store) Instructions
 6919 
 6920 // Load Instructions
 6921 
 6922 // Load Byte (8 bit signed)
 6923 instruct loadB(iRegINoSp dst, memory1 mem)
 6924 %{
 6925   match(Set dst (LoadB mem));
 6926   predicate(!needs_acquiring_load(n));
 6927 
 6928   ins_cost(4 * INSN_COST);
 6929   format %{ "ldrsbw  $dst, $mem\t# byte" %}
 6930 
 6931   ins_encode(aarch64_enc_ldrsbw(dst, mem));
 6932 
 6933   ins_pipe(iload_reg_mem);
 6934 %}
 6935 
 6936 // Load Byte (8 bit signed) into long
 6937 instruct loadB2L(iRegLNoSp dst, memory1 mem)
 6938 %{
 6939   match(Set dst (ConvI2L (LoadB mem)));
 6940   predicate(!needs_acquiring_load(n->in(1)));
 6941 
 6942   ins_cost(4 * INSN_COST);
 6943   format %{ "ldrsb  $dst, $mem\t# byte" %}
 6944 
 6945   ins_encode(aarch64_enc_ldrsb(dst, mem));
 6946 
 6947   ins_pipe(iload_reg_mem);
 6948 %}
 6949 
 6950 // Load Byte (8 bit unsigned)
 6951 instruct loadUB(iRegINoSp dst, memory1 mem)
 6952 %{
 6953   match(Set dst (LoadUB mem));
 6954   predicate(!needs_acquiring_load(n));
 6955 
 6956   ins_cost(4 * INSN_COST);
 6957   format %{ "ldrbw  $dst, $mem\t# byte" %}
 6958 
 6959   ins_encode(aarch64_enc_ldrb(dst, mem));
 6960 
 6961   ins_pipe(iload_reg_mem);
 6962 %}
 6963 
 6964 // Load Byte (8 bit unsigned) into long
 6965 instruct loadUB2L(iRegLNoSp dst, memory1 mem)
 6966 %{
 6967   match(Set dst (ConvI2L (LoadUB mem)));
 6968   predicate(!needs_acquiring_load(n->in(1)));
 6969 
 6970   ins_cost(4 * INSN_COST);
 6971   format %{ "ldrb  $dst, $mem\t# byte" %}
 6972 
 6973   ins_encode(aarch64_enc_ldrb(dst, mem));
 6974 
 6975   ins_pipe(iload_reg_mem);
 6976 %}
 6977 
 6978 // Load Short (16 bit signed)
 6979 instruct loadS(iRegINoSp dst, memory2 mem)
 6980 %{
 6981   match(Set dst (LoadS mem));
 6982   predicate(!needs_acquiring_load(n));
 6983 
 6984   ins_cost(4 * INSN_COST);
 6985   format %{ "ldrshw  $dst, $mem\t# short" %}
 6986 
 6987   ins_encode(aarch64_enc_ldrshw(dst, mem));
 6988 
 6989   ins_pipe(iload_reg_mem);
 6990 %}
 6991 
 6992 // Load Short (16 bit signed) into long
 6993 instruct loadS2L(iRegLNoSp dst, memory2 mem)
 6994 %{
 6995   match(Set dst (ConvI2L (LoadS mem)));
 6996   predicate(!needs_acquiring_load(n->in(1)));
 6997 
 6998   ins_cost(4 * INSN_COST);
 6999   format %{ "ldrsh  $dst, $mem\t# short" %}
 7000 
 7001   ins_encode(aarch64_enc_ldrsh(dst, mem));
 7002 
 7003   ins_pipe(iload_reg_mem);
 7004 %}
 7005 
 7006 // Load Char (16 bit unsigned)
 7007 instruct loadUS(iRegINoSp dst, memory2 mem)
 7008 %{
 7009   match(Set dst (LoadUS mem));
 7010   predicate(!needs_acquiring_load(n));
 7011 
 7012   ins_cost(4 * INSN_COST);
 7013   format %{ "ldrh  $dst, $mem\t# short" %}
 7014 
 7015   ins_encode(aarch64_enc_ldrh(dst, mem));
 7016 
 7017   ins_pipe(iload_reg_mem);
 7018 %}
 7019 
 7020 // Load Short/Char (16 bit unsigned) into long
 7021 instruct loadUS2L(iRegLNoSp dst, memory2 mem)
 7022 %{
 7023   match(Set dst (ConvI2L (LoadUS mem)));
 7024   predicate(!needs_acquiring_load(n->in(1)));
 7025 
 7026   ins_cost(4 * INSN_COST);
 7027   format %{ "ldrh  $dst, $mem\t# short" %}
 7028 
 7029   ins_encode(aarch64_enc_ldrh(dst, mem));
 7030 
 7031   ins_pipe(iload_reg_mem);
 7032 %}
 7033 
 7034 // Load Integer (32 bit signed)
 7035 instruct loadI(iRegINoSp dst, memory4 mem)
 7036 %{
 7037   match(Set dst (LoadI mem));
 7038   predicate(!needs_acquiring_load(n));
 7039 
 7040   ins_cost(4 * INSN_COST);
 7041   format %{ "ldrw  $dst, $mem\t# int" %}
 7042 
 7043   ins_encode(aarch64_enc_ldrw(dst, mem));
 7044 
 7045   ins_pipe(iload_reg_mem);
 7046 %}
 7047 
 7048 // Load Integer (32 bit signed) into long
 7049 instruct loadI2L(iRegLNoSp dst, memory4 mem)
 7050 %{
 7051   match(Set dst (ConvI2L (LoadI mem)));
 7052   predicate(!needs_acquiring_load(n->in(1)));
 7053 
 7054   ins_cost(4 * INSN_COST);
 7055   format %{ "ldrsw  $dst, $mem\t# int" %}
 7056 
 7057   ins_encode(aarch64_enc_ldrsw(dst, mem));
 7058 
 7059   ins_pipe(iload_reg_mem);
 7060 %}
 7061 
 7062 // Load Integer (32 bit unsigned) into long
 7063 instruct loadUI2L(iRegLNoSp dst, memory4 mem, immL_32bits mask)
 7064 %{
 7065   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
 7066   predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));
 7067 
 7068   ins_cost(4 * INSN_COST);
 7069   format %{ "ldrw  $dst, $mem\t# int" %}
 7070 
 7071   ins_encode(aarch64_enc_ldrw(dst, mem));
 7072 
 7073   ins_pipe(iload_reg_mem);
 7074 %}
 7075 
 7076 // Load Long (64 bit signed)
 7077 instruct loadL(iRegLNoSp dst, memory8 mem)
 7078 %{
 7079   match(Set dst (LoadL mem));
 7080   predicate(!needs_acquiring_load(n));
 7081 
 7082   ins_cost(4 * INSN_COST);
 7083   format %{ "ldr  $dst, $mem\t# int" %}
 7084 
 7085   ins_encode(aarch64_enc_ldr(dst, mem));
 7086 
 7087   ins_pipe(iload_reg_mem);
 7088 %}
 7089 
 7090 // Load Range
 7091 instruct loadRange(iRegINoSp dst, memory4 mem)
 7092 %{
 7093   match(Set dst (LoadRange mem));
 7094 
 7095   ins_cost(4 * INSN_COST);
 7096   format %{ "ldrw  $dst, $mem\t# range" %}
 7097 
 7098   ins_encode(aarch64_enc_ldrw(dst, mem));
 7099 
 7100   ins_pipe(iload_reg_mem);
 7101 %}
 7102 
// Load Pointer
// Only selected when the load carries no GC barrier data (barrier_data() == 0);
// barriered loads are matched elsewhere.
instruct loadP(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
// Plain variant; disabled when compact object headers are in use (see below).
instruct loadNKlass(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n) && !UseCompactObjectHeaders);

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Compact-headers variant: load_nklass_compact extracts the narrow klass.
// NOTE(review): effect(KILL cr) implies the helper clobbers the flags —
// confirm against MacroAssembler::load_nklass_compact.
instruct loadNKlassCompactHeaders(iRegNNoSp dst, memory4 mem, rFlagsReg cr)
%{
  match(Set dst (LoadNKlass mem));
  effect(KILL cr);
  predicate(!needs_acquiring_load(n) && UseCompactObjectHeaders);

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}
  ins_encode %{
    __ load_nklass_compact($dst$$Register, $mem$$base$$Register, $mem$$index$$Register, $mem$$scale, $mem$$disp);
  %}
  ins_pipe(pipe_slow);
%}
 7172 
// Load Float
// NOTE(review): FP loads use the generic pipe_class_memory rather than
// iload_reg_mem — confirm whether this is deliberate.
instruct loadF(vRegF dst, memory4 mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory8 mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
 7200 
 7201 
// Constant loads: each materializes an immediate into a register.

// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant
// Costed at 4 instructions: a full pointer may need a mov plus movk sequence.
instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant

instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
 7257 
 7258 // Load Pointer Constant One
 7259 
 7260 instruct loadConP1(iRegPNoSp dst, immP_1 con)
 7261 %{
 7262   match(Set dst con);
 7263 
 7264   ins_cost(INSN_COST);
 7265   format %{ "mov  $dst, $con\t# NULL ptr" %}
 7266 
 7267   ins_encode(aarch64_enc_mov_p1(dst, con));
 7268 
 7269   ins_pipe(ialu_imm);
 7270 %}
 7271 
// Load Byte Map Base Constant
// Uses adr (PC-relative) rather than mov, hence the distinct encoding.
instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Pointer Constant

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant

instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant

instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
 7327 
// Load Packed Float Constant
// "Packed" presumably means the value is encodable in FMOV's 8-bit
// immediate form, avoiding a constant-table load — confirm against the
// immFPacked operand definition.

instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}

// Load Float Constant
// General case: load the value from the constant table.

instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}
 7358 
// Load Packed Double Constant
// Double value encodable directly in FMOV's immediate form (see the
// packed-float variant above).

instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
 7371 
 7372 // Load Double Constant
 7373 
 7374 instruct loadConD(vRegD dst, immD con) %{
 7375   match(Set dst con);
 7376 
 7377   ins_cost(INSN_COST * 5);
 7378   format %{
 7379     "ldrd $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
 7380   %}
 7381 
 7382   ins_encode %{
 7383     __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
 7384   %}
 7385 
 7386   ins_pipe(fp_load_constant_d);
 7387 %}
 7388 
// Store Instructions

// Store CMS card-mark Immediate
// aarch64_enc_strb0 stores zr (the zero register); the storestore barrier
// is elided in this variant.
instruct storeimmCM0(immI0 zero, memory1 mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST);
  format %{ "storestore (elided)\n\t"
            "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory1 mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "storestore\n\t"
            "dmb ishst"
            "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}

// Store Byte
instruct storeB(iRegIorL2I src, memory1 mem)
%{
  match(Set mem (StoreB mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7434 
 7435 
 7436 instruct storeimmB0(immI0 zero, memory1 mem)
 7437 %{
 7438   match(Set mem (StoreB mem zero));
 7439   predicate(!needs_releasing_store(n));
 7440 
 7441   ins_cost(INSN_COST);
 7442   format %{ "strb rscractch2, $mem\t# byte" %}
 7443 
 7444   ins_encode(aarch64_enc_strb0(mem));
 7445 
 7446   ins_pipe(istore_mem);
 7447 %}
 7448 
// Store Char/Short
instruct storeC(iRegIorL2I src, memory2 mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Char/Short immediate zero
instruct storeimmC0(immI0 zero, memory2 mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}

// Store Integer

instruct storeI(iRegIorL2I src, memory4 mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Integer immediate zero
instruct storeimmI0(immI0 zero, memory4 mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}

// Store Long (64 bit signed)
// NOTE(review): "# int" in the format string looks like a copy/paste from
// storeI — this stores a 64-bit long. String left unchanged here.
instruct storeL(iRegL src, memory8 mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Long (64 bit signed) immediate zero
// NOTE(review): same "# int" copy/paste in the format string as storeL.
instruct storeimmL0(immL0 zero, memory8 mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
 7531 
// Store Pointer
// Only selected when the store carries no GC barrier data.
instruct storeP(iRegP src, memory8 mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Pointer immediate null
instruct storeimmP0(immP0 zero, memory8 mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}

// Store Compressed Pointer
instruct storeN(iRegN src, memory4 mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Compressed Pointer immediate null
instruct storeImmN0(immN0 zero, memory4 mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
 7586 
// Store Float
instruct storeF(vRegF src, memory4 mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD(vRegD src, memory8 mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Klass Pointer
// NOTE(review): predicate precedes match here, unlike the other store
// rules — harmless, but inconsistent ordering.
instruct storeNKlass(iRegN src, memory4 mem)
%{
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// TODO
// implement storeImmD0 and storeDImmPacked
 7634 
 7635 // prefetch instructions
 7636 // Must be safe to execute with invalid address (cannot fault).
 7637 
// Prefetch for allocation: PRFM with write intent into L1 (PSTL1KEEP).
// Prefetches never fault, so an invalid address is safe here.
instruct prefetchalloc( memory8 mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
 7648 
 7649 //  ---------------- volatile loads and stores ----------------
 7650 
 7651 // Load Byte (8 bit signed)
// Volatile byte load: uses load-acquire (ldarsb), so only a simple
// register-indirect address (no offset/index) is allowed.
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
// Sign-extending acquire load; ldarsb extends directly to 64 bits, so the
// ConvI2L is folded into the load.
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned)
// Zero-extending acquire load (ldarb).
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
// ldarb zero-extends, so the ConvI2L is free and folded into the load.
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short (16 bit signed)
// Sign-extending halfword acquire load.
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Volatile unsigned short/char load: zero-extending halfword acquire load.
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned) into long
// ldarh zero-extends, so the ConvI2L is folded into the load.
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
 7740 
 7741 // Load Short/Char (16 bit signed) into long
 7742 instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7743 %{
 7744   match(Set dst (ConvI2L (LoadS mem)));
 7745 
 7746   ins_cost(VOLATILE_REF_COST);
 7747   format %{ "ldarh  $dst, $mem\t# short" %}
 7748 
 7749   ins_encode(aarch64_enc_ldarsh(dst, mem));
 7750 
 7751   ins_pipe(pipe_serial);
 7752 %}
 7753 
 7754 // Load Integer (32 bit signed)
// Volatile int load: 32-bit acquire load (ldarw).
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
// Matches the (long)x & 0xFFFFFFFF idiom: ldarw already zero-extends, so
// both the ConvI2L and the mask are folded away.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
 7779 
 7780 // Load Long (64 bit signed)
 7781 instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7782 %{
 7783   match(Set dst (LoadL mem));
 7784 
 7785   ins_cost(VOLATILE_REF_COST);
 7786   format %{ "ldar  $dst, $mem\t# int" %}
 7787 
 7788   ins_encode(aarch64_enc_ldar(dst, mem));
 7789 
 7790   ins_pipe(pipe_serial);
 7791 %}
 7792 
 7793 // Load Pointer
// Volatile pointer load: 64-bit acquire load; only for nodes with no GC
// barrier data (barrier variants are matched elsewhere).
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));
  predicate(n->as_Load()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer (32-bit narrow oop) with acquire semantics.
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float with acquire semantics into an FP/SIMD register.
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double with acquire semantics into an FP/SIMD register.
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
 7845 
 7846 // Store Byte
// Volatile byte store: store-release (stlrb); indirect addressing only.
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Volatile byte store of constant zero: releases zr directly.
instruct storeimmB0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb0(mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short with release semantics (stlrh).
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Volatile short store of constant zero: releases zr directly.
instruct storeimmC0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh0(mem));

  ins_pipe(pipe_class_memory);
%}
 7895 
 7896 // Store Integer
 7897 
 7898 instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
 7899 %{
 7900   match(Set mem(StoreI mem src));
 7901 
 7902   ins_cost(VOLATILE_REF_COST);
 7903   format %{ "stlrw  $src, $mem\t# int" %}
 7904 
 7905   ins_encode(aarch64_enc_stlrw(src, mem));
 7906 
 7907   ins_pipe(pipe_class_memory);
 7908 %}
 7909 
 7910 instruct storeimmI0_volatile(immI0 zero, /* sync_memory*/indirect mem)
 7911 %{
 7912   match(Set mem(StoreI mem zero));
 7913 
 7914   ins_cost(VOLATILE_REF_COST);
 7915   format %{ "stlrw  zr, $mem\t# int" %}
 7916 
 7917   ins_encode(aarch64_enc_stlrw0(mem));
 7918 
 7919   ins_pipe(pipe_class_memory);
 7920 %}
 7921 
 7922 // Store Long (64 bit signed)
 7923 instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
 7924 %{
 7925   match(Set mem (StoreL mem src));
 7926 
 7927   ins_cost(VOLATILE_REF_COST);
 7928   format %{ "stlr  $src, $mem\t# int" %}
 7929 
 7930   ins_encode(aarch64_enc_stlr(src, mem));
 7931 
 7932   ins_pipe(pipe_class_memory);
 7933 %}
 7934 
 7935 instruct storeimmL0_volatile(immL0 zero, /* sync_memory*/indirect mem)
 7936 %{
 7937   match(Set mem (StoreL mem zero));
 7938 
 7939   ins_cost(VOLATILE_REF_COST);
 7940   format %{ "stlr  zr, $mem\t# int" %}
 7941 
 7942   ins_encode(aarch64_enc_stlr0(mem));
 7943 
 7944   ins_pipe(pipe_class_memory);
 7945 %}
 7946 
 7947 // Store Pointer
// Volatile pointer store: 64-bit store-release; only for nodes with no GC
// barrier data (barrier variants are matched elsewhere).
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Volatile null-pointer store: releases zr directly.
instruct storeimmP0_volatile(immP0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr0(mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer with release semantics (stlrw).
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Volatile zero compressed-pointer store: releases zr directly.
instruct storeimmN0_volatile(immN0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  zr, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw0(mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float with release semantics from an FP/SIMD register.
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double with release semantics from an FP/SIMD register.
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 8027 
 8028 //  ---------------- end of volatile loads and stores ----------------
 8029 
// Cache-line writeback of the line containing $addr. Only available when
// the CPU supports data cache line flush; the asserts document that the
// matched address must be a plain base register (no index, zero disp).
instruct cacheWB(indirect addr)
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWB addr);

  ins_cost(100);
  format %{"cache wb $addr" %}
  ins_encode %{
    assert($addr->index_position() < 0, "should be");
    assert($addr$$disp == 0, "should be");
    __ cache_wb(Address($addr$$base$$Register, 0));
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Ordering barrier emitted before a sequence of cache writebacks.
instruct cacheWBPreSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPreSync);

  ins_cost(100);
  format %{"cache wb presync" %}
  ins_encode %{
    __ cache_wbsync(true);   // true selects the pre-sync variant
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Ordering barrier emitted after a sequence of cache writebacks.
instruct cacheWBPostSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPostSync);

  ins_cost(100);
  format %{"cache wb postsync" %}
  ins_encode %{
    __ cache_wbsync(false);  // false selects the post-sync variant
  %}
  ins_pipe(pipe_slow); // XXX
%}
 8070 
 8071 // ============================================================================
 8072 // BSWAP Instructions
 8073 
// Byte-reverse a 32-bit value (Integer.reverseBytes) with a single revw.
instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Byte-reverse a 64-bit value (Long.reverseBytes) with a single rev.
instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Byte-reverse an unsigned 16-bit value (Character.reverseBytes): rev16w
// swaps the two bytes; no extension needed for the unsigned case.
instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Byte-reverse a signed 16-bit value (Short.reverseBytes): rev16w swaps
// the bytes, then sbfmw sign-extends bits 0..15 into the full word.
instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}
 8127 
 8128 // ============================================================================
 8129 // Zero Count Instructions
 8130 
// Integer.numberOfLeadingZeros: single clzw instruction.
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountLeadingZerosI src));

  ins_cost(INSN_COST);
  format %{ "clzw  $dst, $src" %}
  ins_encode %{
    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Long.numberOfLeadingZeros: single clz instruction; result is an int.
instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountLeadingZerosL src));

  ins_cost(INSN_COST);
  format %{ "clz   $dst, $src" %}
  ins_encode %{
    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Integer.numberOfTrailingZeros: no direct ctz instruction, so
// bit-reverse (rbitw) then count leading zeros (clzw).
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountTrailingZerosI src));

  ins_cost(INSN_COST * 2);
  format %{ "rbitw  $dst, $src\n\t"
            "clzw   $dst, $dst" %}
  ins_encode %{
    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Long.numberOfTrailingZeros: rbit + clz, 64-bit variant.
instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountTrailingZerosL src));

  ins_cost(INSN_COST * 2);
  format %{ "rbit   $dst, $src\n\t"
            "clz    $dst, $dst" %}
  ins_encode %{
    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
 8182 
 8183 //---------- Population Count Instructions -------------------------------------
 8184 //
 8185 
// Integer.bitCount: move the value into a SIMD register, count set bits
// per byte (cnt), sum the byte counts (addv), and move the result back.
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // NOTE(review): re-writes $src onto itself with the same 32-bit value
    // to clear the upper 32 bits before the 64-bit vector move.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// PopCountI fused with an int load: loads 32 bits straight into the SIMD
// register (ldrs), then cnt/addv as above.
instruct popCountI_mem(iRegINoSp dst, memory4 mem, vRegF tmp) %{
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// PopCountL fused with a long load: 64-bit load into the SIMD register
// (ldrd), then cnt/addv as above.
instruct popCountL_mem(iRegINoSp dst, memory8 mem, vRegD tmp) %{
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
 8268 
 8269 // ============================================================================
 8270 // MemBar Instruction
 8271 
// LoadFence: orders prior loads before subsequent loads and stores
// (LoadLoad|LoadStore barrier).
instruct load_fence() %{
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarAcquire that the predicate proves redundant (e.g. already covered
// by a preceding ldar); emits only a block comment, zero cost.
instruct unnecessary_membar_acquire() %{
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}

// Full acquire barrier: LoadLoad|LoadStore membar.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire\n\t"
            "dmb ish" %}

  ins_encode %{
    __ block_comment("membar_acquire");
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}
 8312 
 8313 
// MemBarAcquireLock: always elided on AArch64 — only a block comment is
// emitted (the lock acquisition itself provides the ordering).
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// StoreFence: orders prior loads and stores before subsequent stores
// (LoadStore|StoreStore barrier).
instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarRelease that the predicate proves redundant (e.g. already covered
// by a following stlr); emits only a block comment, zero cost.
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}

// Full release barrier: LoadStore|StoreStore membar.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release\n\t"
            "dmb ish" %}

  ins_encode %{
    __ block_comment("membar_release");
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
 8365 
// StoreStore barrier; also matches the explicit StoreStoreFence node.
instruct membar_storestore() %{
  match(MemBarStoreStore);
  match(StoreStoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarReleaseLock: always elided on AArch64 — only a block comment is
// emitted (the lock release itself provides the ordering).
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// MemBarVolatile that the predicate proves redundant; emits only a block
// comment, zero cost.
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Full volatile barrier: StoreLoad membar. Cost is inflated to strongly
// discourage selection when an elided form applies.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile\n\t"
             "dmb ish"%}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
 8420 
 8421 // ============================================================================
 8422 // Cast/Convert Instructions
 8423 
// Reinterpret a long as a pointer; the mov is skipped when source and
// destination were allocated to the same register.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Reinterpret a pointer as a long; mov skipped when registers coincide.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Convert oop into int for vectors alignment masking:
// truncate the pointer bits to 32 bits with movw.
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
 8466 
 8467 // Convert compressed oop into int for vectors alignment masking
 8468 // in case of 32bit oops (heap < 4Gb).
 8469 instruct convN2I(iRegINoSp dst, iRegN src)
 8470 %{
 8471   predicate(CompressedOops::shift() == 0);
 8472   match(Set dst (ConvL2I (CastP2X (DecodeN src))));
 8473 
 8474   ins_cost(INSN_COST);
 8475   format %{ "mov dst, $src\t# compressed ptr -> int" %}
 8476   ins_encode %{
 8477     __ movw($dst$$Register, $src$$Register);
 8478   %}
 8479 
 8480   ins_pipe(ialu_reg);
 8481 %}
 8482 
 8483 
 8484 // Convert oop pointer into compressed form
// Compress an oop that may be null; encode_heap_oop may need a compare,
// hence the KILL of the flags register.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Compress an oop statically known to be non-null (no null check needed).
// NOTE(review): cr is declared as an operand but there is no effect(KILL cr)
// here, unlike encodeHeapOop above — confirm this is intentional.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop that may be null (excludes constants, which are
// handled elsewhere).
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop statically known non-null (or a constant).
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
 8537 
 8538 // n.b. AArch64 implementations of encode_klass_not_null and
 8539 // decode_klass_not_null do not modify the flags register so, unlike
 8540 // Intel, we don't kill CR as a side effect here
 8541 
// Compress a (non-null) klass pointer. Per the note above, the AArch64
// implementation does not touch the flags, so no KILL cr effect is needed.
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// Decompress a (non-null) narrow klass pointer. Uses the single-register
// variant when src and dst were allocated to the same register.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
 8575 
// CheckCastPP is a compile-time type assertion only: matches in place on
// dst and emits no code (size 0).
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// CastPP: compiler-only pointer cast, no code emitted.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// CastII: compiler-only int range/type cast, no code emitted.
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// CastLL: compiler-only long range/type cast, no code emitted.
instruct castLL(iRegL dst)
%{
  match(Set dst (CastLL dst));

  size(0);
  format %{ "# castLL of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// CastFF: compiler-only float cast, no code emitted.
instruct castFF(vRegF dst)
%{
  match(Set dst (CastFF dst));

  size(0);
  format %{ "# castFF of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// CastDD: compiler-only double cast, no code emitted.
instruct castDD(vRegD dst)
%{
  match(Set dst (CastDD dst));

  size(0);
  format %{ "# castDD of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// CastVV for vector registers: compiler-only, no code emitted.
instruct castVV(vReg dst)
%{
  match(Set dst (CastVV dst));

  size(0);
  format %{ "# castVV of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// CastVV for SVE predicate (governing) registers: compiler-only, no code.
instruct castVVMask(pRegGov dst)
%{
  match(Set dst (CastVV dst));

  size(0);
  format %{ "# castVV of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
 8661 
 8662 // ============================================================================
 8663 // Atomic operation instructions
 8664 //
 8665 
 8666 // standard CompareAndSwapX when we are using barriers
 8667 // these have higher priority than the rules selected by a predicate
 8668 
 8669 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
 8670 // can't match them
 8671 
// Strong byte CAS, barrier-using form (the Acq variant below is preferred
// when needs_acquiring_load_exclusive holds -- note its halved cost).
// Compares the byte at $mem with $oldval and stores $newval if equal;
// $res is then set from the EQ condition (1 = success, 0 = failure),
// which is why the flags register is killed.
// NOTE(review): the format string says "(int)" although this is a byte
// CAS -- cosmetic only, the format is just debug text.
instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8689 
// Strong short (halfword) CAS, barrier-using form.  $res <- 1 on
// success, 0 on failure, derived from the EQ flag (hence KILL cr).
// NOTE(review): the format string says "(int)" although this is a short
// CAS -- cosmetic only.
instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8707 
// Strong int (32-bit word) CAS, barrier-using form.  $res <- 1 on
// success, 0 on failure, derived from the EQ flag (hence KILL cr).
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8725 
// Strong long (64-bit) CAS, barrier-using form.  Result register is an
// int (boolean success flag), set from EQ via cset (hence KILL cr).
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8743 
// Strong pointer CAS, barrier-using form.  The predicate restricts this
// rule to CAS nodes carrying no GC barrier data; nodes with barrier_data
// set are matched by GC-specific rules elsewhere.  $res <- 1 on success,
// 0 on failure, from the EQ flag (hence KILL cr).
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  predicate(n->as_LoadStore()->barrier_data() == 0);
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8762 
// Strong narrow-oop (compressed pointer, 32-bit) CAS, barrier-using
// form.  $res <- 1 on success, 0 on failure, from the EQ flag (hence
// KILL cr).
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8780 
 8781 // alternative CompareAndSwapX when we are eliding barriers
 8782 
// Acquiring form of the byte CAS: selected when
// needs_acquiring_load_exclusive(n) holds, i.e. when the CAS can use a
// load-acquire exclusive and the separate barriers can be elided.  The
// halved cost (VOLATILE_REF_COST) makes the matcher prefer this rule
// over plain compareAndSwapB whenever the predicate is true.
instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8801 
// Acquiring form of the short CAS (see compareAndSwapBAcq for the
// predicate/cost rationale).
instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8820 
// Acquiring form of the int CAS; lower cost than compareAndSwapI so it
// wins whenever needs_acquiring_load_exclusive(n) holds.
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8839 
// Acquiring form of the long CAS; lower cost than compareAndSwapL so it
// wins whenever needs_acquiring_load_exclusive(n) holds.
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8858 
// Acquiring form of the pointer CAS.  Predicate combines the barrier
// elision condition with the no-GC-barrier-data restriction (see
// compareAndSwapP).
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8877 
// Acquiring form of the narrow-oop CAS; lower cost than
// compareAndSwapN so it wins whenever needs_acquiring_load_exclusive(n)
// holds.
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8896 
 8897 
 8898 // ---------------------------------------------------------------------
 8899 
// BEGIN This section of the file is automatically generated. Do not edit --------------

// Sundry CAS operations.  Note that release is always true,
// regardless of the memory ordering of the CAS.  This is because we
// need the volatile case to be sequentially consistent but there is
// no trailing StoreLoad barrier emitted by C2.  Unfortunately we
// can't check the type of memory ordering here, so we always emit a
// STLXR.

// This section is generated from cas.m4
//
// NOTE(review): the "(byte, weak)"/"(short, weak)"/"(int, weak)"/...
// annotations in the format strings of the STRONG compareAndExchange*
// rules below contradict their encodings (/*weak*/ false).  The format
// text is debug-only, but if it is ever corrected, the fix must be made
// in cas.m4 and this whole section regenerated -- never hand-edited.


// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// END This section of the file is automatically generated. Do not edit --------------
 9363 // ---------------------------------------------------------------------
 9364 
// Atomic exchange of the 32-bit word at [$mem] with $newv; the previous
// contents end up in $prev.  The indirect memory operand is just a base
// register, hence as_Register($mem$$base).
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9374 
// Atomic exchange of the 64-bit long at [$mem] with $newv; previous
// contents returned in $prev.
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9384 
// Atomic exchange of a narrow oop (32-bit compressed pointer) at [$mem]
// with $newv; previous contents returned in $prev.
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9394 
// Atomic exchange of a pointer at [$mem] with $newv; previous contents
// returned in $prev.  Matches only nodes without GC barrier data --
// barrier-carrying swaps are handled by GC-specific rules.
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9405 
// Acquiring form of the int exchange: selected when
// needs_acquiring_load_exclusive(n) holds; uses atomic_xchgalw (the
// acquire variant) and is priced lower so it wins over get_and_setI.
instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetI mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9416 
// Acquiring form of the long exchange (see get_and_setIAcq for the
// predicate/cost rationale); uses atomic_xchgal.
instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetL mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9427 
 9428 instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
 9429   predicate(needs_acquiring_load_exclusive(n));
 9430   match(Set prev (GetAndSetN mem newv));
 9431   ins_cost(VOLATILE_REF_COST);
 9432   format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
 9433   ins_encode %{
 9434     __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
 9435   %}
 9436   ins_pipe(pipe_serial);
 9437 %}
 9438 
 9439 instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
 9440   predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
 9441   match(Set prev (GetAndSetP mem newv));
 9442   ins_cost(VOLATILE_REF_COST);
 9443   format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
 9444   ins_encode %{
 9445     __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
 9446   %}
 9447   ins_pipe(pipe_serial);
 9448 %}
 9449 
 9450 
// GetAndAdd rules, relaxed-ordering forms. Four variants per width:
// register vs. add/sub-encodable immediate increment, and a _no_res
// flavour (result_not_used predicate) that passes noreg so no old value
// is produced.

// 64-bit fetch-and-add: $newval = old value at [$mem], [$mem] += $incr.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 64-bit atomic add whose fetched result is dead: discard it via noreg.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 64-bit fetch-and-add with an immediate increment (immLAddSub: fits the
// AArch64 add/sub immediate encodings).
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Immediate-increment form with dead result.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit fetch-and-add.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit atomic add with dead result.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit fetch-and-add with an immediate increment.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit immediate-increment form with dead result.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9534 
// Acquiring (Acq) counterparts of the GetAndAdd rules above: gated on
// needs_acquiring_load_exclusive(n), emitting the *al macro-assembler
// forms, with lower ins_cost than the relaxed rules.

// 64-bit acquiring fetch-and-add, register increment.
instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 64-bit acquiring atomic add with dead result (noreg sink).
instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 64-bit acquiring fetch-and-add, immediate increment.
instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 64-bit acquiring immediate add with dead result.
instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit acquiring fetch-and-add, register increment.
instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit acquiring atomic add with dead result.
instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit acquiring fetch-and-add, immediate increment.
instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit acquiring immediate add with dead result.
instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9622 
// Manifest a CmpU result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Sequence: compare, set dst to 1 on NE, then negate dst on unsigned
// lower (LO) — yielding -1, 0 or +1. KILL flags because the compare
// clobbers the condition codes.
instruct cmpU3_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg flags)
%{
  match(Set dst (CmpU3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmpw $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpU3(reg)"
  %}
  ins_encode %{
    __ cmpw($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}

// Immediate form: the compare is done as subsw into zr (discarding the
// difference, keeping only the flags).
instruct cmpU3_reg_imm(iRegINoSp dst, iRegI src1, immIAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpU3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subsw zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpU3(imm)"
  %}
  ins_encode %{
    __ subsw(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}

// Manifest a CmpUL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Same scheme as CmpU3 but with a 64-bit compare; the result stays a
// 32-bit int (csetw/cnegw).
instruct cmpUL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpUL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmp $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpUL3(reg)"
  %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}

// Immediate form. The (int32_t) cast is fine: immLAddSub is restricted
// to values encodable as an add/sub immediate, so no bits are lost.
instruct cmpUL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpUL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subs zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpUL3(imm)"
  %}
  ins_encode %{
    __ subs(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}

// Manifest a CmpL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Signed flavour: the negate condition is LT instead of the unsigned LO.
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmp $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lt\t# CmpL3(reg)"
  %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}

// Signed 64-bit compare against an add/sub-encodable immediate.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subs zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lt\t# CmpL3(imm)"
  %}
  ins_encode %{
    __ subs(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
 9748 
 9749 // ============================================================================
 9750 // Conditional Move Instructions
 9751 
// n.b. we have identical rules for both a signed compare op (cmpOp)
// and an unsigned compare op (cmpOpU). it would be nice if we could
// define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
// of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
 9761 
// Conditional move, int, signed compare: dst = cmp(cr) ? src2 : src1.
// csel Rd, Rn, Rm, cond selects Rn when cond holds, so $src2 is the
// "condition true" value.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Same as above for the unsigned compare flavour (see the note above on
// why cmpOp and cmpOpU need separate rules).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// n.b. this is selected in preference to the rule above because it
// avoids loading constant 0 into a source register

// TODO
// we ought only to be able to cull one of these variants as the ideal
// transforms ought always to order the zero consistently (to left/right?)

// Zero on the left: use zr as the "condition false" operand.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the zero-on-the-left case.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero on the right: use zr as the "condition true" operand.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the zero-on-the-right case.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// special case for creating a boolean 0 or 1

// n.b. this is selected in preference to the rule above because it
// avoids loading constants 0 and 1 into a source register

// csinc Rd, zr, zr, cond gives cond ? 0 : 1, i.e. cset with the negated
// condition, materialising the boolean with no source registers.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// Unsigned flavour of the boolean-materialisation case.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
 9909 
// CMoveL rules: same pattern as the CMoveI rules above but using the
// 64-bit csel; $src2 is selected when the condition holds.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare flavour.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// Zero on the right: zr supplies the "condition true" value.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the zero-on-the-right case.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero on the left: zr supplies the "condition false" value.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the zero-on-the-left case.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// CMoveP rules: full-width pointers, same csel pattern as CMoveL.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare flavour.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// Null (zero) pointer on the right.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the null-on-the-right case.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Null (zero) pointer on the left.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the null-on-the-left case.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10105 
// CMoveN: conditional move of compressed (narrow) pointers, using the
// 32-bit cselw; $src2 is selected when the condition holds.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10121 
// CMoveN under an unsigned compare: conditional move of compressed
// (narrow) pointers using the 32-bit cselw; $src2 is selected when the
// condition holds. Fix: the debug format previously said "# signed" but
// this is the cmpOpU/rFlagsRegU (unsigned) rule — now matches the
// "# unsigned" annotation used by every sibling cmovU* rule.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10137 
// special cases where one arg is zero

// Null (zero) narrow pointer on the right: zr is the "condition true" value.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the null-on-the-right case.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Null (zero) narrow pointer on the left: zr is the "condition false" value.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the null-on-the-left case.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10203 
// CMoveF: conditional move of single-precision floats via fcsels.
// Note the operand swap in the encoding: $src2 is the "condition true"
// value, mirroring the integer csel rules above.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}

// Unsigned-compare flavour of the float conditional move.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10239 
// CMoveD: conditional move of double-precision floats via fcseld;
// $src2 is the "condition true" value, as in the rules above.
// Fix: the debug format previously said "cmove float" although this is
// the CMoveD/vRegD/fcseld (double) rule — now says "cmove double".
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10257 
// Unsigned-compare flavour of the double conditional move (fcseld).
// Fix: the debug format previously said "cmove float" although this is
// the CMoveD/vRegD/fcseld (double) rule — now says "cmove double".
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10275 
10276 // ============================================================================
10277 // Arithmetic Instructions
10278 //
10279 
10280 // Integer Addition
10281 
10282 // TODO
10283 // these currently employ operations which do not set CR and hence are
10284 // not flagged as killing CR but we would like to isolate the cases
10285 // where we want to set flags from those where we don't. need to work
10286 // out how to do that.
10287 
10288 instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10289   match(Set dst (AddI src1 src2));
10290 
10291   ins_cost(INSN_COST);
10292   format %{ "addw  $dst, $src1, $src2" %}
10293 
10294   ins_encode %{
10295     __ addw(as_Register($dst$$reg),
10296             as_Register($src1$$reg),
10297             as_Register($src2$$reg));
10298   %}
10299 
10300   ins_pipe(ialu_reg_reg);
10301 %}
10302 
10303 instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
10304   match(Set dst (AddI src1 src2));
10305 
10306   ins_cost(INSN_COST);
10307   format %{ "addw $dst, $src1, $src2" %}
10308 
10309   // use opcode to indicate that this is an add not a sub
10310   opcode(0x0);
10311 
10312   ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));
10313 
10314   ins_pipe(ialu_reg_imm);
10315 %}
10316 
// 32-bit add of an immediate to the low word of a long (AddI of ConvL2I):
// the truncation is free since addw only reads the low 32 bits of src1.
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10330 
10331 // Pointer Addition
// Pointer add, pointer + 64-bit register offset.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10346 
// Pointer add with a 32-bit offset: folds the ConvI2L into the add's
// sxtw operand extension, avoiding a separate sign-extend instruction.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}
10361 
// Pointer add with a shifted 64-bit index: folds the LShiftL into the
// address computation (lea with an lsl-scaled register offset).
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10376 
// Pointer add with a sign-extended, scaled 32-bit index: folds both the
// ConvI2L and the LShiftL into a single lea with an sxtw-scaled offset.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10391 
// Sign-extend an int to long and shift left, in one sbfiz.
// The copied field width is clamped to 32 bits via MIN2 — presumably
// because only the low 32 bits of the ConvI2L source are significant.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN2(32, (int)((-$scale$$constant) & 63)));
  %}

  ins_pipe(ialu_reg_shift);
%}
10406 
10407 // Pointer Immediate Addition
10408 // n.b. this needs to be more expensive than using an indirect memory
10409 // operand
// Pointer add, pointer + add/sub-encodable long immediate.
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10423 
10424 // Long Addition
// 64-bit long add, register + register.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10440 
// Long Immediate Addition.
// No constant pool entries required.
// 64-bit long add, register + add/sub-encodable immediate.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10455 
10456 // Integer Subtraction
// 32-bit integer subtract, register - register.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10471 
10472 // Immediate Subtraction
// 32-bit integer subtract, register - add/sub-encodable immediate.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10486 
10487 // Long Subtraction
// 64-bit long subtract, register - register.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10503 
// Long Immediate Subtraction.
// No constant pool entries required.
// 64-bit long subtract, register - add/sub-encodable immediate.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // Fixed: missing space between mnemonic and operand ("sub$dst") in the
  // debug format string; now matches the other sub formats in this file.
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10518 
10519 // Integer Negation (special case for sub)
10520 
// 32-bit integer negation (0 - src), matched as SubI of an immI0 zero.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10534 
10535 // Long Negation
10536 
// 64-bit long negation (0 - src), matched as SubL of an immL0 zero.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10550 
10551 // Integer Multiply
10552 
// 32-bit integer multiply, register * register.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10567 
// Signed 32x32 -> 64-bit multiply: MulL of two sign-extended ints
// collapses to a single smull instead of two sxtw plus mul.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10582 
10583 // Long Multiply
10584 
// 64-bit long multiply, register * register.
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10599 
// High 64 bits of a signed 64x64 multiply (MulHiL) via smulh.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10615 
// High 64 bits of an unsigned 64x64 multiply (UMulHiL) via umulh.
instruct umulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (UMulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "umulh   $dst, $src1, $src2\t# umulhi" %}

  ins_encode %{
    __ umulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10631 
10632 // Combined Integer Multiply & Add/Sub
10633 
// Fused 32-bit multiply-add: dst = src3 + src1 * src2, one maddw.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Fixed: format said "madd" but the encoding emits the 32-bit maddw;
  // the w-suffix convention matches addw/subw/mulw elsewhere in this file.
  format %{ "maddw $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10649 
// Fused 32-bit multiply-subtract: dst = src3 - src1 * src2, one msubw.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Fixed: format said "msub" but the encoding emits the 32-bit msubw.
  format %{ "msubw $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10665 
10666 // Combined Integer Multiply & Neg
10667 
// Fused 32-bit multiply-negate: dst = -(src1 * src2), one mnegw.
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
  match(Set dst (MulI (SubI zero src1) src2));

  ins_cost(INSN_COST * 3);
  // Fixed: format said "mneg" but the encoding emits the 32-bit mnegw.
  format %{ "mnegw $dst, $src1, $src2" %}

  ins_encode %{
    __ mnegw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10682 
10683 // Combined Long Multiply & Add/Sub
10684 
// Fused 64-bit multiply-add: dst = src3 + src1 * src2, one madd.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10700 
// Fused 64-bit multiply-subtract: dst = src3 - src1 * src2, one msub.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10716 
10717 // Combined Long Multiply & Neg
10718 
// Fused 64-bit multiply-negate: dst = -(src1 * src2), one mneg.
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mneg(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10733 
10734 // Combine Integer Signed Multiply & Add/Sub/Neg Long
10735 
// Signed 32x32+64 fused multiply-add: dst = src3 + sext(src1) * sext(src2),
// one smaddl (folds both ConvI2L nodes into the instruction).
instruct smaddL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (AddL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smaddl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smaddl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10751 
// Signed 32x32 fused multiply-subtract: dst = src3 - sext(src1) * sext(src2),
// one smsubl.
instruct smsubL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (SubL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smsubl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smsubl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10767 
// Signed 32x32 fused multiply-negate: dst = -(sext(src1) * sext(src2)),
// one smnegl.
instruct smnegL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero (ConvI2L src1)) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smnegl  $dst, $src1, $src2" %}

  ins_encode %{
    __ smnegl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10782 
10783 // Combined Multiply-Add Shorts into Integer (dst = src1 * src2 + src3 * src4)
10784 
// MulAddS2I (dst = src1*src2 + src3*src4) as a two-instruction sequence:
// first product goes through rscratch1, then maddw folds in the second.
instruct muladdS2I(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3, iRegIorL2I src4) %{
  match(Set dst (MulAddS2I (Binary src1 src2) (Binary src3 src4)));

  ins_cost(INSN_COST * 5);
  format %{ "mulw  rscratch1, $src1, $src2\n\t"
            "maddw $dst, $src3, $src4, rscratch1" %}

  ins_encode %{
    __ mulw(rscratch1, as_Register($src1$$reg), as_Register($src2$$reg));
    __ maddw(as_Register($dst$$reg), as_Register($src3$$reg), as_Register($src4$$reg), rscratch1); %}

  ins_pipe(imac_reg_reg);
%}
10798 
10799 // Integer Divide
10800 
// 32-bit signed integer divide via the aarch64_enc_divw encoding class.
// NOTE(review): Java corner cases (divide by zero, MIN_VALUE / -1) are
// presumably handled by the encoding class / earlier checks — not visible here.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10810 
10811 // Long Divide
10812 
// 64-bit signed long divide via the aarch64_enc_div encoding class.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10822 
10823 // Integer Remainder
10824 
// 32-bit signed remainder: sdivw then msubw (dst = src1 - q*src2),
// per the format string; emitted by the aarch64_enc_modw encoding class.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10835 
10836 // Long Remainder
10837 
// 64-bit signed remainder: sdiv then msub (dst = src1 - q*src2),
// emitted by the aarch64_enc_mod encoding class.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Fixed: use "\n\t" between the two format lines, matching the modI
  // format above, so multi-line disassembly output indents consistently.
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10848 
10849 // Unsigned Integer Divide
10850 
// 32-bit unsigned integer divide via udivw.
instruct UdivI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (UDivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "udivw  $dst, $src1, $src2" %}

  ins_encode %{
    __ udivw($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(idiv_reg_reg);
%}
10863 
10864 //  Unsigned Long Divide
10865 
// 64-bit unsigned long divide via udiv.
instruct UdivL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (UDivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "udiv   $dst, $src1, $src2" %}

  ins_encode %{
    __ udiv($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(ldiv_reg_reg);
%}
10878 
10879 // Unsigned Integer Remainder
10880 
// 32-bit unsigned remainder: udivw then msubw (dst = src1 - q*src2).
instruct UmodI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (UModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "udivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode %{
    __ udivw(rscratch1, $src1$$Register, $src2$$Register);
    __ msubw($dst$$Register, rscratch1, $src2$$Register, $src1$$Register);
  %}

  ins_pipe(idiv_reg_reg);
%}
10895 
10896 // Unsigned Long Remainder
10897 
// 64-bit unsigned remainder: udiv then msub (dst = src1 - q*src2).
instruct UModL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (UModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Fixed: use "\n\t" between the two format lines, matching the
  // UmodI_reg_reg format above for consistent disassembly indentation.
  format %{ "udiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode %{
    __ udiv(rscratch1, $src1$$Register, $src2$$Register);
    __ msub($dst$$Register, rscratch1, $src2$$Register, $src1$$Register);
  %}

  ins_pipe(ldiv_reg_reg);
%}
10912 
10913 // Integer Shifts
10914 
10915 // Shift Left Register
// 32-bit shift left by a register amount (lslvw).
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10930 
10931 // Shift Left Immediate
// 32-bit shift left by an immediate; count masked to 0..31 as Java requires.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10946 
10947 // Shift Right Logical Register
// 32-bit logical (unsigned) shift right by a register amount (lsrvw).
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10962 
10963 // Shift Right Logical Immediate
// 32-bit logical shift right by an immediate; count masked to 0..31.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10978 
10979 // Shift Right Arithmetic Register
// 32-bit arithmetic (signed) shift right by a register amount (asrvw).
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10994 
10995 // Shift Right Arithmetic Immediate
// 32-bit arithmetic shift right by an immediate; count masked to 0..31.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11010 
11011 // Combined Int Mask and Right Shift (using UBFM)
11012 // TODO
11013 
11014 // Long Shifts
11015 
11016 // Shift Left Register
// 64-bit shift left by a register amount (lslv).
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11031 
11032 // Shift Left Immediate
// 64-bit shift left by an immediate; count masked to 0..63 as Java requires.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11047 
11048 // Shift Right Logical Register
// 64-bit logical (unsigned) shift right by a register amount (lsrv).
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11063 
11064 // Shift Right Logical Immediate
// 64-bit logical shift right by an immediate; count masked to 0..63.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11079 
11080 // A special-case pattern for card table stores.
// Logical shift right of a pointer reinterpreted as bits (CastP2X);
// per the comment above, matched specifically for card table stores.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11095 
11096 // Shift Right Arithmetic Register
// 64-bit arithmetic (signed) shift right by a register amount (asrv).
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11111 
11112 // Shift Right Arithmetic Immediate
// 64-bit arithmetic shift right by an immediate; count masked to 0..63.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11127 
11128 // BEGIN This section of the file is automatically generated. Do not edit --------------
11129 // This section is generated from aarch64_ad.m4
11130 
11131 // This pattern is automatically generated from aarch64_ad.m4
11132 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11133 instruct regL_not_reg(iRegLNoSp dst,
11134                          iRegL src1, immL_M1 m1,
11135                          rFlagsReg cr) %{
11136   match(Set dst (XorL src1 m1));
11137   ins_cost(INSN_COST);
11138   format %{ "eon  $dst, $src1, zr" %}
11139 
11140   ins_encode %{
11141     __ eon(as_Register($dst$$reg),
11142               as_Register($src1$$reg),
11143               zr,
11144               Assembler::LSL, 0);
11145   %}
11146 
11147   ins_pipe(ialu_reg);
11148 %}
11149 
11150 // This pattern is automatically generated from aarch64_ad.m4
11151 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11152 instruct regI_not_reg(iRegINoSp dst,
11153                          iRegIorL2I src1, immI_M1 m1,
11154                          rFlagsReg cr) %{
11155   match(Set dst (XorI src1 m1));
11156   ins_cost(INSN_COST);
11157   format %{ "eonw  $dst, $src1, zr" %}
11158 
11159   ins_encode %{
11160     __ eonw(as_Register($dst$$reg),
11161               as_Register($src1$$reg),
11162               zr,
11163               Assembler::LSL, 0);
11164   %}
11165 
11166   ins_pipe(ialu_reg);
11167 %}
11168 
11169 // This pattern is automatically generated from aarch64_ad.m4
11170 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11171 instruct NegI_reg_URShift_reg(iRegINoSp dst,
11172                               immI0 zero, iRegIorL2I src1, immI src2) %{
11173   match(Set dst (SubI zero (URShiftI src1 src2)));
11174 
11175   ins_cost(1.9 * INSN_COST);
11176   format %{ "negw  $dst, $src1, LSR $src2" %}
11177 
11178   ins_encode %{
11179     __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
11180             Assembler::LSR, $src2$$constant & 0x1f);
11181   %}
11182 
11183   ins_pipe(ialu_reg_shift);
11184 %}
11185 
11186 // This pattern is automatically generated from aarch64_ad.m4
11187 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11188 instruct NegI_reg_RShift_reg(iRegINoSp dst,
11189                               immI0 zero, iRegIorL2I src1, immI src2) %{
11190   match(Set dst (SubI zero (RShiftI src1 src2)));
11191 
11192   ins_cost(1.9 * INSN_COST);
11193   format %{ "negw  $dst, $src1, ASR $src2" %}
11194 
11195   ins_encode %{
11196     __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
11197             Assembler::ASR, $src2$$constant & 0x1f);
11198   %}
11199 
11200   ins_pipe(ialu_reg_shift);
11201 %}
11202 
11203 // This pattern is automatically generated from aarch64_ad.m4
11204 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11205 instruct NegI_reg_LShift_reg(iRegINoSp dst,
11206                               immI0 zero, iRegIorL2I src1, immI src2) %{
11207   match(Set dst (SubI zero (LShiftI src1 src2)));
11208 
11209   ins_cost(1.9 * INSN_COST);
11210   format %{ "negw  $dst, $src1, LSL $src2" %}
11211 
11212   ins_encode %{
11213     __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
11214             Assembler::LSL, $src2$$constant & 0x1f);
11215   %}
11216 
11217   ins_pipe(ialu_reg_shift);
11218 %}
11219 
11220 // This pattern is automatically generated from aarch64_ad.m4
11221 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11222 instruct NegL_reg_URShift_reg(iRegLNoSp dst,
11223                               immL0 zero, iRegL src1, immI src2) %{
11224   match(Set dst (SubL zero (URShiftL src1 src2)));
11225 
11226   ins_cost(1.9 * INSN_COST);
11227   format %{ "neg  $dst, $src1, LSR $src2" %}
11228 
11229   ins_encode %{
11230     __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
11231             Assembler::LSR, $src2$$constant & 0x3f);
11232   %}
11233 
11234   ins_pipe(ialu_reg_shift);
11235 %}
11236 
11237 // This pattern is automatically generated from aarch64_ad.m4
11238 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11239 instruct NegL_reg_RShift_reg(iRegLNoSp dst,
11240                               immL0 zero, iRegL src1, immI src2) %{
11241   match(Set dst (SubL zero (RShiftL src1 src2)));
11242 
11243   ins_cost(1.9 * INSN_COST);
11244   format %{ "neg  $dst, $src1, ASR $src2" %}
11245 
11246   ins_encode %{
11247     __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
11248             Assembler::ASR, $src2$$constant & 0x3f);
11249   %}
11250 
11251   ins_pipe(ialu_reg_shift);
11252 %}
11253 
11254 // This pattern is automatically generated from aarch64_ad.m4
11255 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11256 instruct NegL_reg_LShift_reg(iRegLNoSp dst,
11257                               immL0 zero, iRegL src1, immI src2) %{
11258   match(Set dst (SubL zero (LShiftL src1 src2)));
11259 
11260   ins_cost(1.9 * INSN_COST);
11261   format %{ "neg  $dst, $src1, LSL $src2" %}
11262 
11263   ins_encode %{
11264     __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
11265             Assembler::LSL, $src2$$constant & 0x3f);
11266   %}
11267 
11268   ins_pipe(ialu_reg_shift);
11269 %}
11270 
11271 // This pattern is automatically generated from aarch64_ad.m4
11272 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11273 instruct AndI_reg_not_reg(iRegINoSp dst,
11274                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
11275   match(Set dst (AndI src1 (XorI src2 m1)));
11276   ins_cost(INSN_COST);
11277   format %{ "bicw  $dst, $src1, $src2" %}
11278 
11279   ins_encode %{
11280     __ bicw(as_Register($dst$$reg),
11281               as_Register($src1$$reg),
11282               as_Register($src2$$reg),
11283               Assembler::LSL, 0);
11284   %}
11285 
11286   ins_pipe(ialu_reg_reg);
11287 %}
11288 
11289 // This pattern is automatically generated from aarch64_ad.m4
11290 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11291 instruct AndL_reg_not_reg(iRegLNoSp dst,
11292                          iRegL src1, iRegL src2, immL_M1 m1) %{
11293   match(Set dst (AndL src1 (XorL src2 m1)));
11294   ins_cost(INSN_COST);
11295   format %{ "bic  $dst, $src1, $src2" %}
11296 
11297   ins_encode %{
11298     __ bic(as_Register($dst$$reg),
11299               as_Register($src1$$reg),
11300               as_Register($src2$$reg),
11301               Assembler::LSL, 0);
11302   %}
11303 
11304   ins_pipe(ialu_reg_reg);
11305 %}
11306 
11307 // This pattern is automatically generated from aarch64_ad.m4
11308 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11309 instruct OrI_reg_not_reg(iRegINoSp dst,
11310                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
11311   match(Set dst (OrI src1 (XorI src2 m1)));
11312   ins_cost(INSN_COST);
11313   format %{ "ornw  $dst, $src1, $src2" %}
11314 
11315   ins_encode %{
11316     __ ornw(as_Register($dst$$reg),
11317               as_Register($src1$$reg),
11318               as_Register($src2$$reg),
11319               Assembler::LSL, 0);
11320   %}
11321 
11322   ins_pipe(ialu_reg_reg);
11323 %}
11324 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 | (-1 ^ val2) ==> orn (dst = src1 | ~src2)
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11342 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// -1 ^ (val2 ^ val1) ==> eonw (dst = src1 ^ ~src2)
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11360 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// -1 ^ (val2 ^ val1) ==> eon (dst = src1 ^ ~src2)
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11378 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val >>> shift)) ==> bicw
// (shift amount is encoded modulo 32: masked with 0x1f for the w-form)
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11399 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val >>> shift)) ==> bic
// (shift amount is encoded modulo 64: masked with 0x3f for the x-form)
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11420 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val >> shift)) ==> bicw
// (shift amount is encoded modulo 32: masked with 0x1f for the w-form)
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11441 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val >> shift)) ==> bic
// (shift amount is encoded modulo 64: masked with 0x3f for the x-form)
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11462 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val ror shift)) ==> bicw
// (rotate amount is encoded modulo 32: masked with 0x1f for the w-form)
instruct AndI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11483 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val ror shift)) ==> bic
// (rotate amount is encoded modulo 64: masked with 0x3f for the x-form)
instruct AndL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11504 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val << shift)) ==> bicw
// (shift amount is encoded modulo 32: masked with 0x1f for the w-form)
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11525 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val << shift)) ==> bic
// (shift amount is encoded modulo 64: masked with 0x3f for the x-form)
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11546 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val >>> shift)) ==> eonw
// (shift amount is encoded modulo 32: masked with 0x1f for the w-form)
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11567 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val >>> shift)) ==> eon
// (shift amount is encoded modulo 64: masked with 0x3f for the x-form)
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11588 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val >> shift)) ==> eonw
// (shift amount is encoded modulo 32: masked with 0x1f for the w-form)
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11609 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val >> shift)) ==> eon
// (shift amount is encoded modulo 64: masked with 0x3f for the x-form)
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11630 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val ror shift)) ==> eonw
// (rotate amount is encoded modulo 32: masked with 0x1f for the w-form)
instruct XorI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (XorI src4 (XorI(RotateRight src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11651 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val ror shift)) ==> eon
// (rotate amount is encoded modulo 64: masked with 0x3f for the x-form)
instruct XorL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (XorL src4 (XorL(RotateRight src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11672 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val << shift)) ==> eonw
// (shift amount is encoded modulo 32: masked with 0x1f for the w-form)
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11693 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val << shift)) ==> eon
// (shift amount is encoded modulo 64: masked with 0x3f for the x-form)
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11714 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val >>> shift)) ==> ornw
// (shift amount is encoded modulo 32: masked with 0x1f for the w-form)
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11735 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val >>> shift)) ==> orn
// (shift amount is encoded modulo 64: masked with 0x3f for the x-form)
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11756 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val >> shift)) ==> ornw
// (shift amount is encoded modulo 32: masked with 0x1f for the w-form)
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11777 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val >> shift)) ==> orn
// (shift amount is encoded modulo 64: masked with 0x3f for the x-form)
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11798 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val ror shift)) ==> ornw
// (rotate amount is encoded modulo 32: masked with 0x1f for the w-form)
instruct OrI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11819 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val ror shift)) ==> orn
// (rotate amount is encoded modulo 64: masked with 0x3f for the x-form)
instruct OrL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11840 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val << shift)) ==> ornw
// (shift amount is encoded modulo 32: masked with 0x1f for the w-form)
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11861 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val << shift)) ==> orn
// (shift amount is encoded modulo 64: masked with 0x3f for the x-form)
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11882 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (val >>> shift) ==> andw (shifted-register form)
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11903 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (val >>> shift) ==> andr (shifted-register form)
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11924 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (val >> shift) ==> andw (shifted-register form)
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11945 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (val >> shift) ==> andr (shifted-register form)
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11966 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (val << shift) ==> andw (shifted-register form)
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11987 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (val << shift) ==> andr (shifted-register form)
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12008 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (val ror shift) ==> andw (shifted-register form)
instruct AndI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12029 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (val ror shift) ==> andr (shifted-register form)
instruct AndL_reg_RotateRight_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12050 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (val >>> shift) ==> eorw (shifted-register form)
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12071 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (val >>> shift) ==> eor (shifted-register form)
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12092 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (val >> shift) ==> eorw (shifted-register form)
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12113 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (val >> shift) ==> eor (shifted-register form)
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12134 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (val << shift) ==> eorw (shifted-register form)
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12155 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (val << shift) ==> eor (shifted-register form)
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12176 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (val ror shift) ==> eorw (shifted-register form)
instruct XorI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12197 
12198 // This pattern is automatically generated from aarch64_ad.m4
12199 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12200 instruct XorL_reg_RotateRight_reg(iRegLNoSp dst,
12201                          iRegL src1, iRegL src2,
12202                          immI src3) %{
12203   match(Set dst (XorL src1 (RotateRight src2 src3)));
12204 
12205   ins_cost(1.9 * INSN_COST);
12206   format %{ "eor  $dst, $src1, $src2, ROR $src3" %}
12207 
12208   ins_encode %{
12209     __ eor(as_Register($dst$$reg),
12210               as_Register($src1$$reg),
12211               as_Register($src2$$reg),
12212               Assembler::ROR,
12213               $src3$$constant & 0x3f);
12214   %}
12215 
12216   ins_pipe(ialu_reg_reg_shift);
12217 %}
12218 
// ---- OR with a shifted second operand ----------------------------------
// Same scheme as the Xor family above: (OrI/OrL src1 (shift src2 src3))
// becomes a single orrw/orr with a shifted-register operand.  Shift
// immediates are masked to the operand width (0x1f / 0x3f); the
// 1.9 * INSN_COST makes the fused form win over separate shift + or.
// NOTE(review): generated from aarch64_ad.m4 -- edit the .m4, not here.

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_RotateRight_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12386 
// ---- ADD with a shifted second operand ---------------------------------
// (AddI/AddL src1 (shift src2 src3)) folded into a single addw/add with a
// shifted-register operand.  Note there is no ROR variant here: AArch64's
// add (shifted register) only supports LSL/LSR/ASR, unlike the logical
// ops above.  Shift immediates masked to 0x1f / 0x3f as usual.
// NOTE(review): generated from aarch64_ad.m4 -- edit the .m4, not here.

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12512 
// ---- SUB with a shifted second operand ---------------------------------
// (SubI/SubL src1 (shift src2 src3)) folded into a single subw/sub with a
// shifted-register operand (LSR/ASR/LSL only -- no rotate form, matching
// the Add family above).  Shift immediates masked to 0x1f / 0x3f.
// NOTE(review): generated from aarch64_ad.m4 -- edit the .m4, not here.

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12638 
// ---- Signed/unsigned bitfield moves for shift-left-then-shift-right ----
// (RShiftX (LShiftX src l) r) and (URShiftX (LShiftX src l) r) collapse to
// a single sbfm/ubfm.  The encode blocks compute the bitfield-move
// immediates from the two shift counts: r = (rshift - lshift) mod width is
// the rotate amount and s = width-1 - lshift the top bit of the field
// (the immr/imms fields of SBFM/UBFM -- see the ARM ARM for the exact
// semantics).  Shift counts are masked to the operand width first, as the
// JVM defines shifts modulo the width.
// NOTE(review): generated from aarch64_ad.m4 -- edit the .m4, not here.

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12730 
// Bitfield extract with shift & mask
//
// (AndX (URShiftX src rshift) mask) with a contiguous low-bit mask is a
// bitfield extract: ubfx[w] dst, src, rshift, width where
// width = log2(mask + 1).  The predicate re-derives rshift and the mask
// from the ideal graph (n->in(...)) and rejects the pattern when
// rshift + width would exceed the register width, which ubfx cannot
// encode.  immI_bitmask/immL_bitmask guarantee mask+1 is a power of two.
// NOTE(review): generated from aarch64_ad.m4 -- edit the .m4, not here.

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));
  // Make sure we are not going to exceed what ubfx can do.
  predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12796 
12797 
// ---- Bitfield insert in zero (mask then shift left) --------------------
// (LShiftX (AndX src mask) lshift) with a contiguous low-bit mask is a
// ubfiz[w] dst, src, lshift, width where width = log2(mask + 1).  The
// ConvI2L/ConvL2I variants additionally skip the int<->long conversion
// node; their predicates use tighter bounds (<= 31) where the result must
// fit the narrower width without touching the sign bit.
// NOTE(review): generated from aarch64_ad.m4 -- edit the .m4, not here.

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  predicate((exact_log2_long(n->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
instruct ubfizwIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (LShiftI (AndI src mask) lshift)));
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
instruct ubfizLConvL2I(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
%{
  match(Set dst (ConvL2I (LShiftL (AndL src mask) lshift)));
  predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// If there is a convert L to I block between and AndL and a LShiftI, we can also match ubfiz
instruct ubfizLConvL2Ix(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
%{
  match(Set dst (LShiftI (ConvL2I (AndL src mask)) lshift));
  predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 31)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Can skip int2long conversions after AND with small bitmask
instruct ubfizIConvI2LAndI(iRegLNoSp dst, iRegI src, immI_bitmask msk)
%{
  match(Set dst (ConvI2L (AndI src msk)));
  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, 0, exact_log2($msk + 1) " %}
  ins_encode %{
    __ ubfiz(as_Register($dst$$reg), as_Register($src$$reg), 0, exact_log2($msk$$constant + 1));
  %}
  ins_pipe(ialu_reg_shift);
%}
12943 
12944 
// Rotations
//
// The extr* patterns fuse (Op (LShiftX src1 lshift) (URShiftX src2 rshift))
// into a single EXTR, which extracts a register-width field starting at bit
// rshift from the concatenation src1:src2.  The predicate admits the
// pattern only when (lshift + rshift) is a multiple of the register width,
// so the two shifted values occupy disjoint bit ranges and the OR is
// exactly the EXTR result (a funnel shift).

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12963 
12964 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// 32-bit variant of extrOrL: fuse (OrI (LShiftI src1 lshift)
// (URShiftI src2 rshift)) into a single extrw when the two shift amounts
// sum to a multiple of 32 (checked by the predicate), i.e. the OR of the
// two shifted values is a funnel shift of src1:src2.
// Fix: the format string previously printed "extr" although the 32-bit
// extrw is emitted; every other 32-bit pattern here prints the w-form
// mnemonic (eorw, orrw, addw, ...).  NOTE(review): mirror this fix in
// aarch64_ad.m4, the generator for this section.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12981 
12982 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Same fusion as extrOrL but for AddL: when (lshift + rshift) is a
// multiple of 64 (checked by the predicate) the two shifted values have
// disjoint bit ranges, so the ADD equals the OR and a single extr suffices.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12999 
13000 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// 32-bit variant of extrAddL: when (lshift + rshift) is a multiple of 32
// (checked by the predicate) the two shifted values have disjoint bit
// ranges, so the ADD equals the OR and a single extrw suffices.
// Fix: the format string previously printed "extr" although the 32-bit
// extrw is emitted; every other 32-bit pattern here prints the w-form
// mnemonic.  NOTE(review): mirror this fix in aarch64_ad.m4, the
// generator for this section.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
13017 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit rotate-right by an immediate: ROR is encoded as EXTR with both
// source operands equal to src; shift amount is masked to 0..31.
instruct rorI_imm(iRegINoSp dst, iRegI src, immI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ extrw(as_Register($dst$$reg), as_Register($src$$reg), as_Register($src$$reg),
               $shift$$constant & 0x1f);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
13033 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit rotate-right by an immediate, encoded as EXTR with src repeated;
// shift amount is masked to 0..63.
instruct rorL_imm(iRegLNoSp dst, iRegL src, immI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ extr(as_Register($dst$$reg), as_Register($src$$reg), as_Register($src$$reg),
               $shift$$constant & 0x3f);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
13049 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit rotate-right by a register amount (RORVW takes the count mod 32
// in hardware, so no explicit masking is needed here).
instruct rorI_reg(iRegINoSp dst, iRegI src, iRegI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg), as_Register($shift$$reg));
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
13064 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit rotate-right by a register amount via RORV.
instruct rorL_reg(iRegLNoSp dst, iRegL src, iRegI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ rorv(as_Register($dst$$reg), as_Register($src$$reg), as_Register($shift$$reg));
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
13079 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit rotate-left by register: there is no ROLV instruction, so this
// negates the count (subw from zr into rscratch1) and rotates right.
// Clobbers rscratch1.
instruct rolI_reg(iRegINoSp dst, iRegI src, iRegI shift)
%{
  match(Set dst (RotateLeft src shift));

  ins_cost(INSN_COST);
  format %{ "rol    $dst, $src, $shift" %}

  ins_encode %{
     __ subw(rscratch1, zr, as_Register($shift$$reg));
     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg), rscratch1);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
13095 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit rotate-left by register: negate the count into rscratch1, then
// rotate right with RORV. Clobbers rscratch1.
instruct rolL_reg(iRegLNoSp dst, iRegL src, iRegI shift)
%{
  match(Set dst (RotateLeft src shift));

  ins_cost(INSN_COST);
  format %{ "rol    $dst, $src, $shift" %}

  ins_encode %{
     __ subw(rscratch1, zr, as_Register($shift$$reg));
     __ rorv(as_Register($dst$$reg), as_Register($src$$reg), rscratch1);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
13111 
13112 
13113 // Add/subtract (extended)
13114 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fold AddL of a sign-extended int (ConvI2L) into one extended-register
// add with an sxtw extension on src2.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
13129 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fold SubL of a sign-extended int (ConvI2L) into one extended-register
// sub with an sxtw extension on src2.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
13144 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 << 16) >> 16 is a 16-bit sign extension, so the whole expression
// folds into add with an sxth-extended src2.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}
13159 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 << 24) >> 24 is an 8-bit sign extension; fold into add with sxtb.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
13174 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 << 24) >>> 24 (unsigned shift) is an 8-bit zero extension; fold
// into add with uxtb.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
13189 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: (src2 << 48) >> 48 is a 16-bit sign extension; fold into add
// with sxth.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}
13204 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: (src2 << 32) >> 32 is a 32-bit sign extension; fold into add
// with sxtw.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
13219 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: (src2 << 56) >> 56 is an 8-bit sign extension; fold into add
// with sxtb.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
13234 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: (src2 << 56) >>> 56 is an 8-bit zero extension; fold into add
// with uxtb.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
13249 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src2 & 0xff is an 8-bit zero extension; fold into addw with uxtb.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
13264 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src2 & 0xffff is a 16-bit zero extension; fold into addw with uxth.
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}
13279 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: src2 & 0xff is an 8-bit zero extension; fold into add with uxtb.
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
13294 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: src2 & 0xffff is a 16-bit zero extension; fold into add with uxth.
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}
13309 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: src2 & 0xffffffff is a 32-bit zero extension; fold into add
// with uxtw.
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
13324 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src2 & 0xff is an 8-bit zero extension; fold into subw with uxtb.
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
13339 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src2 & 0xffff is a 16-bit zero extension; fold into subw with uxth.
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}
13354 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: src2 & 0xff is an 8-bit zero extension; fold into sub with uxtb.
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
13369 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: src2 & 0xffff is a 16-bit zero extension; fold into sub with uxth.
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}
13384 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: src2 & 0xffffffff is a 32-bit zero extension; fold into sub
// with uxtw.
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
13399 
13400 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 8-bit sign extension of src2 followed by a left shift (lshift2), folded
// into one extended-register add: add dst, src1, src2, sxtb #lshift2.
// NOTE(review): the format string prints the literal text "lshift2" (no $);
// cosmetic only — a fix would belong in aarch64_ad.m4.
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13415 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 16-bit sign extension of src2 followed by a left shift, folded into
// add dst, src1, src2, sxth #lshift2 (shift amount limited by immIExt).
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13430 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit sign extension of src2 followed by a left shift, folded into
// add dst, src1, src2, sxtw #lshift2.
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13445 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 8-bit sign extension of src2 followed by a left shift, folded into
// sub dst, src1, src2, sxtb #lshift2.
instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13460 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 16-bit sign extension of src2 followed by a left shift, folded into
// sub dst, src1, src2, sxth #lshift2.
instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13475 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit sign extension of src2 followed by a left shift, folded into
// sub dst, src1, src2, sxtw #lshift2.
instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13490 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: 8-bit sign extension of src2 then left shift, folded into
// addw dst, src1, src2, sxtb #lshift2.
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13505 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: 16-bit sign extension of src2 then left shift, folded into
// addw dst, src1, src2, sxth #lshift2.
instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13520 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: 8-bit sign extension of src2 then left shift, folded into
// subw dst, src1, src2, sxtb #lshift2.
instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13535 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: 16-bit sign extension of src2 then left shift, folded into
// subw dst, src1, src2, sxth #lshift2.
instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13550 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// AddL of a sign-extended int shifted left: ConvI2L + LShiftL folded into
// add dst, src1, src2, sxtw #lshift.
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13565 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// SubL of a sign-extended int shifted left: ConvI2L + LShiftL folded into
// sub dst, src1, src2, sxtw #lshift.
instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13580 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 & 0xff) << lshift: 8-bit zero extension plus shift folded into
// add dst, src1, src2, uxtb #lshift.
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13595 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 & 0xffff) << lshift: 16-bit zero extension plus shift folded into
// add dst, src1, src2, uxth #lshift.
instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13610 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 & 0xffffffff) << lshift: 32-bit zero extension plus shift folded
// into add dst, src1, src2, uxtw #lshift.
instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13625 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 & 0xff) << lshift: 8-bit zero extension plus shift folded into
// sub dst, src1, src2, uxtb #lshift.
instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13640 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 & 0xffff) << lshift: 16-bit zero extension plus shift folded into
// sub dst, src1, src2, uxth #lshift.
instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13655 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 & 0xffffffff) << lshift: 32-bit zero extension plus shift folded
// into sub dst, src1, src2, uxtw #lshift.
instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13670 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: (src2 & 0xff) << lshift folded into addw with uxtb #lshift.
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13685 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: (src2 & 0xffff) << lshift folded into addw with uxth #lshift.
instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13700 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: (src2 & 0xff) << lshift folded into subw with uxtb #lshift.
instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13715 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: (src2 & 0xffff) << lshift folded into subw with uxth #lshift.
instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13730 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Conditional select: dst = LT ? src1 : src2 (32-bit cselw). No match
// rule — effect-only, so it is only instantiated from expand rules
// elsewhere in the file (presumably min/max — confirm in full source).
// Reads flags (cr) produced by a separate compare.
instruct cmovI_reg_reg_lt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE src2, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 lt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             $src2$$Register,
             Assembler::LT);
  %}
  ins_pipe(icond_reg_reg);
%}
13747 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Conditional select: dst = GT ? src1 : src2 (32-bit cselw). Effect-only;
// instantiated from expand rules elsewhere; reads flags from cr.
instruct cmovI_reg_reg_gt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE src2, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 gt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             $src2$$Register,
             Assembler::GT);
  %}
  ins_pipe(icond_reg_reg);
%}
13764 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Conditional select against zero: dst = LT ? src1 : 0 (cselw with zr).
// Effect-only; reads flags from cr.
instruct cmovI_reg_imm0_lt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, zr lt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LT);
  %}
  ins_pipe(icond_reg);
%}
13781 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Conditional select against zero: dst = GT ? src1 : 0 (cselw with zr).
// Effect-only; reads flags from cr.
instruct cmovI_reg_imm0_gt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, zr gt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GT);
  %}
  ins_pipe(icond_reg);
%}
13798 
13799 // This pattern is automatically generated from aarch64_ad.m4
13800 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13801 instruct cmovI_reg_imm1_le(iRegINoSp dst, iRegI src1, rFlagsReg cr)
13802 %{
13803   effect(DEF dst, USE src1, USE cr);
13804   ins_cost(INSN_COST * 2);
13805   format %{ "csincw $dst, $src1, zr le\t"  %}
13806 
13807   ins_encode %{
13808     __ csincw($dst$$Register,
13809              $src1$$Register,
13810              zr,
13811              Assembler::LE);
13812   %}
13813   ins_pipe(icond_reg);
13814 %}
13815 
13816 // This pattern is automatically generated from aarch64_ad.m4
13817 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13818 instruct cmovI_reg_imm1_gt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
13819 %{
13820   effect(DEF dst, USE src1, USE cr);
13821   ins_cost(INSN_COST * 2);
13822   format %{ "csincw $dst, $src1, zr gt\t"  %}
13823 
13824   ins_encode %{
13825     __ csincw($dst$$Register,
13826              $src1$$Register,
13827              zr,
13828              Assembler::GT);
13829   %}
13830   ins_pipe(icond_reg);
13831 %}
13832 
13833 // This pattern is automatically generated from aarch64_ad.m4
13834 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13835 instruct cmovI_reg_immM1_lt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
13836 %{
13837   effect(DEF dst, USE src1, USE cr);
13838   ins_cost(INSN_COST * 2);
13839   format %{ "csinvw $dst, $src1, zr lt\t"  %}
13840 
13841   ins_encode %{
13842     __ csinvw($dst$$Register,
13843              $src1$$Register,
13844              zr,
13845              Assembler::LT);
13846   %}
13847   ins_pipe(icond_reg);
13848 %}
13849 
13850 // This pattern is automatically generated from aarch64_ad.m4
13851 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13852 instruct cmovI_reg_immM1_ge(iRegINoSp dst, iRegI src1, rFlagsReg cr)
13853 %{
13854   effect(DEF dst, USE src1, USE cr);
13855   ins_cost(INSN_COST * 2);
13856   format %{ "csinvw $dst, $src1, zr ge\t"  %}
13857 
13858   ins_encode %{
13859     __ csinvw($dst$$Register,
13860              $src1$$Register,
13861              zr,
13862              Assembler::GE);
13863   %}
13864   ins_pipe(icond_reg);
13865 %}
13866 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(src, 0): compare src with 0, then dst = (src < 0) ? src : 0.
instruct minI_reg_imm0(iRegINoSp dst, iRegIorL2I src, immI0 imm)
%{
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(0, src): commuted form of minI_reg_imm0, same expansion.
instruct minI_imm0_reg(iRegINoSp dst, immI0 imm, iRegIorL2I src)
%{
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(src, 1): compare src with 0, then dst = (src <= 0) ? src : 1
// (the 1 comes from csinc of zr in cmovI_reg_imm1_le).
instruct minI_reg_imm1(iRegINoSp dst, iRegIorL2I src, immI_1 imm)
%{
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_le(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(1, src): commuted form of minI_reg_imm1, same expansion.
instruct minI_imm1_reg(iRegINoSp dst, immI_1 imm, iRegIorL2I src)
%{
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_le(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(src, -1): compare src with 0, then dst = (src < 0) ? src : -1
// (the -1 comes from csinv of zr in cmovI_reg_immM1_lt).
instruct minI_reg_immM1(iRegINoSp dst, iRegIorL2I src, immI_M1 imm)
%{
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(-1, src): commuted form of minI_reg_immM1, same expansion.
instruct minI_immM1_reg(iRegINoSp dst, immI_M1 imm, iRegIorL2I src)
%{
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_lt(dst, src, cr);
  %}
%}
13944 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(src, 0): compare src with 0, then dst = (src > 0) ? src : 0.
instruct maxI_reg_imm0(iRegINoSp dst, iRegIorL2I src, immI0 imm)
%{
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(0, src): commuted form of maxI_reg_imm0, same expansion.
instruct maxI_imm0_reg(iRegINoSp dst, immI0 imm, iRegIorL2I src)
%{
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(src, 1): compare src with 0, then dst = (src > 0) ? src : 1
// (the 1 comes from csinc of zr in cmovI_reg_imm1_gt).
instruct maxI_reg_imm1(iRegINoSp dst, iRegIorL2I src, immI_1 imm)
%{
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(1, src): commuted form of maxI_reg_imm1, same expansion.
instruct maxI_imm1_reg(iRegINoSp dst, immI_1 imm, iRegIorL2I src)
%{
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(src, -1): compare src with 0, then dst = (src >= 0) ? src : -1
// (the -1 comes from csinv of zr in cmovI_reg_immM1_ge).
instruct maxI_reg_immM1(iRegINoSp dst, iRegIorL2I src, immI_M1 imm)
%{
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_ge(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(-1, src): commuted form of maxI_reg_immM1, same expansion.
instruct maxI_immM1_reg(iRegINoSp dst, immI_M1 imm, iRegIorL2I src)
%{
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_ge(dst, src, cr);
  %}
%}
14022 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// ReverseI: reverse the bit order of a 32-bit value with rbitw.
instruct bits_reverse_I(iRegINoSp dst, iRegIorL2I src)
%{
  match(Set dst (ReverseI src));
  ins_cost(INSN_COST);
  format %{ "rbitw  $dst, $src" %}
  ins_encode %{
    __ rbitw($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// ReverseL: reverse the bit order of a 64-bit value with rbit.
instruct bits_reverse_L(iRegLNoSp dst, iRegL src)
%{
  match(Set dst (ReverseL src));
  ins_cost(INSN_COST);
  format %{ "rbit  $dst, $src" %}
  ins_encode %{
    __ rbit($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
14048 
14049 
14050 // END This section of the file is automatically generated. Do not edit --------------
14051 
14052 
14053 // ============================================================================
14054 // Floating Point Arithmetic Instructions
14055 
// Single-precision add: AddF -> fadds.
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision add: AddD -> faddd.
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Single-precision subtract: SubF -> fsubs.
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision subtract: SubD -> fsubd.
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Single-precision multiply: MulF -> fmuls (slightly higher cost than add/sub).
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision multiply: MulD -> fmuld.
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
14145 
// src1 * src2 + src3
// Fused multiply-add (only when UseFMA): FmaF -> fmadds.
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 + src3
// Double-precision fused multiply-add: FmaD -> fmaddd.
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
// Two match rules: negation on either multiplicand commutes to the same
// fmsubs encoding.
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
// Double-precision counterpart of msubF_reg_reg.
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
// Both operands of the Fma are negated; either multiplicand may carry the
// negation.
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
// Double-precision counterpart of mnaddF_reg_reg.
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
14251 
// src1 * src2 - src3
// NOTE(review): the 'zero' operand is referenced by neither the match rule
// nor the encoding; presumably a leftover from an earlier (SubF zero src3)
// form of the rule -- candidate for removal, verify with adlc first.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
// NOTE(review): the 'zero' operand is unused here as well (see
// mnsubF_reg_reg above).
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
  // n.b. insn name should be fnmsubd -- the assembler's double-precision
  // entry point is apparently spelled 'fnmsub'; confirm before renaming.
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
14286 
14287 
// Math.max(FF)F
// Single-precision max via the fmaxs instruction.
instruct maxF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxF src1 src2));

  format %{ "fmaxs   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.min(FF)F
// Single-precision min via the fmins instruction.
instruct minF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinF src1 src2));

  format %{ "fmins   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.max(DD)D
// Double-precision max via the fmaxd instruction.
instruct maxD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MaxD src1 src2));

  format %{ "fmaxd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Math.min(DD)D
// Double-precision min via the fmind instruction.
instruct minD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MinD src1 src2));

  format %{ "fmind   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
14343 
14344 
// Single-precision divide: DivF -> fdivs (costed much higher than mul/add).
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// Double-precision divide: DivD -> fdivd (higher cost than the float form).
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
14374 
// Single-precision negate: NegF -> fnegs.
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // Format fixed to "fnegs": the encoding emits the single-precision fnegs,
  // matching the "fnegd" format used by negD_reg_reg below.
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
14388 
// Double-precision negate: NegD -> fnegd.
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
14402 
// Integer abs: compare with zero, then conditionally negate
// (cnegw dst, src, LT => dst = src < 0 ? -src : src). Clobbers flags.
instruct absI_reg(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (AbsI src));

  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $src, zr\n\t"
            "cnegw $dst, $src, Assembler::LT\t# int abs"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cnegw(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}

// Long abs: 64-bit variant of absI_reg (cmp + cneg). Clobbers flags.
instruct absL_reg(iRegLNoSp dst, iRegL src, rFlagsReg cr)
%{
  match(Set dst (AbsL src));

  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmp  $src, zr\n\t"
            "cneg $dst, $src, Assembler::LT\t# long abs"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cneg(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
14436 
// Single-precision absolute value: AbsF -> fabss.
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double-precision absolute value: AbsD -> fabsd.
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}

// Fused |src1 - src2| (single): match AbsF-of-SubF and emit one fabds.
instruct absdF_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AbsF (SubF src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "fabds   $dst, $src1, $src2" %}
  ins_encode %{
    __ fabds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Fused |src1 - src2| (double): match AbsD-of-SubD and emit one fabdd.
instruct absdD_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AbsD (SubD src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "fabdd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fabdd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
14490 
// Double-precision square root: SqrtD -> fsqrtd.
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  // Pipe class fixed from fp_div_s: a double-precision sqrt belongs in the
  // double-precision divide/sqrt pipe, consistent with divD_reg_reg above.
  ins_pipe(fp_div_d);
%}
14503 
// Single-precision square root: SqrtF -> fsqrts.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (SqrtF src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  // Pipe class fixed from fp_div_d: a single-precision sqrt belongs in the
  // single-precision divide/sqrt pipe, consistent with divF_reg_reg above.
  ins_pipe(fp_div_s);
%}
14516 
// Math.rint, floor, ceil
// Selects the frint variant from the constant rounding mode:
// rint -> frintnd (ties to even), floor -> frintmd (toward -inf),
// ceil -> frintpd (toward +inf).
instruct roundD_reg(vRegD dst, vRegD src, immI rmode) %{
  match(Set dst (RoundDoubleMode src rmode));
  format %{ "frint  $dst, $src, $rmode" %}
  ins_encode %{
    switch ($rmode$$constant) {
      case RoundDoubleModeNode::rmode_rint:
        __ frintnd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_floor:
        __ frintmd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_ceil:
        __ frintpd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      default:
        // Previously an unexpected mode fell through and emitted nothing;
        // fail fast in debug builds instead.
        ShouldNotReachHere();
    }
  %}
  ins_pipe(fp_uop_d);
%}
14539 
// CopySignD: result has the magnitude of src1 and the sign of src2.
// fnegd on 'zero' builds the sign-bit mask in dst (assumes 'zero' holds
// +0.0 -- TODO confirm against the matched ideal graph), then bsl keeps
// mask bits from src2 and the rest from src1.
instruct copySignD_reg(vRegD dst, vRegD src1, vRegD src2, vRegD zero) %{
  match(Set dst (CopySignD src1 (Binary src2 zero)));
  effect(TEMP_DEF dst, USE src1, USE src2, USE zero);
  format %{ "CopySignD  $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg),
                  zero = as_FloatRegister($zero$$reg);
    __ fnegd(dst, zero);
    __ bsl(dst, __ T8B, src2, src1);
  %}
  ins_pipe(fp_uop_d);
%}

// CopySignF: same bit-select trick as copySignD_reg, but the 0x80000000
// sign mask is materialized directly with movi (0x80 shifted left 24).
instruct copySignF_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (CopySignF src1 src2));
  effect(TEMP_DEF dst, USE src1, USE src2);
  format %{ "CopySignF  $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg);
    __ movi(dst, __ T2S, 0x80, 24);
    __ bsl(dst, __ T8B, src2, src1);
  %}
  ins_pipe(fp_uop_d);
%}
14568 
// Math.signum(D)D: returns +-1.0 for non-zero finite input, and the input
// itself for +-0.0 and NaN, built from facgt + ushr + bsl (no branches).
instruct signumD_reg(vRegD dst, vRegD src, vRegD zero, vRegD one) %{
  match(Set dst (SignumD src (Binary zero one)));
  effect(TEMP_DEF dst, USE src, USE zero, USE one);
  format %{ "signumD  $dst, $src" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg),
                  dst = as_FloatRegister($dst$$reg),
                  zero = as_FloatRegister($zero$$reg),
                  one = as_FloatRegister($one$$reg);
    __ facgtd(dst, src, zero); // dst=0 for +-0.0 and NaN. 0xFFF..F otherwise
    __ ushrd(dst, dst, 1);     // dst=0 for +-0.0 and NaN. 0x7FF..F otherwise
    // Bit selection instruction gets bit from "one" for each enabled bit in
    // "dst", otherwise gets a bit from "src". For "src" that contains +-0.0 or
    // NaN the whole "src" will be copied because "dst" is zero. For all other
    // "src" values dst is 0x7FF..F, which means only the sign bit is copied
    // from "src", and all other bits are copied from 1.0.
    __ bsl(dst, __ T8B, one, src);
  %}
  ins_pipe(fp_uop_d);
%}

// Math.signum(F)F: single-precision counterpart of signumD_reg; ushr is
// applied on the 2S lane arrangement.
instruct signumF_reg(vRegF dst, vRegF src, vRegF zero, vRegF one) %{
  match(Set dst (SignumF src (Binary zero one)));
  effect(TEMP_DEF dst, USE src, USE zero, USE one);
  format %{ "signumF  $dst, $src" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg),
                  dst = as_FloatRegister($dst$$reg),
                  zero = as_FloatRegister($zero$$reg),
                  one = as_FloatRegister($one$$reg);
    __ facgts(dst, src, zero);    // dst=0 for +-0.0 and NaN. 0xFFF..F otherwise
    __ ushr(dst, __ T2S, dst, 1); // dst=0 for +-0.0 and NaN. 0x7FF..F otherwise
    // Bit selection instruction gets bit from "one" for each enabled bit in
    // "dst", otherwise gets a bit from "src". For "src" that contains +-0.0 or
    // NaN the whole "src" will be copied because "dst" is zero. For all other
    // "src" values dst is 0x7FF..F, which means only the sign bit is copied
    // from "src", and all other bits are copied from 1.0.
    __ bsl(dst, __ T8B, one, src);
  %}
  ins_pipe(fp_uop_d);
%}
14610 
// Thread.onSpinWait() intrinsic: delegates to the MacroAssembler's
// spin_wait() hint sequence.
instruct onspinwait() %{
  match(OnSpinWait);
  ins_cost(INSN_COST);

  format %{ "onspinwait" %}

  ins_encode %{
    __ spin_wait();
  %}
  ins_pipe(pipe_class_empty);
%}
14622 
14623 // ============================================================================
14624 // Logical Instructions
14625 
14626 // Integer Logical Instructions
14627 
14628 // And Instructions
14629 
14630 
// 32-bit bitwise AND, register-register: AndI -> andw.
// NOTE(review): the 'cr' operand is neither in the match rule nor in an
// effect and andw does not set flags -- presumably a leftover; verify
// before removing.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14645 
// 32-bit bitwise AND with a logical immediate: AndI -> andw.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Format fixed from "andsw": the encoding emits the non-flag-setting andw
  // (the flags register is not written here).
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14660 
// Or Instructions

// 32-bit bitwise OR, register-register: OrI -> orrw.
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit bitwise OR with a logical immediate: OrI -> orrw.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// 32-bit bitwise XOR, register-register: XorI -> eorw.
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit bitwise XOR with a logical immediate: XorI -> eorw.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14724 
14725 // Long Logical Instructions
14726 // TODO
14727 
14728 instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
14729   match(Set dst (AndL src1 src2));
14730 
14731   format %{ "and  $dst, $src1, $src2\t# int" %}
14732 
14733   ins_cost(INSN_COST);
14734   ins_encode %{
14735     __ andr(as_Register($dst$$reg),
14736             as_Register($src1$$reg),
14737             as_Register($src2$$reg));
14738   %}
14739 
14740   ins_pipe(ialu_reg_reg);
14741 %}
14742 
14743 instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
14744   match(Set dst (AndL src1 src2));
14745 
14746   format %{ "and  $dst, $src1, $src2\t# int" %}
14747 
14748   ins_cost(INSN_COST);
14749   ins_encode %{
14750     __ andr(as_Register($dst$$reg),
14751             as_Register($src1$$reg),
14752             (uint64_t)($src2$$constant));
14753   %}
14754 
14755   ins_pipe(ialu_reg_imm);
14756 %}
14757 
14758 // Or Instructions
14759 
14760 instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
14761   match(Set dst (OrL src1 src2));
14762 
14763   format %{ "orr  $dst, $src1, $src2\t# int" %}
14764 
14765   ins_cost(INSN_COST);
14766   ins_encode %{
14767     __ orr(as_Register($dst$$reg),
14768            as_Register($src1$$reg),
14769            as_Register($src2$$reg));
14770   %}
14771 
14772   ins_pipe(ialu_reg_reg);
14773 %}
14774 
// Or long, register with logical-immediate operand.
// Fix: format comment corrected from "# int" to "# long" — this is the
// 64-bit OrL rule.
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14789 
14790 // Xor Instructions
14791 
// Xor long, register-register: dst = src1 ^ src2.
// Fix: format comment corrected from "# int" to "# long" — this is the
// 64-bit XorL rule.
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14806 
// Xor long, register with logical-immediate operand.
// Fixes: (1) format comment corrected from "# int" to "# long";
// (2) format/ins_cost reordered to match every sibling logical rule
// (format first, then ins_cost) for consistency.
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14821 
// Sign-extend int to long (sxtw alias of sbfm with lsb=0, width=32).
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// Zero-extend int to long: matches (ConvI2L src) & 0xFFFFFFFF so the
// explicit mask collapses into a single ubfm (uxtw-style) instruction.
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Truncate long to int: a 32-bit register move discards the high word.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
14860 
// Narrow double to float (fcvt, double source form).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// Widen float to double (fcvt, single source form).
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// Float to int: fcvtzs (signed, round toward zero), 32-bit destination.
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// Float to long: fcvtzs (signed, round toward zero), 64-bit destination.
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}
14912 
// Single to half precision; needs a float temp for the intermediate
// conversion before the result is moved to a general register.
instruct convF2HF_reg_reg(iRegINoSp dst, vRegF src, vRegF tmp) %{
  match(Set dst (ConvF2HF src));
  format %{ "fcvt $tmp, $src\t# convert single to half precision\n\t"
            "smov $dst, $tmp\t# move result from $tmp to $dst"
  %}
  effect(TEMP tmp);
  ins_encode %{
      __ flt_to_flt16($dst$$Register, $src$$FloatRegister, $tmp$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}

// Half to single precision; the int source is staged through a float temp.
instruct convHF2F_reg_reg(vRegF dst, iRegINoSp src, vRegF tmp) %{
  match(Set dst (ConvHF2F src));
  format %{ "mov $tmp, $src\t# move source from $src to $tmp\n\t"
            "fcvt $dst, $tmp\t# convert half to single precision"
  %}
  effect(TEMP tmp);
  ins_encode %{
      __ flt16_to_flt($dst$$FloatRegister, $src$$Register, $tmp$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
14936 
// Int to float: scvtf, 32-bit source form.
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// Long to float: scvtf, 64-bit source form.
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

// Double to int: fcvtzs (round toward zero), 32-bit destination.
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// Double to long: fcvtzs (round toward zero), 64-bit destination.
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// Int to double: scvtf, 32-bit source form.
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// Long to double: scvtf, 64-bit source form.
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
15014 
// Math.round(double) → long; delegates to MacroAssembler::java_round_double.
// Needs a float temp and clobbers flags.
instruct round_double_reg(iRegLNoSp dst, vRegD src, vRegD ftmp, rFlagsReg cr)
%{
  match(Set dst (RoundD src));
  effect(TEMP_DEF dst, TEMP ftmp, KILL cr);
  format %{ "java_round_double $dst,$src"%}
  ins_encode %{
    __ java_round_double($dst$$Register, as_FloatRegister($src$$reg),
                         as_FloatRegister($ftmp$$reg));
  %}
  ins_pipe(pipe_slow);
%}

// Math.round(float) → int; delegates to MacroAssembler::java_round_float.
instruct round_float_reg(iRegINoSp dst, vRegF src, vRegF ftmp, rFlagsReg cr)
%{
  match(Set dst (RoundF src));
  effect(TEMP_DEF dst, TEMP ftmp, KILL cr);
  format %{ "java_round_float $dst,$src"%}
  ins_encode %{
    __ java_round_float($dst$$Register, as_FloatRegister($src$$reg),
                        as_FloatRegister($ftmp$$reg));
  %}
  ins_pipe(pipe_slow);
%}
15038 
// stack <-> reg and reg <-> reg shuffles with no conversion

// Reinterpret a stack float slot as int: plain 32-bit load from sp+disp.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Reinterpret a stack int slot as float: 32-bit FP load from sp+disp.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Reinterpret a stack double slot as long: 64-bit load from sp+disp.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Reinterpret a stack long slot as double: 64-bit FP load from sp+disp.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Store a float register to an int stack slot (bitwise reinterpret).
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Store an int register to a float stack slot (bitwise reinterpret).
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
15148 
// Store a double register to a long stack slot (bitwise reinterpret).
// Fix: the format string printed the operands backwards
// ("strd $dst, $src"); this is a store of $src to stack slot $dst, so it
// now matches the encoding and the sibling rules ("str* $src, $dst").
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
15166 
// Store a long register to a double stack slot (bitwise reinterpret).
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}

// Move float register bits to an int register (fmov, no conversion).
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// Move int register bits to a float register (fmov, no conversion).
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// Move double register bits to a long register (fmov, no conversion).
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// Move long register bits to a double register (fmov, no conversion).
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
15256 
15257 // ============================================================================
15258 // clearing of an array
15259 
// Zero an array given a variable word count in r11 and base in r10.
// zero_words may emit a call to a stub and returns null if the code
// cache is full, in which case compilation is bailed out.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base, KILL cr);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    address tpc = __ zero_words($base$$Register, $cnt$$Register);
    if (tpc == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}

// Zero an array with a constant word count, used only when the count is
// below the block-zeroing threshold (see predicate); needs a scratch
// register for the inline zeroing loop.
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, iRegL_R11 temp, Universe dummy, rFlagsReg cr)
%{
  predicate((uint64_t)n->in(2)->get_long()
            < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
  match(Set dummy (ClearArray cnt base));
  effect(TEMP temp, USE_KILL base, KILL cr);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    address tpc = __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
    if (tpc == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
15299 
15300 // ============================================================================
15301 // Overflow Math Instructions
15302 
// Overflow check for int add: cmnw sets V iff op1+op2 overflows 32 bits.
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Overflow check for int add with add/sub-encodable immediate.
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for long add.
instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Overflow check for long add with immediate; adds into zr so only flags
// are affected.
instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "adds  zr, $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ adds(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for int subtract.
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Overflow check for int subtract with immediate.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for long subtract.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Overflow check for long subtract with immediate; subs into zr so only
// flags are affected.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ subs(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for int negate (0 - op1).
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for long negate (0 - op1).
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
15432 
// Overflow check for int multiply. Computes the full 64-bit product, then
// compares it against its own sign-extended low word: they differ iff the
// product does not fit in 32 bits. The final movw/cselw/cmpw sequence
// converts that NE/EQ outcome into the V flag expected by cmpOp.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused form: int multiply-overflow feeding a branch directly, avoiding
// the flag-materialization tail above. Only matches overflow/no_overflow
// tests (see predicate); the branch condition is remapped to NE/EQ.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}

// Overflow check for long multiply: the high 64 bits (smulh) must equal
// the sign extension of the low 64 bits (mul) for the product to fit.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused form: long multiply-overflow feeding a branch directly.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
15522 
15523 // ============================================================================
15524 // Compare Instructions
15525 
// Signed int compare, register-register.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed int compare against zero.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare with add/sub-encodable immediate.
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare with arbitrary immediate (may need an extra
// materializing move, hence the doubled cost).
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15581 
15582 // Unsigned compare Instructions; really, same as signed compare
15583 // except it should only be used to feed an If or a CMovI which takes a
15584 // cmpOpU.
15585 
// Unsigned int compare, register-register; same cmpw encoding as the
// signed form, only the consuming cmpOpU interprets the flags differently.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned int compare against zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare with add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare with arbitrary immediate.
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15641 
// Signed long compare, register-register.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed long compare against zero.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare with add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare with arbitrary immediate (doubled cost: may need
// to materialize the constant first).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15697 
// Unsigned long compare, register-register; flags consumed via cmpOpU.
instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned long compare against zero.
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare with add/sub-encodable immediate.
instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare with arbitrary immediate.
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15753 
// Pointer compare, register-register.
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed (narrow) pointer compare, register-register.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Pointer null test.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Compressed (narrow) pointer null test.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
15809 
15810 // FP comparisons
15811 //
15812 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
15813 // using normal cmpOp. See declaration of rFlagsReg for details.
15814 
// Float compare, register-register (fcmps sets the flags consumed by a
// normal cmpOp — see the FP-comparison note above).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Float compare against literal 0.0 (fcmps zero-compare form).
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
// Double compare variants, mirroring the float forms above.

// Double compare, register-register.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Double compare against literal 0.0 (fcmpd zero-compare form).
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
15871 
// Three-way float compare (fcmpl/fcmpg semantics via CmpF3): produces
// -1 / 0 / 1 in $dst; unordered collapses to -1 via the LT condition.
// Fixes: (1) format string had an unbalanced '(' in the csinvw/csnegw
// lines — rewritten as plain assembly syntax; (2) removed the dead
// 'Label done' that was bound but never branched to (emits no code).
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
15899 
// Three-way double compare: $dst = -1 if src1 < src2 (or unordered),
// 0 if equal, 1 if src1 > src2.  Clobbers the flags.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed unbalanced paren in the csinvw line of the debug format.
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // NOTE: removed an unused 'Label done' that was declared and bound
    // but never branched to; it emitted no code and served no purpose.
  %}
  ins_pipe(pipe_class_default);

%}
15926 
// Three-way float compare against 0.0: $dst = -1, 0, or 1.
// Unordered compares as less-than.  Clobbers the flags.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed unbalanced paren in the csinvw line of the debug format.
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    // Immediate-zero form of fcmp; no second register needed.
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // NOTE: removed an unused 'Label done' that was declared and bound
    // but never branched to; it emitted no code and served no purpose.
  %}

  ins_pipe(pipe_class_default);

%}
15953 
// Three-way double compare against 0.0: $dst = -1, 0, or 1.
// Unordered compares as less-than.  Clobbers the flags.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed unbalanced paren in the csinvw line of the debug format.
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    // Immediate-zero form of fcmp; no second register needed.
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // NOTE: removed an unused 'Label done' that was declared and bound
    // but never branched to; it emitted no code and served no purpose.
  %}
  ins_pipe(pipe_class_default);

%}
15979 
// CmpLTMask: $dst = (p < q) ? -1 : 0, computed as cset (0/1) then negate.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    // dst = 1 if LT else 0 ...
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    // ... then dst = 0 - dst, turning 1 into the all-ones mask.
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
16000 
// CmpLTMask against zero: an arithmetic shift right by 31 smears the sign
// bit, yielding -1 for negative src and 0 otherwise — one instruction.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
16016 
16017 // ============================================================================
16018 // Max and Min
16019 
16020 // Like compI_reg_reg or compI_reg_immI0 but without match rule and second zero parameter.
16021 
// Flag-setting compare of $src against 0; has no match rule because it is
// only used as a building block by the min/max expand rules below.
instruct compI_reg_imm0(rFlagsReg cr, iRegI src)
%{
  effect(DEF cr, USE src);
  ins_cost(INSN_COST);
  format %{ "cmpw $src, 0" %}

  ins_encode %{
    __ cmpw($src$$Register, 0);
  %}
  ins_pipe(icmp_reg_imm);
%}
16033 
// Integer minimum, expanded into a compare plus a conditional move (lt).
instruct minI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2)
%{
  match(Set dst (MinI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_lt(dst, src1, src2, cr);
  %}
%}
16045 
// Integer maximum, expanded into a compare plus a conditional move (gt).
instruct maxI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2)
%{
  match(Set dst (MaxI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_gt(dst, src1, src2, cr);
  %}
%}
16057 
16058 
16059 // ============================================================================
16060 // Branch Instructions
16061 
16062 // Direct Branch.
// Unconditional direct branch to a label.
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
16076 
16077 // Conditional Near Branch
// Conditional branch on signed condition codes, near (short-range) form.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
16097 
16098 // Conditional Near Branch Unsigned
// Conditional branch on unsigned condition codes, near (short-range) form.
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
16118 
16119 // Make use of CBZ and CBNZ.  These instructions, as well as being
16120 // shorter than (cmp; branch), have the additional benefit of not
16121 // killing the flags.
16122 
// Fused int eq/ne-zero compare-and-branch via CBZW/CBNZW (flags untouched).
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // cmpOpEqNe guarantees cond is either EQ or NE.
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16139 
// Fused long eq/ne-zero compare-and-branch via CBZ/CBNZ (flags untouched).
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // cmpOpEqNe guarantees cond is either EQ or NE.
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16156 
// Fused pointer null/non-null compare-and-branch via CBZ/CBNZ (flags untouched).
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // cmpOpEqNe guarantees cond is either EQ or NE.
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16173 
// Fused narrow-oop null/non-null compare-and-branch; 32-bit CBZW/CBNZW
// because compressed oops are 32 bits wide.
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16190 
// Null check on a decoded narrow oop: a compressed oop is null iff its
// encoded form is zero, so test the 32-bit register and skip the decode.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16207 
// Fused unsigned-int compare against zero.  Unsigned x <= 0 (LS) is the
// same as x == 0, and x > 0 (HI) the same as x != 0, so all four allowed
// conditions reduce to CBZW/CBNZW.
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16224 
// Fused unsigned-long compare against zero; same EQ/LS -> CBZ,
// NE/HI -> CBNZ reduction as the int variant above.
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16241 
16242 // Test bit and Branch
16243 
16244 // Patterns for short (< 32KiB) variants
// Long sign test: x < 0 / x >= 0 reduces to testing the sign bit (63)
// with TBNZ/TBZ.  Short variant, range < 32KiB.
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    // LT -> branch if sign bit set (NE); GE -> branch if sign bit clear (EQ).
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16260 
// Int sign test: x < 0 / x >= 0 reduces to testing the sign bit (31)
// with TBNZ/TBZ.  Short variant, range < 32KiB.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    // LT -> branch if sign bit set (NE); GE -> branch if sign bit clear (EQ).
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16276 
// Single-bit test on a long: (x & power_of_2) ==/!= 0 becomes TBZ/TBNZ on
// that bit.  The predicate guarantees the mask has exactly one bit set.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16293 
// Single-bit test on an int: (x & power_of_2) ==/!= 0 becomes TBZ/TBNZ on
// that bit.  The predicate guarantees the mask has exactly one bit set.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16310 
16311 // And far variants
// Far variant of cmpL_branch_sign: tbr emits an inverted tbz/tbnz over an
// unconditional branch when the target may be out of tb* range.
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16326 
// Far variant of cmpI_branch_sign (see far_cmpL_branch_sign).
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16341 
// Far variant of cmpL_branch_bit (see far_cmpL_branch_sign).
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16357 
// Far variant of cmpI_branch_bit (see far_cmpL_branch_sign).
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16373 
16374 // Test bits
16375 
// (x & imm) vs 0 on long: emit a single TST (ANDS with zr destination).
// The predicate restricts imm to values encodable as a logical immediate.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
16388 
// (x & imm) vs 0 on int: emit a single TSTW (32-bit ANDS with zr
// destination).  The predicate restricts imm to encodable logical
// immediates.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  // Format fixed to say tstw: the encoding emits the 32-bit tstw, and the
  // sibling rule cmpI_and_reg already prints tstw for the same operation.
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
16401 
// (x & y) vs 0 on long with a register mask: single TST.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
16412 
// (x & y) vs 0 on int with a register mask: single TSTW.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
16423 
16424 
16425 // Conditional Far Branch
16426 // Conditional Far Branch Unsigned
16427 // TODO: fixme
16428 
16429 // counted loop end branch near
// Back-branch of a counted loop, near form; same encoding as branchCon.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
16445 
16446 // counted loop end branch far
16447 // TODO: fixme
16448 
16449 // ============================================================================
16450 // inlined locking and unlocking
16451 
// Fast-path monitor enter for legacy (stack/monitor) locking modes; the
// slow path is taken based on the flags this leaves behind.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
  predicate(LockingMode != LM_LIGHTWEIGHT);
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2, TEMP tmp3);

  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2,$tmp3" %}

  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_serial);
%}
16467 
// Fast-path monitor exit for legacy (stack/monitor) locking modes.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  predicate(LockingMode != LM_LIGHTWEIGHT);
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register);
  %}

  ins_pipe(pipe_serial);
%}
16483 
// Fast-path monitor enter for the lightweight locking mode.
instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  predicate(LockingMode == LM_LIGHTWEIGHT);
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode %{
    __ fast_lock_lightweight($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register);
  %}

  ins_pipe(pipe_serial);
%}
16499 
// Fast-path monitor exit for the lightweight locking mode.
instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  predicate(LockingMode == LM_LIGHTWEIGHT);
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode %{
    __ fast_unlock_lightweight($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register);
  %}

  ins_pipe(pipe_serial);
%}
16515 
16516 // ============================================================================
16517 // Safepoint Instructions
16518 
16519 // TODO
16520 // provide a near and far version of this code
16521 
// Safepoint poll: load from the polling page; the page is protected when a
// safepoint is pending, so the load traps and the VM takes over.
instruct safePoint(rFlagsReg cr, iRegP poll)
%{
  match(SafePoint poll);
  effect(KILL cr);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
16535 
16536 
16537 // ============================================================================
16538 // Procedure Call/Return Instructions
16539 
16540 // Call Java Static Instruction
16541 
// Direct call to a statically-bound Java method.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode(aarch64_enc_java_static_call(meth),
             aarch64_enc_call_epilog);

  ins_pipe(pipe_class_call);
%}
16557 
16558 // TO HERE
16559 
16560 // Call Java Dynamic Instruction
// Call to a dynamically-dispatched Java method (via inline cache).
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode(aarch64_enc_java_dynamic_call(meth),
             aarch64_enc_call_epilog);

  ins_pipe(pipe_class_call);
%}
16576 
16577 // Call Runtime Instruction
16578 
// Call from compiled Java code into the VM runtime.
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16593 
16594 // Call Runtime Instruction
16595 
// Call to a runtime leaf routine (no safepoint, no Java-visible state change).
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16610 
16611 // Call Runtime Instruction
16612 
// Call to a runtime leaf routine that does not touch FP state.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16627 
16628 // Tail Call; Jump from runtime stub to Java code.
16629 // Also known as an 'interprocedural jump'.
16630 // Target of jump will eventually return to caller.
16631 // TailJump below removes the return address.
16632 // Don't use rfp for 'jump_target' because a MachEpilogNode has already been
16633 // emitted just above the TailCall which has reset rfp to the caller state.
// Indirect tail call: register jump to Java code from a runtime stub;
// method_ptr carries the Method* for the callee.
instruct TailCalljmpInd(iRegPNoSpNoRfp jump_target, inline_cache_RegP method_ptr)
%{
  match(TailCall jump_target method_ptr);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_ptr holds method" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
16646 
// Indirect tail jump used for exception dispatch; ex_oop (r0) carries the
// exception oop to the handler.
instruct TailjmpInd(iRegPNoSpNoRfp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
16659 
16660 // Create exception oop: created by stack-crawling runtime code.
16661 // Created exception is now available to this handler, and is setup
16662 // just prior to jumping to this handler. No code emitted.
16663 // TODO check
16664 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Marker node only: the exception oop is already in r0 when control
// reaches the handler, so this emits no code (size 0).
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
16677 
16678 // Rethrow exception: The exception oop will come in the first
16679 // argument position. Then JUMP (not call) to the rethrow stub code.
// Jump (not call) to the shared rethrow stub; the exception oop arrives in
// the first argument register.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
16690 
16691 
16692 // Return Instruction
16693 // epilog node loads ret address into lr as part of frame pop
// Method return; the epilog has already restored lr, so a bare ret suffices.
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
16704 
16705 // Die now.
// Halt node: emit a stop (debug trap with message) only when the node is
// reachable; unreachable Halts emit nothing.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    if (is_reachable()) {
      __ stop(_halt_reason);
    }
  %}

  ins_pipe(pipe_class_default);
%}
16720 
16721 // ============================================================================
16722 // Partial Subtype Check
16723 //
// Search the subklass's secondary-supers (superklass) array for an
// instance of the superklass.  Set a hidden internal cache on a hit
// (cache is checked with exposed code in gen_subtype_check()).  Return
// NZ for a miss or zero for a hit.  The encoding ALSO sets flags.
16728 
// Slow-path subtype check producing a result register (zero on hit because
// opcode 0x1 requests zeroing); fixed registers match the stub's calling
// convention.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
16743 
// Variant matching a compare of the subtype-check result against zero:
// only the flags are needed, so the result register is merely killed and
// opcode 0x0 skips zeroing it on a hit.
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
16758 
// Intrinsics for String.compareTo()
16760 
// String.compareTo intrinsic, UTF-16 vs UTF-16, NEON-only (UseSVE == 0).
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  // Format now lists both killed temps, consistent with the UL/LU variants.
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, pnoreg, pnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16778 
// String.compareTo intrinsic, Latin-1 vs Latin-1, NEON-only (UseSVE == 0).
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  // Format now lists both killed temps, consistent with the UL/LU variants.
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, pnoreg, pnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16795 
// String.compareTo intrinsic, UTF-16 vs Latin-1, NEON-only; needs vector
// temps for the widening comparison.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, pnoreg, pnoreg, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16815 
// String.compareTo intrinsic, Latin-1 vs UTF-16, NEON-only; mirror of the
// UL variant above.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, pnoreg, pnoreg, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
16835 
16836 // Note that Z registers alias the corresponding NEON registers, we declare the vector operands of
16837 // these string_compare variants as NEON register type for convenience so that the prototype of
16838 // string_compare can be shared with all variants.
16839 
// String compare, both strings Latin-1 (LL): SVE variant, selected when
// UseSVE > 0. Uses governing predicate registers P0/P1 as temps; vector temps
// are declared as NEON D registers (see the note above) but alias Z registers.
instruct string_compareLL_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    // fnoreg: the SVE path needs only two vector temps, not three.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16862 
// String compare, mixed encodings (LU): SVE variant, selected when UseSVE > 0.
// Same register discipline as string_compareLL_sve; only the intrinsic
// encoding passed to string_compare() differs.
instruct string_compareLU_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
16885 
// String compare, mixed encodings (UL): SVE variant, selected when UseSVE > 0.
// Same register discipline as string_compareLL_sve; only the intrinsic
// encoding passed to string_compare() differs.
instruct string_compareUL_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16908 
// String compare, both strings UTF-16 (UU): SVE variant, selected when
// UseSVE > 0. Same register discipline as string_compareLL_sve; only the
// intrinsic encoding passed to string_compare() differs.
instruct string_compareUU_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16931 
// String indexOf, both strings UTF-16 (UU), non-constant pattern length
// (cnt2 is a register). Inputs are pinned and consumed; the result lands
// in R0.
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                          iRegINoSp tmp3, iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
         TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    // -1 signals a non-constant pattern length (contrast the _con variants,
    // which pass the immediate count instead).
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16955 
// String indexOf, both strings Latin-1 (LL), non-constant pattern length.
// Register usage mirrors string_indexofUU; only the intrinsic encoding
// differs.
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
         TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    // -1 signals a non-constant pattern length.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16979 
// String indexOf, mixed encodings (UL), non-constant pattern length.
// Register usage mirrors string_indexofUU; only the intrinsic encoding
// differs.
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
         TEMP vtmp0, TEMP vtmp1, KILL cr);
  // Note: the format previously printed "cnt1" literally (missing '$'), so the
  // operand was not substituted; now consistent with the UU/LL variants.
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    // -1 signals a non-constant pattern length.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
17003 
// String indexOf, both strings UTF-16 (UU), with a small constant pattern
// length (int_cnt2 <= 4). Passing the count as an immediate lets
// string_indexof() specialize the search; the cnt2/tmp5/tmp6 register slots
// are unused (zr).
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
17025 
// String indexOf, both strings Latin-1 (LL), with a small constant pattern
// length (int_cnt2 <= 4). Mirrors string_indexof_conUU.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
17047 
// String indexOf, mixed encodings (UL), with a constant pattern length of
// exactly 1 (immI_1) — the only constant length supported for the mixed case.
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
17069 
// Single-character indexOf in a UTF-16 string, NEON path (UseSVE == 0).
// str1/cnt1/ch are pinned to R1/R2/R3 and consumed; result in R0.
instruct string_indexof_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                             iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                             iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  predicate((UseSVE == 0) && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "StringUTF16 IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
17088 
// Single-character indexOf in a Latin-1 string, NEON path (UseSVE == 0).
// Mirrors string_indexof_char but dispatches to the Latin-1 helper.
instruct stringL_indexof_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  predicate((UseSVE == 0) && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "StringLatin1 IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ stringL_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                            $result$$Register, $tmp1$$Register, $tmp2$$Register,
                            $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
17107 
// Single-character indexOf in a Latin-1 string, SVE path (UseSVE > 0).
// Uses scalable vector (vecA) and predicate register temps; the shared helper
// is parameterized by isL (true = Latin-1).
instruct stringL_indexof_char_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                                  iRegI_R0 result, vecA ztmp1, vecA ztmp2,
                                  pRegGov pgtmp, pReg ptmp, rFlagsReg cr) %{
  predicate(UseSVE > 0 && ((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L);
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(TEMP ztmp1, TEMP ztmp2, TEMP pgtmp, TEMP ptmp, KILL cr);
  format %{ "StringLatin1 IndexOf char[] $str1,$cnt1,$ch -> $result # use sve" %}
  ins_encode %{
    __ string_indexof_char_sve($str1$$Register, $cnt1$$Register, $ch$$Register,
                               $result$$Register, $ztmp1$$FloatRegister,
                               $ztmp2$$FloatRegister, $pgtmp$$PRegister,
                               $ptmp$$PRegister, true /* isL */);
  %}
  ins_pipe(pipe_class_memory);
%}
17123 
// Single-character indexOf in a UTF-16 string, SVE path (UseSVE > 0).
// Same shape as stringL_indexof_char_sve with isL = false.
instruct stringU_indexof_char_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                                  iRegI_R0 result, vecA ztmp1, vecA ztmp2,
                                  pRegGov pgtmp, pReg ptmp, rFlagsReg cr) %{
  predicate(UseSVE > 0 && ((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U);
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(TEMP ztmp1, TEMP ztmp2, TEMP pgtmp, TEMP ptmp, KILL cr);
  format %{ "StringUTF16 IndexOf char[] $str1,$cnt1,$ch -> $result # use sve" %}
  ins_encode %{
    __ string_indexof_char_sve($str1$$Register, $cnt1$$Register, $ch$$Register,
                               $result$$Register, $ztmp1$$FloatRegister,
                               $ztmp2$$FloatRegister, $pgtmp$$PRegister,
                               $ptmp$$PRegister, false /* isL */);
  %}
  ins_pipe(pipe_class_memory);
%}
17139 
// String equality, both strings Latin-1 (LL). The trailing 1 selects the
// 1-byte-element variant of string_equals.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
17155 
// String equality, both strings UTF-16 (UU). The trailing 2 selects the
// 2-byte-element variant of string_equals.
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
17171 
// byte[] equality (AryEq with LL encoding). arrays_equals may emit an
// out-of-line stub; a NULL return means the code cache is full, so the
// compilation is abandoned rather than emitting broken code.
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                       vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3,
         TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         TEMP vtmp6, TEMP vtmp7, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result # KILL $ary1 $ary2 $tmp $tmp1 $tmp2 $tmp3 V0-V7 cr" %}
  ins_encode %{
    // Trailing 1: element size in bytes (byte elements).
    address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register,
                                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                                   $result$$Register, $tmp$$Register, 1);
    if (tpc == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
17196 
// char[] equality (AryEq with UU encoding). Mirrors array_equalsB with
// 2-byte elements; bails out of the compile if the stub cannot be emitted.
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                       vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3,
         TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         TEMP vtmp6, TEMP vtmp7, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result # KILL $ary1 $ary2 $tmp $tmp1 $tmp2 $tmp3 V0-V7 cr" %}
  ins_encode %{
    // Trailing 2: element size in bytes (char elements).
    address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register,
                                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                                   $result$$Register, $tmp$$Register, 2);
    if (tpc == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
17221 
// CountPositives over a byte[]: counts leading non-negative bytes.
// Like arrays_equals, the helper may emit a stub and returns NULL when the
// code cache is full, in which case the compilation is abandoned.
instruct count_positives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (CountPositives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "count positives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    address tpc = __ count_positives($ary1$$Register, $len$$Register, $result$$Register);
    if (tpc == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe( pipe_slow );
%}
17236 
17237 // fast char[] to byte[] compression
// StrCompressedCopy: compress a char[] (UTF-16) into a byte[] (Latin-1).
// Note len is only read (USE, not USE_KILL); src/dst are consumed and
// V0-V5 are clobbered as vector temps.
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                         vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         USE_KILL src, USE_KILL dst, USE len, KILL cr);

  format %{ "String Compress $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $result$$Register, $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                           $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                           $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
17256 
17257 // fast byte[] to char[] inflation
// StrInflatedCopy: inflate a byte[] (Latin-1) into a char[] (UTF-16).
// No value result (Universe dummy). The encoding passes only vtmp0-vtmp2 to
// byte_array_inflate, but vtmp3-vtmp6 are still declared TEMP — presumably
// the helper's out-of-line stub clobbers them; TODO confirm against
// C2_MacroAssembler::byte_array_inflate. NULL return = code cache full.
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len, iRegP_R3 tmp,
                        vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                        vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3,
         TEMP vtmp4, TEMP vtmp5, TEMP vtmp6, TEMP tmp,
         USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst # KILL $tmp $src $dst $len V0-V6 cr" %}
  ins_encode %{
    address tpc = __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                                        $vtmp2$$FloatRegister, $tmp$$Register);
    if (tpc == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
17279 
17280 // encode char[] to byte[] in ISO_8859_1
// EncodeISOArray, ISO-8859-1 flavor (is_ascii() false — see the ASCII
// variant below). The 'false' argument selects non-ASCII encoding in the
// shared encode_iso_array helper.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                          vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                          iRegI_R0 result, rFlagsReg cr)
%{
  predicate(!((EncodeISOArrayNode*)n)->is_ascii());
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE len, KILL vtmp0, KILL vtmp1,
         KILL vtmp2, KILL vtmp3, KILL vtmp4, KILL vtmp5, KILL cr);

  format %{ "Encode ISO array $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
                        $result$$Register, false,
                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                        $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                        $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_class_memory);
%}
17301 
// EncodeISOArray, ASCII flavor (is_ascii() true). Identical register shape to
// encode_iso_array; the 'true' argument selects ASCII encoding.
instruct encode_ascii_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                            vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                            vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                            iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((EncodeISOArrayNode*)n)->is_ascii());
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE len, KILL vtmp0, KILL vtmp1,
         KILL vtmp2, KILL vtmp3, KILL vtmp4, KILL vtmp5, KILL cr);

  format %{ "Encode ASCII array $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
                        $result$$Register, true,
                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                        $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                        $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
17322 
17323 //----------------------------- CompressBits/ExpandBits ------------------------
17324 
// Integer CompressBits via the SVE bext instruction: move src and mask into
// lane 0 (S size) of FP/SIMD temps, run bext, move the result back to a GP
// register.
instruct compressBitsI_reg(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ S, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ S, 0, $mask$$Register);
    __ sve_bext($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17342 
// Integer CompressBits where the source comes straight from memory (LoadI is
// folded into an ldrs to the FP temp) and the mask is an immediate
// materialized from the constant pool.
instruct compressBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits (LoadI mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrs   $tsrc, $mem\n\t"
            "ldrs   $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    // Load the 4-byte source operand directly into the vector temp.
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ ldrs($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bext($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17361 
// Long CompressBits via SVE bext: identical scheme to compressBitsI_reg but
// with D-sized (64-bit) lanes and long register classes.
instruct compressBitsL_reg(iRegLNoSp dst, iRegL src, iRegL mask,
                           vRegD tdst, vRegD tsrc, vRegD tmask) %{
  match(Set dst (CompressBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ D, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ D, 0, $mask$$Register);
    __ sve_bext($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17379 
// Long CompressBits with memory source (LoadL folded into ldrd) and a
// constant-pool mask; D-sized (64-bit) lanes.
instruct compressBitsL_memcon(iRegLNoSp dst, memory8 mem, immL mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits (LoadL mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrd   $tsrc, $mem\n\t"
            "ldrd   $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    // Load the 8-byte source operand directly into the vector temp.
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ ldrd($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bext($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17398 
// Integer ExpandBits via the SVE bdep instruction: the deposit counterpart of
// compressBitsI_reg, S-sized (32-bit) lanes.
instruct expandBitsI_reg(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask,
                         vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ S, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ S, 0, $mask$$Register);
    __ sve_bdep($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17416 
// Integer ExpandBits with memory source (LoadI folded into ldrs) and a
// constant-pool mask; S-sized (32-bit) lanes.
instruct expandBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask,
                         vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits (LoadI mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrs   $tsrc, $mem\n\t"
            "ldrs   $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    // Load the 4-byte source operand directly into the vector temp.
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ ldrs($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bdep($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17435 
// Long ExpandBits via SVE bdep: D-sized (64-bit) lanes, long register
// classes.
instruct expandBitsL_reg(iRegLNoSp dst, iRegL src, iRegL mask,
                         vRegD tdst, vRegD tsrc, vRegD tmask) %{
  match(Set dst (ExpandBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ D, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ D, 0, $mask$$Register);
    __ sve_bdep($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17453 
17454 
// ExpandBits of a long loaded from memory, with a constant mask, implemented
// via the SVE2 BDEP (bit deposit) instruction.  The 64-bit source is loaded
// straight into the D lane of an FP/SIMD temp, the constant mask is
// materialized from the constant pool, BDEP deposits the low-order source
// bits into the set-bit positions of the mask, and the scalar result is
// moved back to the destination GPR.
//
// Fix: dst must be iRegLNoSp (not iRegINoSp) — the rule matches
// (ExpandBits (LoadL mem) mask), which produces a long, and the result is
// written back with a full 64-bit D-lane move.  This also makes the rule
// consistent with expandBitsL_reg above.
instruct expandBitsL_memcon(iRegLNoSp dst, memory8 mem, immL mask,
                         vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits (LoadL mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrd   $tsrc, $mem\n\t"
            "ldrd   $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    // Load the 8-byte source operand from memory directly into tsrc.
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    // Materialize the immediate mask from the constant pool.
    __ ldrd($tmask$$FloatRegister, $constantaddress($mask));
    // BDEP (D lanes): scatter tsrc's low bits to tmask's set-bit positions.
    __ sve_bdep($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    // Move the 64-bit result from lane 0 back to the destination GPR.
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17473 
17474 // ============================================================================
17475 // This name is KNOWN by the ADLC and cannot be changed.
17476 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
17477 // for this guy.
// Load of the current thread pointer.  The destination register class
// (thread_RegP) pins $dst to the dedicated thread register, so no code is
// emitted: cost and size are both zero and the encoding is empty.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  // Free — the value is already in the dedicated register.
  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  // Emits no instructions.
  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
17492 
17493 //----------PEEPHOLE RULES-----------------------------------------------------
17494 // These must follow all instruction definitions as they use the names
17495 // defined in the instructions definitions.
17496 //
17497 // peepmatch ( root_instr_name [preceding_instruction]* );
17498 //
17499 // peepconstraint %{
17500 // (instruction_number.operand_name relational_op instruction_number.operand_name
17501 //  [, ...] );
17502 // // instruction numbers are zero-based using left to right order in peepmatch
17503 //
17504 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
17505 // // provide an instruction_number.operand_name for each operand that appears
17506 // // in the replacement instruction's match rule
17507 //
17508 // ---------VM FLAGS---------------------------------------------------------
17509 //
17510 // All peephole optimizations can be turned off using -XX:-OptoPeephole
17511 //
17512 // Each peephole rule is given an identifying number starting with zero and
17513 // increasing by one in the order seen by the parser.  An individual peephole
17514 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
17515 // on the command-line.
17516 //
17517 // ---------CURRENT LIMITATIONS----------------------------------------------
17518 //
17519 // Only match adjacent instructions in same basic block
17520 // Only equality constraints
17521 // Only constraints between operands, not (0.dest_reg == RAX_enc)
17522 // Only one replacement instruction
17523 //
17524 // ---------EXAMPLE----------------------------------------------------------
17525 //
17526 // // pertinent parts of existing instructions in architecture description
17527 // instruct movI(iRegINoSp dst, iRegI src)
17528 // %{
17529 //   match(Set dst (CopyI src));
17530 // %}
17531 //
17532 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
17533 // %{
17534 //   match(Set dst (AddI dst src));
17535 //   effect(KILL cr);
17536 // %}
17537 //
17538 // // Change (inc mov) to lea
17539 // peephole %{
17540 //   // increment preceded by register-register move
17541 //   peepmatch ( incI_iReg movI );
17542 //   // require that the destination register of the increment
17543 //   // match the destination register of the move
17544 //   peepconstraint ( 0.dst == 1.dst );
17545 //   // construct a replacement instruction that sets
17546 //   // the destination to ( move's source register + one )
17547 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
17548 // %}
17549 //
17550 
17551 // Implementation no longer uses movX instructions since
17552 // machine-independent system no longer uses CopyX nodes.
17553 //
17554 // peephole
17555 // %{
17556 //   peepmatch (incI_iReg movI);
17557 //   peepconstraint (0.dst == 1.dst);
17558 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17559 // %}
17560 
17561 // peephole
17562 // %{
17563 //   peepmatch (decI_iReg movI);
17564 //   peepconstraint (0.dst == 1.dst);
17565 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17566 // %}
17567 
17568 // peephole
17569 // %{
17570 //   peepmatch (addI_iReg_imm movI);
17571 //   peepconstraint (0.dst == 1.dst);
17572 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17573 // %}
17574 
17575 // peephole
17576 // %{
17577 //   peepmatch (incL_iReg movL);
17578 //   peepconstraint (0.dst == 1.dst);
17579 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17580 // %}
17581 
17582 // peephole
17583 // %{
17584 //   peepmatch (decL_iReg movL);
17585 //   peepconstraint (0.dst == 1.dst);
17586 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17587 // %}
17588 
17589 // peephole
17590 // %{
17591 //   peepmatch (addL_iReg_imm movL);
17592 //   peepconstraint (0.dst == 1.dst);
17593 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17594 // %}
17595 
17596 // peephole
17597 // %{
17598 //   peepmatch (addP_iReg_imm movP);
17599 //   peepconstraint (0.dst == 1.dst);
17600 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
17601 // %}
17602 
17603 // // Change load of spilled value to only a spill
17604 // instruct storeI(memory mem, iRegI src)
17605 // %{
17606 //   match(Set mem (StoreI mem src));
17607 // %}
17608 //
17609 // instruct loadI(iRegINoSp dst, memory mem)
17610 // %{
17611 //   match(Set dst (LoadI mem));
17612 // %}
17613 //
17614 
17615 //----------SMARTSPILL RULES---------------------------------------------------
17616 // These must follow all instruction definitions as they use the names
17617 // defined in the instructions definitions.
17618 
17619 // Local Variables:
17620 // mode: c++
17621 // End: