1 //
    2 // Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
    3 // Copyright (c) 2014, 2024, Red Hat, Inc. All rights reserved.
    4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    5 //
    6 // This code is free software; you can redistribute it and/or modify it
    7 // under the terms of the GNU General Public License version 2 only, as
    8 // published by the Free Software Foundation.
    9 //
   10 // This code is distributed in the hope that it will be useful, but WITHOUT
   11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   13 // version 2 for more details (a copy is included in the LICENSE file that
   14 // accompanied this code).
   15 //
   16 // You should have received a copy of the GNU General Public License version
   17 // 2 along with this work; if not, write to the Free Software Foundation,
   18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   19 //
   20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   21 // or visit www.oracle.com if you need additional information or have any
   22 // questions.
   23 //
   24 //
   25 
   26 // AArch64 Architecture Description File
   27 
   28 //----------REGISTER DEFINITION BLOCK------------------------------------------
   29 // This information is used by the matcher and the register allocator to
   30 // describe individual registers and classes of registers within the target
   31 // architecture.
   32 
   33 register %{
   34 //----------Architecture Description Register Definitions----------------------
   35 // General Registers
   36 // "reg_def"  name ( register save type, C convention save type,
   37 //                   ideal register type, encoding );
   38 // Register Save Types:
   39 //
   40 // NS  = No-Save:       The register allocator assumes that these registers
   41 //                      can be used without saving upon entry to the method, &
   42 //                      that they do not need to be saved at call sites.
   43 //
   44 // SOC = Save-On-Call:  The register allocator assumes that these registers
   45 //                      can be used without saving upon entry to the method,
   46 //                      but that they must be saved at call sites.
   47 //
   48 // SOE = Save-On-Entry: The register allocator assumes that these registers
   49 //                      must be saved before using them upon entry to the
   50 //                      method, but they do not need to be saved at call
   51 //                      sites.
   52 //
   53 // AS  = Always-Save:   The register allocator assumes that these registers
   54 //                      must be saved before using them upon entry to the
   55 //                      method, & that they must be saved at call sites.
   56 //
   57 // Ideal Register Type is used to determine how to save & restore a
   58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
   59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
   60 //
   61 // The encoding number is the actual bit-pattern placed into the opcodes.
   62 
   63 // We must define the 64 bit int registers in two 32 bit halves, the
   64 // real lower register and a virtual upper half register. upper halves
   65 // are used by the register allocator but are not actually supplied as
   66 // operands to memory ops.
   67 //
   68 // follow the C1 compiler in making registers
   69 //
//   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
//   r8-r9 non-allocatable (so we can use them as scratch regs)
//
// as regards Java usage, we don't use any callee save registers
// because this makes it difficult to de-optimise a frame (see comment
// in x86 implementation of Deoptimization::unwind_callee_save_values)
   77 //
   78 
   79 // General Registers
   80 
   81 reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
   82 reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
   83 reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
   84 reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
   85 reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
   86 reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
   87 reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
   88 reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
   89 reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
   90 reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
   91 reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
   92 reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
   93 reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
   94 reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
   95 reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
   96 reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
   97 reg_def R8      ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()         ); // rscratch1, non-allocatable
   98 reg_def R8_H    ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()->next() );
   99 reg_def R9      ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()         ); // rscratch2, non-allocatable
  100 reg_def R9_H    ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()->next() );
  101 reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
  102 reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
  103 reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
  104 reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
  105 reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
  106 reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
  107 reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
  108 reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
  109 reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
  110 reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
  111 reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
  112 reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
  113 reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
  114 reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
  115 reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
  116 reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
  117 reg_def R18     ( SOC, SOC, Op_RegI, 18, r18_tls->as_VMReg()        );
  118 reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18_tls->as_VMReg()->next());
  119 reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
  120 reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
  121 reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
  122 reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
  123 reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
  124 reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
  125 reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
  126 reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
  127 reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
  128 reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
  129 reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
  130 reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
  131 reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
  132 reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
  133 reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
  134 reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
  135 reg_def R27     ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
  136 reg_def R27_H   ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()->next());
  137 reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
  138 reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
  139 reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
  140 reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
  141 reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
  142 reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
  143 reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
  144 reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
  145 
  146 // ----------------------------
  147 // Float/Double/Vector Registers
  148 // ----------------------------
  149 
  150 // Double Registers
  151 
  152 // The rules of ADL require that double registers be defined in pairs.
  153 // Each pair must be two 32-bit values, but not necessarily a pair of
  154 // single float registers. In each pair, ADLC-assigned register numbers
  155 // must be adjacent, with the lower number even. Finally, when the
  156 // CPU stores such a register pair to memory, the word associated with
  157 // the lower ADLC-assigned number must be stored to the lower address.
  158 
  159 // AArch64 has 32 floating-point registers. Each can store a vector of
  160 // single or double precision floating-point values up to 8 * 32
  161 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
  162 // use the first float or double element of the vector.
  163 
// For Java use, float registers v0-v15 are treated as always
// save-on-call (even though the platform ABI treats v8-v15 as callee
// save). Float registers v16-v31 are SOC as per the platform spec.
  167 
  168 // For SVE vector registers, we simply extend vector register size to 8
  169 // 'logical' slots. This is nominally 256 bits but it actually covers
  170 // all possible 'physical' SVE vector register lengths from 128 ~ 2048
  171 // bits. The 'physical' SVE vector register length is detected during
  172 // startup, so the register allocator is able to identify the correct
  173 // number of bytes needed for an SVE spill/unspill.
  174 // Note that a vector register with 4 slots denotes a 128-bit NEON
  175 // register allowing it to be distinguished from the corresponding SVE
  176 // vector register when the SVE vector length is 128 bits.
  177 
  178   reg_def V0   ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()          );
  179   reg_def V0_H ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next()  );
  180   reg_def V0_J ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(2) );
  181   reg_def V0_K ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(3) );
  182 
  183   reg_def V1   ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()          );
  184   reg_def V1_H ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next()  );
  185   reg_def V1_J ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(2) );
  186   reg_def V1_K ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(3) );
  187 
  188   reg_def V2   ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()          );
  189   reg_def V2_H ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next()  );
  190   reg_def V2_J ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(2) );
  191   reg_def V2_K ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(3) );
  192 
  193   reg_def V3   ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()          );
  194   reg_def V3_H ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next()  );
  195   reg_def V3_J ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(2) );
  196   reg_def V3_K ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(3) );
  197 
  198   reg_def V4   ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()          );
  199   reg_def V4_H ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next()  );
  200   reg_def V4_J ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(2) );
  201   reg_def V4_K ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(3) );
  202 
  203   reg_def V5   ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()          );
  204   reg_def V5_H ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next()  );
  205   reg_def V5_J ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(2) );
  206   reg_def V5_K ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(3) );
  207 
  208   reg_def V6   ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()          );
  209   reg_def V6_H ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next()  );
  210   reg_def V6_J ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(2) );
  211   reg_def V6_K ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(3) );
  212 
  213   reg_def V7   ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()          );
  214   reg_def V7_H ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next()  );
  215   reg_def V7_J ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(2) );
  216   reg_def V7_K ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(3) );
  217 
  218   reg_def V8   ( SOC, SOE, Op_RegF, 8, v8->as_VMReg()          );
  219   reg_def V8_H ( SOC, SOE, Op_RegF, 8, v8->as_VMReg()->next()  );
  220   reg_def V8_J ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(2) );
  221   reg_def V8_K ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(3) );
  222 
  223   reg_def V9   ( SOC, SOE, Op_RegF, 9, v9->as_VMReg()          );
  224   reg_def V9_H ( SOC, SOE, Op_RegF, 9, v9->as_VMReg()->next()  );
  225   reg_def V9_J ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(2) );
  226   reg_def V9_K ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(3) );
  227 
  228   reg_def V10   ( SOC, SOE, Op_RegF, 10, v10->as_VMReg()          );
  229   reg_def V10_H ( SOC, SOE, Op_RegF, 10, v10->as_VMReg()->next()  );
  230   reg_def V10_J ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2) );
  231   reg_def V10_K ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3) );
  232 
  233   reg_def V11   ( SOC, SOE, Op_RegF, 11, v11->as_VMReg()          );
  234   reg_def V11_H ( SOC, SOE, Op_RegF, 11, v11->as_VMReg()->next()  );
  235   reg_def V11_J ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2) );
  236   reg_def V11_K ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3) );
  237 
  238   reg_def V12   ( SOC, SOE, Op_RegF, 12, v12->as_VMReg()          );
  239   reg_def V12_H ( SOC, SOE, Op_RegF, 12, v12->as_VMReg()->next()  );
  240   reg_def V12_J ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2) );
  241   reg_def V12_K ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3) );
  242 
  243   reg_def V13   ( SOC, SOE, Op_RegF, 13, v13->as_VMReg()          );
  244   reg_def V13_H ( SOC, SOE, Op_RegF, 13, v13->as_VMReg()->next()  );
  245   reg_def V13_J ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2) );
  246   reg_def V13_K ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3) );
  247 
  248   reg_def V14   ( SOC, SOE, Op_RegF, 14, v14->as_VMReg()          );
  249   reg_def V14_H ( SOC, SOE, Op_RegF, 14, v14->as_VMReg()->next()  );
  250   reg_def V14_J ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2) );
  251   reg_def V14_K ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3) );
  252 
  253   reg_def V15   ( SOC, SOE, Op_RegF, 15, v15->as_VMReg()          );
  254   reg_def V15_H ( SOC, SOE, Op_RegF, 15, v15->as_VMReg()->next()  );
  255   reg_def V15_J ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2) );
  256   reg_def V15_K ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3) );
  257 
  258   reg_def V16   ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()          );
  259   reg_def V16_H ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next()  );
  260   reg_def V16_J ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2) );
  261   reg_def V16_K ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3) );
  262 
  263   reg_def V17   ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()          );
  264   reg_def V17_H ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next()  );
  265   reg_def V17_J ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2) );
  266   reg_def V17_K ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3) );
  267 
  268   reg_def V18   ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()          );
  269   reg_def V18_H ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next()  );
  270   reg_def V18_J ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2) );
  271   reg_def V18_K ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3) );
  272 
  273   reg_def V19   ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()          );
  274   reg_def V19_H ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next()  );
  275   reg_def V19_J ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2) );
  276   reg_def V19_K ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3) );
  277 
  278   reg_def V20   ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()          );
  279   reg_def V20_H ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next()  );
  280   reg_def V20_J ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2) );
  281   reg_def V20_K ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3) );
  282 
  283   reg_def V21   ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()          );
  284   reg_def V21_H ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next()  );
  285   reg_def V21_J ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2) );
  286   reg_def V21_K ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3) );
  287 
  288   reg_def V22   ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()          );
  289   reg_def V22_H ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next()  );
  290   reg_def V22_J ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2) );
  291   reg_def V22_K ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3) );
  292 
  293   reg_def V23   ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()          );
  294   reg_def V23_H ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next()  );
  295   reg_def V23_J ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2) );
  296   reg_def V23_K ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3) );
  297 
  298   reg_def V24   ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()          );
  299   reg_def V24_H ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next()  );
  300   reg_def V24_J ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2) );
  301   reg_def V24_K ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3) );
  302 
  303   reg_def V25   ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()          );
  304   reg_def V25_H ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next()  );
  305   reg_def V25_J ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2) );
  306   reg_def V25_K ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3) );
  307 
  308   reg_def V26   ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()          );
  309   reg_def V26_H ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next()  );
  310   reg_def V26_J ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2) );
  311   reg_def V26_K ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3) );
  312 
  313   reg_def V27   ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()          );
  314   reg_def V27_H ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next()  );
  315   reg_def V27_J ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2) );
  316   reg_def V27_K ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3) );
  317 
  318   reg_def V28   ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()          );
  319   reg_def V28_H ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next()  );
  320   reg_def V28_J ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2) );
  321   reg_def V28_K ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3) );
  322 
  323   reg_def V29   ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()          );
  324   reg_def V29_H ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next()  );
  325   reg_def V29_J ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2) );
  326   reg_def V29_K ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3) );
  327 
  328   reg_def V30   ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()          );
  329   reg_def V30_H ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next()  );
  330   reg_def V30_J ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2) );
  331   reg_def V30_K ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3) );
  332 
  333   reg_def V31   ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()          );
  334   reg_def V31_H ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next()  );
  335   reg_def V31_J ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2) );
  336   reg_def V31_K ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3) );
  337 
  338 // ----------------------------
  339 // SVE Predicate Registers
  340 // ----------------------------
  341   reg_def P0 (SOC, SOC, Op_RegVectMask, 0, p0->as_VMReg());
  342   reg_def P1 (SOC, SOC, Op_RegVectMask, 1, p1->as_VMReg());
  343   reg_def P2 (SOC, SOC, Op_RegVectMask, 2, p2->as_VMReg());
  344   reg_def P3 (SOC, SOC, Op_RegVectMask, 3, p3->as_VMReg());
  345   reg_def P4 (SOC, SOC, Op_RegVectMask, 4, p4->as_VMReg());
  346   reg_def P5 (SOC, SOC, Op_RegVectMask, 5, p5->as_VMReg());
  347   reg_def P6 (SOC, SOC, Op_RegVectMask, 6, p6->as_VMReg());
  348   reg_def P7 (SOC, SOC, Op_RegVectMask, 7, p7->as_VMReg());
  349   reg_def P8 (SOC, SOC, Op_RegVectMask, 8, p8->as_VMReg());
  350   reg_def P9 (SOC, SOC, Op_RegVectMask, 9, p9->as_VMReg());
  351   reg_def P10 (SOC, SOC, Op_RegVectMask, 10, p10->as_VMReg());
  352   reg_def P11 (SOC, SOC, Op_RegVectMask, 11, p11->as_VMReg());
  353   reg_def P12 (SOC, SOC, Op_RegVectMask, 12, p12->as_VMReg());
  354   reg_def P13 (SOC, SOC, Op_RegVectMask, 13, p13->as_VMReg());
  355   reg_def P14 (SOC, SOC, Op_RegVectMask, 14, p14->as_VMReg());
  356   reg_def P15 (SOC, SOC, Op_RegVectMask, 15, p15->as_VMReg());
  357 
  358 // ----------------------------
  359 // Special Registers
  360 // ----------------------------
  361 
// The AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. The FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
  367 
  368 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
  369 
  370 // Specify priority of register selection within phases of register
  371 // allocation.  Highest priority is first.  A useful heuristic is to
  372 // give registers a low priority when they are required by machine
  373 // instructions, like EAX and EDX on I486, and choose no-save registers
  374 // before save-on-call, & save-on-call before save-on-entry.  Registers
  375 // which participate in fixed calling sequences should come last.
  376 // Registers which are used as pairs must fall on an even boundary.
  377 
  378 alloc_class chunk0(
  379     // volatiles
  380     R10, R10_H,
  381     R11, R11_H,
  382     R12, R12_H,
  383     R13, R13_H,
  384     R14, R14_H,
  385     R15, R15_H,
  386     R16, R16_H,
  387     R17, R17_H,
  388     R18, R18_H,
  389 
  390     // arg registers
  391     R0, R0_H,
  392     R1, R1_H,
  393     R2, R2_H,
  394     R3, R3_H,
  395     R4, R4_H,
  396     R5, R5_H,
  397     R6, R6_H,
  398     R7, R7_H,
  399 
  400     // non-volatiles
  401     R19, R19_H,
  402     R20, R20_H,
  403     R21, R21_H,
  404     R22, R22_H,
  405     R23, R23_H,
  406     R24, R24_H,
  407     R25, R25_H,
  408     R26, R26_H,
  409 
  410     // non-allocatable registers
  411 
  412     R27, R27_H, // heapbase
  413     R28, R28_H, // thread
  414     R29, R29_H, // fp
  415     R30, R30_H, // lr
  416     R31, R31_H, // sp
  417     R8, R8_H,   // rscratch1
  418     R9, R9_H,   // rscratch2
  419 );
  420 
  421 alloc_class chunk1(
  422 
  423     // no save
  424     V16, V16_H, V16_J, V16_K,
  425     V17, V17_H, V17_J, V17_K,
  426     V18, V18_H, V18_J, V18_K,
  427     V19, V19_H, V19_J, V19_K,
  428     V20, V20_H, V20_J, V20_K,
  429     V21, V21_H, V21_J, V21_K,
  430     V22, V22_H, V22_J, V22_K,
  431     V23, V23_H, V23_J, V23_K,
  432     V24, V24_H, V24_J, V24_K,
  433     V25, V25_H, V25_J, V25_K,
  434     V26, V26_H, V26_J, V26_K,
  435     V27, V27_H, V27_J, V27_K,
  436     V28, V28_H, V28_J, V28_K,
  437     V29, V29_H, V29_J, V29_K,
  438     V30, V30_H, V30_J, V30_K,
  439     V31, V31_H, V31_J, V31_K,
  440 
  441     // arg registers
  442     V0, V0_H, V0_J, V0_K,
  443     V1, V1_H, V1_J, V1_K,
  444     V2, V2_H, V2_J, V2_K,
  445     V3, V3_H, V3_J, V3_K,
  446     V4, V4_H, V4_J, V4_K,
  447     V5, V5_H, V5_J, V5_K,
  448     V6, V6_H, V6_J, V6_K,
  449     V7, V7_H, V7_J, V7_K,
  450 
  451     // non-volatiles
  452     V8, V8_H, V8_J, V8_K,
  453     V9, V9_H, V9_J, V9_K,
  454     V10, V10_H, V10_J, V10_K,
  455     V11, V11_H, V11_J, V11_K,
  456     V12, V12_H, V12_J, V12_K,
  457     V13, V13_H, V13_J, V13_K,
  458     V14, V14_H, V14_J, V14_K,
  459     V15, V15_H, V15_J, V15_K,
  460 );
  461 
  462 alloc_class chunk2 (
  463     // Governing predicates for load/store and arithmetic
  464     P0,
  465     P1,
  466     P2,
  467     P3,
  468     P4,
  469     P5,
  470     P6,
  471 
  472     // Extra predicates
  473     P8,
  474     P9,
  475     P10,
  476     P11,
  477     P12,
  478     P13,
  479     P14,
  480     P15,
  481 
  482     // Preserved for all-true predicate
  483     P7,
  484 );
  485 
  486 alloc_class chunk3(RFLAGS);
  487 
  488 //----------Architecture Description Register Classes--------------------------
  489 // Several register classes are automatically defined based upon information in
  490 // this architecture description.
  491 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
  492 // 2) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
  493 //
  494 
  495 // Class for all 32 bit general purpose registers
  496 reg_class all_reg32(
  497     R0,
  498     R1,
  499     R2,
  500     R3,
  501     R4,
  502     R5,
  503     R6,
  504     R7,
  505     R10,
  506     R11,
  507     R12,
  508     R13,
  509     R14,
  510     R15,
  511     R16,
  512     R17,
  513     R18,
  514     R19,
  515     R20,
  516     R21,
  517     R22,
  518     R23,
  519     R24,
  520     R25,
  521     R26,
  522     R27,
  523     R28,
  524     R29,
  525     R30,
  526     R31
  527 );
  528 
  529 
  530 // Class for all 32 bit integer registers (excluding SP which
  531 // will never be used as an integer register)
  532 reg_class any_reg32 %{
  533   return _ANY_REG32_mask;
  534 %}
  535 
  536 // Singleton class for R0 int register
  537 reg_class int_r0_reg(R0);
  538 
  539 // Singleton class for R2 int register
  540 reg_class int_r2_reg(R2);
  541 
  542 // Singleton class for R3 int register
  543 reg_class int_r3_reg(R3);
  544 
  545 // Singleton class for R4 int register
  546 reg_class int_r4_reg(R4);
  547 
  548 // Singleton class for R31 int register
  549 reg_class int_r31_reg(R31);
  550 
  551 // Class for all 64 bit general purpose registers
  552 reg_class all_reg(
  553     R0, R0_H,
  554     R1, R1_H,
  555     R2, R2_H,
  556     R3, R3_H,
  557     R4, R4_H,
  558     R5, R5_H,
  559     R6, R6_H,
  560     R7, R7_H,
  561     R10, R10_H,
  562     R11, R11_H,
  563     R12, R12_H,
  564     R13, R13_H,
  565     R14, R14_H,
  566     R15, R15_H,
  567     R16, R16_H,
  568     R17, R17_H,
  569     R18, R18_H,
  570     R19, R19_H,
  571     R20, R20_H,
  572     R21, R21_H,
  573     R22, R22_H,
  574     R23, R23_H,
  575     R24, R24_H,
  576     R25, R25_H,
  577     R26, R26_H,
  578     R27, R27_H,
  579     R28, R28_H,
  580     R29, R29_H,
  581     R30, R30_H,
  582     R31, R31_H
  583 );
  584 
  585 // Class for all long integer registers (including SP)
  586 reg_class any_reg %{
  587   return _ANY_REG_mask;
  588 %}
  589 
  590 // Class for non-allocatable 32 bit registers
  591 reg_class non_allocatable_reg32(
  592 #ifdef R18_RESERVED
  593     // See comment in register_aarch64.hpp
  594     R18,                        // tls on Windows
  595 #endif
  596     R28,                        // thread
  597     R30,                        // lr
  598     R31                         // sp
  599 );
  600 
  601 // Class for non-allocatable 64 bit registers
  602 reg_class non_allocatable_reg(
  603 #ifdef R18_RESERVED
  604     // See comment in register_aarch64.hpp
  605     R18, R18_H,                 // tls on Windows, platform register on macOS
  606 #endif
  607     R28, R28_H,                 // thread
  608     R30, R30_H,                 // lr
  609     R31, R31_H                  // sp
  610 );
  611 
  612 // Class for all non-special integer registers
  613 reg_class no_special_reg32 %{
  614   return _NO_SPECIAL_REG32_mask;
  615 %}
  616 
  617 // Class for all non-special long integer registers
  618 reg_class no_special_reg %{
  619   return _NO_SPECIAL_REG_mask;
  620 %}
  621 
  622 // Class for 64 bit register r0
  623 reg_class r0_reg(
  624     R0, R0_H
  625 );
  626 
  627 // Class for 64 bit register r1
  628 reg_class r1_reg(
  629     R1, R1_H
  630 );
  631 
  632 // Class for 64 bit register r2
  633 reg_class r2_reg(
  634     R2, R2_H
  635 );
  636 
  637 // Class for 64 bit register r3
  638 reg_class r3_reg(
  639     R3, R3_H
  640 );
  641 
  642 // Class for 64 bit register r4
  643 reg_class r4_reg(
  644     R4, R4_H
  645 );
  646 
  647 // Class for 64 bit register r5
  648 reg_class r5_reg(
  649     R5, R5_H
  650 );
  651 
  652 // Class for 64 bit register r10
  653 reg_class r10_reg(
  654     R10, R10_H
  655 );
  656 
  657 // Class for 64 bit register r11
  658 reg_class r11_reg(
  659     R11, R11_H
  660 );
  661 
  662 // Class for method register
  663 reg_class method_reg(
  664     R12, R12_H
  665 );
  666 
  667 // Class for thread register
  668 reg_class thread_reg(
  669     R28, R28_H
  670 );
  671 
  672 // Class for frame pointer register
  673 reg_class fp_reg(
  674     R29, R29_H
  675 );
  676 
  677 // Class for link register
  678 reg_class lr_reg(
  679     R30, R30_H
  680 );
  681 
  682 // Class for long sp register
  683 reg_class sp_reg(
  684   R31, R31_H
  685 );
  686 
  687 // Class for all pointer registers
  688 reg_class ptr_reg %{
  689   return _PTR_REG_mask;
  690 %}
  691 
  692 // Class for all non_special pointer registers
  693 reg_class no_special_ptr_reg %{
  694   return _NO_SPECIAL_PTR_REG_mask;
  695 %}
  696 
  697 // Class for all non_special pointer registers (excluding rfp)
  698 reg_class no_special_no_rfp_ptr_reg %{
  699   return _NO_SPECIAL_NO_RFP_PTR_REG_mask;
  700 %}
  701 
  702 // Class for all float registers
  703 reg_class float_reg(
  704     V0,
  705     V1,
  706     V2,
  707     V3,
  708     V4,
  709     V5,
  710     V6,
  711     V7,
  712     V8,
  713     V9,
  714     V10,
  715     V11,
  716     V12,
  717     V13,
  718     V14,
  719     V15,
  720     V16,
  721     V17,
  722     V18,
  723     V19,
  724     V20,
  725     V21,
  726     V22,
  727     V23,
  728     V24,
  729     V25,
  730     V26,
  731     V27,
  732     V28,
  733     V29,
  734     V30,
  735     V31
  736 );
  737 
  738 // Double precision float registers have virtual `high halves' that
  739 // are needed by the allocator.
  740 // Class for all double registers
  741 reg_class double_reg(
  742     V0, V0_H,
  743     V1, V1_H,
  744     V2, V2_H,
  745     V3, V3_H,
  746     V4, V4_H,
  747     V5, V5_H,
  748     V6, V6_H,
  749     V7, V7_H,
  750     V8, V8_H,
  751     V9, V9_H,
  752     V10, V10_H,
  753     V11, V11_H,
  754     V12, V12_H,
  755     V13, V13_H,
  756     V14, V14_H,
  757     V15, V15_H,
  758     V16, V16_H,
  759     V17, V17_H,
  760     V18, V18_H,
  761     V19, V19_H,
  762     V20, V20_H,
  763     V21, V21_H,
  764     V22, V22_H,
  765     V23, V23_H,
  766     V24, V24_H,
  767     V25, V25_H,
  768     V26, V26_H,
  769     V27, V27_H,
  770     V28, V28_H,
  771     V29, V29_H,
  772     V30, V30_H,
  773     V31, V31_H
  774 );
  775 
  776 // Class for all SVE vector registers.
  777 reg_class vectora_reg (
  778     V0, V0_H, V0_J, V0_K,
  779     V1, V1_H, V1_J, V1_K,
  780     V2, V2_H, V2_J, V2_K,
  781     V3, V3_H, V3_J, V3_K,
  782     V4, V4_H, V4_J, V4_K,
  783     V5, V5_H, V5_J, V5_K,
  784     V6, V6_H, V6_J, V6_K,
  785     V7, V7_H, V7_J, V7_K,
  786     V8, V8_H, V8_J, V8_K,
  787     V9, V9_H, V9_J, V9_K,
  788     V10, V10_H, V10_J, V10_K,
  789     V11, V11_H, V11_J, V11_K,
  790     V12, V12_H, V12_J, V12_K,
  791     V13, V13_H, V13_J, V13_K,
  792     V14, V14_H, V14_J, V14_K,
  793     V15, V15_H, V15_J, V15_K,
  794     V16, V16_H, V16_J, V16_K,
  795     V17, V17_H, V17_J, V17_K,
  796     V18, V18_H, V18_J, V18_K,
  797     V19, V19_H, V19_J, V19_K,
  798     V20, V20_H, V20_J, V20_K,
  799     V21, V21_H, V21_J, V21_K,
  800     V22, V22_H, V22_J, V22_K,
  801     V23, V23_H, V23_J, V23_K,
  802     V24, V24_H, V24_J, V24_K,
  803     V25, V25_H, V25_J, V25_K,
  804     V26, V26_H, V26_J, V26_K,
  805     V27, V27_H, V27_J, V27_K,
  806     V28, V28_H, V28_J, V28_K,
  807     V29, V29_H, V29_J, V29_K,
  808     V30, V30_H, V30_J, V30_K,
  809     V31, V31_H, V31_J, V31_K,
  810 );
  811 
// Class for all 64bit vector registers
// (two 32-bit allocator slots per register: Vn, Vn_H)
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
  847 
// Class for all 128bit vector registers
// (four 32-bit allocator slots per register: Vn, Vn_H, Vn_J, Vn_K)
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
  883 
// Singleton classes pinning specific SVE vector registers (all four
// allocator slots of one register), for instructions that require a
// fixed register.

// Class for vector register V10
reg_class v10_veca_reg(
    V10, V10_H, V10_J, V10_K
);

// Class for vector register V11
reg_class v11_veca_reg(
    V11, V11_H, V11_J, V11_K
);

// Class for vector register V12
reg_class v12_veca_reg(
    V12, V12_H, V12_J, V12_K
);

// Class for vector register V13
reg_class v13_veca_reg(
    V13, V13_H, V13_J, V13_K
);

// Class for vector register V17
reg_class v17_veca_reg(
    V17, V17_H, V17_J, V17_K
);

// Class for vector register V18
reg_class v18_veca_reg(
    V18, V18_H, V18_J, V18_K
);

// Class for vector register V23
reg_class v23_veca_reg(
    V23, V23_H, V23_J, V23_K
);

// Class for vector register V24
reg_class v24_veca_reg(
    V24, V24_H, V24_J, V24_K
);
  923 
// Singleton classes pinning individual vector registers (two allocator
// slots each: Vn, Vn_H), for instruction rules that require a fixed
// register operand.

// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Class for 128 bit register v4
reg_class v4_reg(
    V4, V4_H
);

// Class for 128 bit register v5
reg_class v5_reg(
    V5, V5_H
);

// Class for 128 bit register v6
reg_class v6_reg(
    V6, V6_H
);

// Class for 128 bit register v7
reg_class v7_reg(
    V7, V7_H
);

// Class for 128 bit register v8
reg_class v8_reg(
    V8, V8_H
);

// Class for 128 bit register v9
reg_class v9_reg(
    V9, V9_H
);

// Class for 128 bit register v10
reg_class v10_reg(
    V10, V10_H
);

// Class for 128 bit register v11
reg_class v11_reg(
    V11, V11_H
);

// Class for 128 bit register v12
reg_class v12_reg(
    V12, V12_H
);

// Class for 128 bit register v13
reg_class v13_reg(
    V13, V13_H
);

// Class for 128 bit register v14
reg_class v14_reg(
    V14, V14_H
);

// Class for 128 bit register v15
reg_class v15_reg(
    V15, V15_H
);

// Class for 128 bit register v16
reg_class v16_reg(
    V16, V16_H
);

// Class for 128 bit register v17
reg_class v17_reg(
    V17, V17_H
);

// Class for 128 bit register v18
reg_class v18_reg(
    V18, V18_H
);

// Class for 128 bit register v19
reg_class v19_reg(
    V19, V19_H
);

// Class for 128 bit register v20
reg_class v20_reg(
    V20, V20_H
);

// Class for 128 bit register v21
reg_class v21_reg(
    V21, V21_H
);

// Class for 128 bit register v22
reg_class v22_reg(
    V22, V22_H
);

// Class for 128 bit register v23
reg_class v23_reg(
    V23, V23_H
);

// Class for 128 bit register v24
reg_class v24_reg(
    V24, V24_H
);

// Class for 128 bit register v25
reg_class v25_reg(
    V25, V25_H
);

// Class for 128 bit register v26
reg_class v26_reg(
    V26, V26_H
);

// Class for 128 bit register v27
reg_class v27_reg(
    V27, V27_H
);

// Class for 128 bit register v28
reg_class v28_reg(
    V28, V28_H
);

// Class for 128 bit register v29
reg_class v29_reg(
    V29, V29_H
);

// Class for 128 bit register v30
reg_class v30_reg(
    V30, V30_H
);

// Class for 128 bit register v31
reg_class v31_reg(
    V31, V31_H
);
 1083 
// Class for all SVE predicate registers.
// P7 is excluded: it is kept permanently set to all-true (see below),
// so it must never be handed out by the allocator.
reg_class pr_reg (
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    // P7, non-allocatable, preserved with all elements preset to TRUE.
    P8,
    P9,
    P10,
    P11,
    P12,
    P13,
    P14,
    P15
);
 1103 
// Class for SVE governing predicate registers, which are used
// to determine the active elements of a predicated instruction.
// Only the low predicate registers are listed here; P7 is excluded
// because it is reserved as an all-true predicate (see pr_reg above).
reg_class gov_pr (
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    // P7, non-allocatable, preserved with all elements preset to TRUE.
);

// Singleton classes for predicate registers required at fixed positions.
reg_class p0_reg(P0);
reg_class p1_reg(P1);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);
 1122 
 1123 %}
 1124 
 1125 //----------DEFINITION BLOCK---------------------------------------------------
 1126 // Define name --> value mappings to inform the ADLC of an integer valued name
 1127 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 1128 // Format:
 1129 //        int_def  <name>         ( <int_value>, <expression>);
 1130 // Generated Code in ad_<arch>.hpp
 1131 //        #define  <name>   (<expression>)
 1132 //        // value == <int_value>
 1133 // Generated code in ad_<arch>.cpp adlc_verification()
 1134 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 1135 //
 1136 
// We follow the ppc-aix port in using a simple cost model which ranks
// register operations as cheap, memory ops as more expensive and
// branches as most expensive. The first two have a low as well as a
// normal cost. A huge cost appears to be a way of saying don't do
// something.
 1142 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls are ranked twice as expensive as a plain insn.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile memory accesses are the most expensive operations of all.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 1150 
 1151 
 1152 //----------SOURCE BLOCK-------------------------------------------------------
 1153 // This is a block of C++ code which provides values, functions, and
 1154 // definitions necessary in the rest of the architecture description
 1155 
 1156 source_hpp %{
 1157 
 1158 #include "asm/macroAssembler.hpp"
 1159 #include "gc/shared/barrierSetAssembler.hpp"
 1160 #include "gc/shared/cardTable.hpp"
 1161 #include "gc/shared/cardTableBarrierSet.hpp"
 1162 #include "gc/shared/collectedHeap.hpp"
 1163 #include "opto/addnode.hpp"
 1164 #include "opto/convertnode.hpp"
 1165 #include "runtime/objectMonitor.hpp"
 1166 
// RegMasks backing the dynamically-defined register classes; they are
// populated once at startup by reg_mask_init() in the source block.
extern RegMask _ANY_REG32_mask;
extern RegMask _ANY_REG_mask;
extern RegMask _PTR_REG_mask;
extern RegMask _NO_SPECIAL_REG32_mask;
extern RegMask _NO_SPECIAL_REG_mask;
extern RegMask _NO_SPECIAL_PTR_REG_mask;
extern RegMask _NO_SPECIAL_NO_RFP_PTR_REG_mask;
 1174 
 1175 class CallStubImpl {
 1176 
 1177   //--------------------------------------------------------------
 1178   //---<  Used for optimization in Compile::shorten_branches  >---
 1179   //--------------------------------------------------------------
 1180 
 1181  public:
 1182   // Size of call trampoline stub.
 1183   static uint size_call_trampoline() {
 1184     return 0; // no call trampolines on this platform
 1185   }
 1186 
 1187   // number of relocations needed by a call trampoline stub
 1188   static uint reloc_call_trampoline() {
 1189     return 0; // no call trampolines on this platform
 1190   }
 1191 };
 1192 
class HandlerImpl {

 public:

  // Emit the exception / deopt handler stubs, returning the code
  // offset at which each handler starts.
  static int emit_exception_handler(C2_MacroAssembler *masm);
  static int emit_deopt_handler(C2_MacroAssembler* masm);

  // Worst-case size of the exception handler: one far branch to the
  // exception blob stub.
  static uint size_exception_handler() {
    return MacroAssembler::far_codestub_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    return NativeInstruction::instruction_size + MacroAssembler::far_codestub_branch_size();
  }
};
 1209 
// Platform-dependent extension point for per-node flags; AArch64
// defines no additional flags, so the enum just re-exports the
// shared _last_flag value.
class Node::PD {
public:
  enum NodeFlags {
    _last_flag = Node::_last_flag
  };
};
 1216 
  // Returns true when the opcode is a CAS-style LoadStore; see the
  // definition in the source block for the maybe_volatile semantics.
  bool is_CAS(int opcode, bool maybe_volatile);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);

  // Convert BoolTest condition to Assembler condition.
  // Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
  Assembler::Condition to_assembler_cond(BoolTest::mask cond);
 1239 %}
 1240 
 1241 source %{
 1242 
 1243   // Derived RegMask with conditionally allocatable registers
 1244 
  // Hook for platform-specific analysis of mach nodes after matching;
  // AArch64 performs no such pass.
  void PhaseOutput::pd_perform_mach_node_analysis() {
  }
 1247 
  // Byte alignment required for this mach node's code; AArch64
  // instructions need no alignment beyond the default (1 byte).
  int MachNode::pd_alignment_required() const {
    return 1;
  }
 1251 
  // Padding (in bytes) to insert before this node; never needed here.
  int MachNode::compute_padding(int current_offset) const {
    return 0;
  }
 1255 
  // Definitions of the RegMasks declared extern in source_hpp; they
  // are filled in by reg_mask_init() below.
  RegMask _ANY_REG32_mask;
  RegMask _ANY_REG_mask;
  RegMask _PTR_REG_mask;
  RegMask _NO_SPECIAL_REG32_mask;
  RegMask _NO_SPECIAL_REG_mask;
  RegMask _NO_SPECIAL_PTR_REG_mask;
  RegMask _NO_SPECIAL_NO_RFP_PTR_REG_mask;
 1263 
  // One-time initialization of the derived RegMasks used by the
  // dynamic register classes. Starts from the adlc-generated masks and
  // removes registers whose allocatability depends on runtime flags.
  void reg_mask_init() {
    // We derive below RegMask(s) from the ones which are auto-generated from
    // adlc register classes to make AArch64 rheapbase (r27) and rfp (r29)
    // registers conditionally reserved.

    // sp is never a general-purpose 32-bit value.
    _ANY_REG32_mask = _ALL_REG32_mask;
    _ANY_REG32_mask.Remove(OptoReg::as_OptoReg(r31_sp->as_VMReg()));

    _ANY_REG_mask = _ALL_REG_mask;

    _PTR_REG_mask = _ALL_REG_mask;

    // "No special" masks exclude all statically non-allocatable registers.
    _NO_SPECIAL_REG32_mask = _ALL_REG32_mask;
    _NO_SPECIAL_REG32_mask.SUBTRACT(_NON_ALLOCATABLE_REG32_mask);

    _NO_SPECIAL_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    _NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    // r27 is not allocatable when compressed oops is on and heapbase is not
    // zero, compressed klass pointers doesn't use r27 after JDK-8234794
    if (UseCompressedOops && (CompressedOops::base() != nullptr)) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
      _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
      _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
    }

    // r29 is not allocatable when PreserveFramePointer is on
    if (PreserveFramePointer) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
      _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
      _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
    }

    // The no-rfp variant always excludes r29, independent of flags.
    _NO_SPECIAL_NO_RFP_PTR_REG_mask = _NO_SPECIAL_PTR_REG_mask;
    _NO_SPECIAL_NO_RFP_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
  }
 1303 
  // Optimization of volatile gets and puts
 1305   // -------------------------------------
 1306   //
 1307   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
 1308   // use to implement volatile reads and writes. For a volatile read
 1309   // we simply need
 1310   //
 1311   //   ldar<x>
 1312   //
 1313   // and for a volatile write we need
 1314   //
 1315   //   stlr<x>
 1316   //
 1317   // Alternatively, we can implement them by pairing a normal
 1318   // load/store with a memory barrier. For a volatile read we need
 1319   //
 1320   //   ldr<x>
 1321   //   dmb ishld
 1322   //
 1323   // for a volatile write
 1324   //
 1325   //   dmb ish
 1326   //   str<x>
 1327   //   dmb ish
 1328   //
 1329   // We can also use ldaxr and stlxr to implement compare and swap CAS
 1330   // sequences. These are normally translated to an instruction
 1331   // sequence like the following
 1332   //
 1333   //   dmb      ish
 1334   // retry:
 1335   //   ldxr<x>   rval raddr
 1336   //   cmp       rval rold
 1337   //   b.ne done
 1338   //   stlxr<x>  rval, rnew, rold
 1339   //   cbnz      rval retry
 1340   // done:
 1341   //   cset      r0, eq
 1342   //   dmb ishld
 1343   //
 1344   // Note that the exclusive store is already using an stlxr
 1345   // instruction. That is required to ensure visibility to other
 1346   // threads of the exclusive write (assuming it succeeds) before that
 1347   // of any subsequent writes.
 1348   //
 1349   // The following instruction sequence is an improvement on the above
 1350   //
 1351   // retry:
 1352   //   ldaxr<x>  rval raddr
 1353   //   cmp       rval rold
 1354   //   b.ne done
 1355   //   stlxr<x>  rval, rnew, rold
 1356   //   cbnz      rval retry
 1357   // done:
 1358   //   cset      r0, eq
 1359   //
 1360   // We don't need the leading dmb ish since the stlxr guarantees
 1361   // visibility of prior writes in the case that the swap is
 1362   // successful. Crucially we don't have to worry about the case where
 1363   // the swap is not successful since no valid program should be
 1364   // relying on visibility of prior changes by the attempting thread
 1365   // in the case where the CAS fails.
 1366   //
 1367   // Similarly, we don't need the trailing dmb ishld if we substitute
 1368   // an ldaxr instruction since that will provide all the guarantees we
 1369   // require regarding observation of changes made by other threads
 1370   // before any change to the CAS address observed by the load.
 1371   //
 1372   // In order to generate the desired instruction sequence we need to
 1373   // be able to identify specific 'signature' ideal graph node
 1374   // sequences which i) occur as a translation of a volatile reads or
 1375   // writes or CAS operations and ii) do not occur through any other
 1376   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
 1378   // sequences to the desired machine code sequences. Selection of the
 1379   // alternative rules can be implemented by predicates which identify
 1380   // the relevant node sequences.
 1381   //
 1382   // The ideal graph generator translates a volatile read to the node
 1383   // sequence
 1384   //
 1385   //   LoadX[mo_acquire]
 1386   //   MemBarAcquire
 1387   //
 1388   // As a special case when using the compressed oops optimization we
 1389   // may also see this variant
 1390   //
 1391   //   LoadN[mo_acquire]
 1392   //   DecodeN
 1393   //   MemBarAcquire
 1394   //
 1395   // A volatile write is translated to the node sequence
 1396   //
 1397   //   MemBarRelease
 1398   //   StoreX[mo_release] {CardMark}-optional
 1399   //   MemBarVolatile
 1400   //
 1401   // n.b. the above node patterns are generated with a strict
 1402   // 'signature' configuration of input and output dependencies (see
 1403   // the predicates below for exact details). The card mark may be as
 1404   // simple as a few extra nodes or, in a few GC configurations, may
 1405   // include more complex control flow between the leading and
 1406   // trailing memory barriers. However, whatever the card mark
 1407   // configuration these signatures are unique to translated volatile
 1408   // reads/stores -- they will not appear as a result of any other
 1409   // bytecode translation or inlining nor as a consequence of
 1410   // optimizing transforms.
 1411   //
 1412   // We also want to catch inlined unsafe volatile gets and puts and
 1413   // be able to implement them using either ldar<x>/stlr<x> or some
 1414   // combination of ldr<x>/stlr<x> and dmb instructions.
 1415   //
 1416   // Inlined unsafe volatiles puts manifest as a minor variant of the
 1417   // normal volatile put node sequence containing an extra cpuorder
 1418   // membar
 1419   //
 1420   //   MemBarRelease
 1421   //   MemBarCPUOrder
 1422   //   StoreX[mo_release] {CardMark}-optional
 1423   //   MemBarCPUOrder
 1424   //   MemBarVolatile
 1425   //
 1426   // n.b. as an aside, a cpuorder membar is not itself subject to
 1427   // matching and translation by adlc rules.  However, the rule
 1428   // predicates need to detect its presence in order to correctly
 1429   // select the desired adlc rules.
 1430   //
 1431   // Inlined unsafe volatile gets manifest as a slightly different
 1432   // node sequence to a normal volatile get because of the
 1433   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
 1435   // MemBarAcquire, possibly through an optional DecodeN, is still
 1436   // present
 1437   //
 1438   //   MemBarCPUOrder
 1439   //        ||       \\
 1440   //   MemBarCPUOrder LoadX[mo_acquire]
 1441   //        ||            |
 1442   //        ||       {DecodeN} optional
 1443   //        ||       /
 1444   //     MemBarAcquire
 1445   //
 1446   // In this case the acquire membar does not directly depend on the
 1447   // load. However, we can be sure that the load is generated from an
 1448   // inlined unsafe volatile get if we see it dependent on this unique
 1449   // sequence of membar nodes. Similarly, given an acquire membar we
 1450   // can know that it was added because of an inlined unsafe volatile
 1451   // get if it is fed and feeds a cpuorder membar and if its feed
 1452   // membar also feeds an acquiring load.
 1453   //
 1454   // Finally an inlined (Unsafe) CAS operation is translated to the
 1455   // following ideal graph
 1456   //
 1457   //   MemBarRelease
 1458   //   MemBarCPUOrder
 1459   //   CompareAndSwapX {CardMark}-optional
 1460   //   MemBarCPUOrder
 1461   //   MemBarAcquire
 1462   //
 1463   // So, where we can identify these volatile read and write
 1464   // signatures we can choose to plant either of the above two code
 1465   // sequences. For a volatile read we can simply plant a normal
 1466   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
 1467   // also choose to inhibit translation of the MemBarAcquire and
 1468   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
 1469   //
 1470   // When we recognise a volatile store signature we can choose to
 1471   // plant at a dmb ish as a translation for the MemBarRelease, a
 1472   // normal str<x> and then a dmb ish for the MemBarVolatile.
 1473   // Alternatively, we can inhibit translation of the MemBarRelease
 1474   // and MemBarVolatile and instead plant a simple stlr<x>
 1475   // instruction.
 1476   //
 1477   // when we recognise a CAS signature we can choose to plant a dmb
 1478   // ish as a translation for the MemBarRelease, the conventional
 1479   // macro-instruction sequence for the CompareAndSwap node (which
 1480   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
 1481   // Alternatively, we can elide generation of the dmb instructions
 1482   // and plant the alternative CompareAndSwap macro-instruction
 1483   // sequence (which uses ldaxr<x>).
 1484   //
 1485   // Of course, the above only applies when we see these signature
 1486   // configurations. We still want to plant dmb instructions in any
 1487   // other cases where we may see a MemBarAcquire, MemBarRelease or
 1488   // MemBarVolatile. For example, at the end of a constructor which
 1489   // writes final/volatile fields we will see a MemBarRelease
 1490   // instruction and this needs a 'dmb ish' lest we risk the
 1491   // constructed object being visible without making the
 1492   // final/volatile field writes visible.
 1493   //
 1494   // n.b. the translation rules below which rely on detection of the
 1495   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
 1496   // If we see anything other than the signature configurations we
 1497   // always just translate the loads and stores to ldr<x> and str<x>
 1498   // and translate acquire, release and volatile membars to the
 1499   // relevant dmb instructions.
 1500   //
 1501 
  // is_CAS(int opcode, bool maybe_volatile)
  //
  // Returns true if opcode is one of the possible CompareAndSwapX
  // values, otherwise false.
  //
  // The strong CAS and GetAndSet/GetAndAdd nodes always count. The
  // CompareAndExchange / weak CAS variants count only when
  // maybe_volatile is true — callers pass true when the node appears
  // inside a recognised volatile signature (see the comment block
  // above) and false when asking for the always-CAS subset.

  bool is_CAS(int opcode, bool maybe_volatile)
  {
    switch(opcode) {
      // We handle these
    case Op_CompareAndSwapI:
    case Op_CompareAndSwapL:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_CompareAndSwapB:
    case Op_CompareAndSwapS:
    case Op_GetAndSetI:
    case Op_GetAndSetL:
    case Op_GetAndSetP:
    case Op_GetAndSetN:
    case Op_GetAndAddI:
    case Op_GetAndAddL:
      return true;
      // These depend on the caller's context (maybe_volatile)
    case Op_CompareAndExchangeI:
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeB:
    case Op_CompareAndExchangeS:
    case Op_CompareAndExchangeL:
    case Op_CompareAndExchangeP:
    case Op_WeakCompareAndSwapB:
    case Op_WeakCompareAndSwapS:
    case Op_WeakCompareAndSwapI:
    case Op_WeakCompareAndSwapL:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      return maybe_volatile;
    default:
      return false;
    }
  }
 1547 
 1548   // helper to determine the maximum number of Phi nodes we may need to
 1549   // traverse when searching from a card mark membar for the merge mem
 1550   // feeding a trailing membar or vice versa
 1551 
 1552 // predicates controlling emit of ldr<x>/ldar<x>
 1553 
 1554 bool unnecessary_acquire(const Node *barrier)
 1555 {
 1556   assert(barrier->is_MemBar(), "expecting a membar");
 1557 
 1558   MemBarNode* mb = barrier->as_MemBar();
 1559 
 1560   if (mb->trailing_load()) {
 1561     return true;
 1562   }
 1563 
 1564   if (mb->trailing_load_store()) {
 1565     Node* load_store = mb->in(MemBarNode::Precedent);
 1566     assert(load_store->is_LoadStore(), "unexpected graph shape");
 1567     return is_CAS(load_store->Opcode(), true);
 1568   }
 1569 
 1570   return false;
 1571 }
 1572 
 1573 bool needs_acquiring_load(const Node *n)
 1574 {
 1575   assert(n->is_Load(), "expecting a load");
 1576   LoadNode *ld = n->as_Load();
 1577   return ld->is_acquire();
 1578 }
 1579 
 1580 bool unnecessary_release(const Node *n)
 1581 {
 1582   assert((n->is_MemBar() &&
 1583           n->Opcode() == Op_MemBarRelease),
 1584          "expecting a release membar");
 1585 
 1586   MemBarNode *barrier = n->as_MemBar();
 1587   if (!barrier->leading()) {
 1588     return false;
 1589   } else {
 1590     Node* trailing = barrier->trailing_membar();
 1591     MemBarNode* trailing_mb = trailing->as_MemBar();
 1592     assert(trailing_mb->trailing(), "Not a trailing membar?");
 1593     assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
 1594 
 1595     Node* mem = trailing_mb->in(MemBarNode::Precedent);
 1596     if (mem->is_Store()) {
 1597       assert(mem->as_Store()->is_release(), "");
 1598       assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
 1599       return true;
 1600     } else {
 1601       assert(mem->is_LoadStore(), "");
 1602       assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
 1603       return is_CAS(mem->Opcode(), true);
 1604     }
 1605   }
 1606   return false;
 1607 }
 1608 
// Returns true when the trailing MemBarVolatile of a volatile store
// can be elided (the store will be emitted as stlr<x>).
bool unnecessary_volatile(const Node *n)
{
  // n is known to be a membar by the caller: assert n->is_MemBar();
  MemBarNode *mbvol = n->as_MemBar();

  // The membar is removable exactly when it trails a releasing store.
  bool release = mbvol->trailing_store();
  assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
#ifdef ASSERT
  // Sanity-check the leading/trailing membar pairing.
  if (release) {
    Node* leading = mbvol->leading_membar();
    assert(leading->Opcode() == Op_MemBarRelease, "");
    assert(leading->as_MemBar()->leading_store(), "");
    assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
  }
#endif

  return release;
}
 1627 
 1628 // predicates controlling emit of str<x>/stlr<x>
 1629 
 1630 bool needs_releasing_store(const Node *n)
 1631 {
 1632   // assert n->is_Store();
 1633   StoreNode *st = n->as_Store();
 1634   return st->trailing_membar() != nullptr;
 1635 }
 1636 
 1637 // predicate controlling translation of CAS
 1638 //
 1639 // returns true if CAS needs to use an acquiring load otherwise false
 1640 
 1641 bool needs_acquiring_load_exclusive(const Node *n)
 1642 {
 1643   assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
 1644   LoadStoreNode* ldst = n->as_LoadStore();
 1645   if (is_CAS(n->Opcode(), false)) {
 1646     assert(ldst->trailing_membar() != nullptr, "expected trailing membar");
 1647   } else {
 1648     return ldst->trailing_membar() != nullptr;
 1649   }
 1650 
 1651   // so we can just return true here
 1652   return true;
 1653 }
 1654 
// Shorthand used by the emit methods below: `__ foo()` expands to
// masm->foo().
#define __ masm->

// advance declarations for helper functions to convert register
// indices to register objects

// the ad file has to provide implementations of certain methods
// expected by the generic code
//
// REQUIRED FUNCTIONALITY

//=============================================================================

// !!!!! Special hack to get all types of calls to specify the byte offset
//       from the start of the call to the point where the return address
//       will point.
 1670 
 1671 int MachCallStaticJavaNode::ret_addr_offset()
 1672 {
 1673   // call should be a simple bl
 1674   int off = 4;
 1675   return off;
 1676 }
 1677 
// Byte offset from the start of a dynamic Java call to its return
// address: four 4-byte instructions.
int MachCallDynamicJavaNode::ret_addr_offset()
{
  return 16; // movz, movk, movk, bl
}
 1682 
 1683 int MachCallRuntimeNode::ret_addr_offset() {
 1684   // for generated stubs the call will be
 1685   //   bl(addr)
 1686   // or with far branches
 1687   //   bl(trampoline_stub)
 1688   // for real runtime callouts it will be six instructions
 1689   // see aarch64_enc_java_to_runtime
 1690   //   adr(rscratch2, retaddr)
 1691   //   str(rscratch2, Address(rthread, JavaThread::last_Java_pc_offset()));
 1692   //   lea(rscratch1, RuntimeAddress(addr)
 1693   //   blr(rscratch1)
 1694   CodeBlob *cb = CodeCache::find_blob(_entry_point);
 1695   if (cb) {
 1696     return 1 * NativeInstruction::instruction_size;
 1697   } else {
 1698     return 6 * NativeInstruction::instruction_size;
 1699   }
 1700 }
 1701 
 1702 //=============================================================================
 1703 
#ifndef PRODUCT
// Debug-only pretty printing of the breakpoint node.
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif
 1709 
// Emit a breakpoint as a single brk instruction.
void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  __ brk(0);
}
 1713 
// Size in bytes of the emitted breakpoint; defer to the generic
// MachNode computation.
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 1717 
 1718 //=============================================================================
 1719 
#ifndef PRODUCT
  // Debug-only listing of the nop padding.
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif
 1725 
 1726   void MachNopNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc*) const {
 1727     for (int i = 0; i < _count; i++) {
 1728       __ nop();
 1729     }
 1730   }
 1731 
 1732   uint MachNopNode::size(PhaseRegAlloc*) const {
 1733     return _count * NativeInstruction::instruction_size;
 1734   }
 1735 
 1736 //=============================================================================
 1737 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
 1738 
 1739 int ConstantTable::calculate_table_base_offset() const {
 1740   return 0;  // absolute addressing, no offset
 1741 }
 1742 
 1743 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
 1744 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
 1745   ShouldNotReachHere();
 1746 }
 1747 
 1748 void MachConstantBaseNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {
 1749   // Empty encoding
 1750 }
 1751 
 1752 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
 1753   return 0;
 1754 }
 1755 
 1756 #ifndef PRODUCT
 1757 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
 1758   st->print("-- \t// MachConstantBaseNode (empty encoding)");
 1759 }
 1760 #endif
 1761 
#ifndef PRODUCT
// Debug listing of the method prologue; mirrors the code emitted by
// MachPrologNode::emit() below.
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  if (C->output()->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // ROP protection sequence (see VM_Version::use_rop_protection).
  if (VM_Version::use_rop_protection()) {
    st->print("ldr  zr, [lr]\n\t");
    st->print("paciaz\n\t");
  }
  // Small frames: single sub + stp. Large frames need rscratch1 to hold
  // the frame size since it does not fit the immediate field.
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
  // nmethod entry barrier (methods only, not stubs).
  if (C->stub_function() == nullptr) {
    st->print("\n\t");
    st->print("ldr  rscratch1, [guard]\n\t");
    st->print("dmb ishld\n\t");
    st->print("ldr  rscratch2, [rthread, #thread_disarmed_guard_value_offset]\n\t");
    st->print("cmp  rscratch1, rscratch2\n\t");
    st->print("b.eq skip");
    st->print("\n\t");
    st->print("blr #nmethod_entry_barrier_stub\n\t");
    st->print("b skip\n\t");
    st->print("guard: int\n\t");
    st->print("\n\t");
    st->print("skip:\n\t");
  }
}
#endif
 1801 
// Emit the method prologue: optional class-initialization barrier, ptrue
// reinitialization for SVE code, stack-overflow bang, frame construction,
// and (for methods, not stubs) the nmethod entry barrier.
void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;

  // n.b. frame size includes space for return pc and rfp
  const int framesize = C->output()->frame_size_in_bytes();

  if (C->clinit_barrier_on_entry()) {
    assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");

    Label L_skip_barrier;

    // Jump to the wrong-method stub if the holder class is still
    // initializing in another thread.
    __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
    __ bind(L_skip_barrier);
  }

  // Re-establish the predicate register via reinitialize_ptrue() when
  // vector code may run in this method.
  if (C->max_vector_size() > 0) {
    __ reinitialize_ptrue();
  }

  int bangsize = C->output()->bang_size_in_bytes();
  if (C->output()->need_stack_bang(bangsize))
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (C->stub_function() == nullptr) {
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
    // Dummy labels for just measuring the code size
    Label dummy_slow_path;
    Label dummy_continuation;
    Label dummy_guard;
    Label* slow_path = &dummy_slow_path;
    Label* continuation = &dummy_continuation;
    Label* guard = &dummy_guard;
    if (!Compile::current()->output()->in_scratch_emit_size()) {
      // Use real labels from actual stub when not emitting code for the purpose of measuring its size
      C2EntryBarrierStub* stub = new (Compile::current()->comp_arena()) C2EntryBarrierStub();
      Compile::current()->output()->add_stub(stub);
      slow_path = &stub->entry();
      continuation = &stub->continuation();
      guard = &stub->guard();
    }
    // In the C2 code, we move the non-hot part of nmethod entry barriers out-of-line to a stub.
    bs->nmethod_entry_barrier(masm, slow_path, continuation, guard);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->output()->set_frame_complete(__ offset());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    ConstantTable& constant_table = C->output()->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
 1863 
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

// Number of relocatable values contained in the prologue.
int MachPrologNode::reloc() const
{
  return 0;
}
 1874 
 1875 //=============================================================================
 1876 
#ifndef PRODUCT
// Debug listing of the method epilogue; mirrors the code emitted by
// MachEpilogNode::emit() below.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  // Mirror of the three frame-teardown shapes chosen by frame size.
  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }
  // ROP protection sequence (see VM_Version::use_rop_protection).
  if (VM_Version::use_rop_protection()) {
    st->print("autiaz\n\t");
    st->print("ldr  zr, [lr]\n\t");
  }

  // Return-time safepoint poll (methods only).
  if (do_polling() && C->is_method_compilation()) {
    st->print("# test polling word\n\t");
    st->print("ldr  rscratch1, [rthread],#%d\n\t", in_bytes(JavaThread::polling_word_offset()));
    st->print("cmp  sp, rscratch1\n\t");
    st->print("bhi #slow_path");
  }
}
#endif
 1907 
// Emit the method epilogue: tear down the frame, perform the reserved
// stack check if enabled, and emit the return safepoint poll for methods.
void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    // Use a dummy label while only measuring code size; otherwise create
    // and register the real safepoint poll stub.
    Label dummy_label;
    Label* code_stub = &dummy_label;
    if (!C->output()->in_scratch_emit_size()) {
      C2SafepointPollStub* stub = new (C->comp_arena()) C2SafepointPollStub(__ offset());
      C->output()->add_stub(stub);
      code_stub = &stub->entry();
    }
    __ relocate(relocInfo::poll_return_type);
    __ safepoint_poll(*code_stub, true /* at_return */, true /* in_nmethod */);
  }
}
 1930 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

const Pipeline * MachEpilogNode::pipeline() const {
  // No special scheduling; use the generic pipeline class.
  return MachNode::pipeline_class();
}
 1944 
 1945 //=============================================================================
 1946 
 1947 static enum RC rc_class(OptoReg::Name reg) {
 1948 
 1949   if (reg == OptoReg::Bad) {
 1950     return rc_bad;
 1951   }
 1952 
 1953   // we have 32 int registers * 2 halves
 1954   int slots_of_int_registers = Register::number_of_registers * Register::max_slots_per_register;
 1955 
 1956   if (reg < slots_of_int_registers) {
 1957     return rc_int;
 1958   }
 1959 
 1960   // we have 32 float register * 8 halves
 1961   int slots_of_float_registers = FloatRegister::number_of_registers * FloatRegister::max_slots_per_register;
 1962   if (reg < slots_of_int_registers + slots_of_float_registers) {
 1963     return rc_float;
 1964   }
 1965 
 1966   int slots_of_predicate_registers = PRegister::number_of_registers * PRegister::max_slots_per_register;
 1967   if (reg < slots_of_int_registers + slots_of_float_registers + slots_of_predicate_registers) {
 1968     return rc_predicate;
 1969   }
 1970 
 1971   // Between predicate regs & stack is the flags.
 1972   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
 1973 
 1974   return rc_stack;
 1975 }
 1976 
// Emit (when masm != nullptr) and/or format (when st != nullptr) a spill
// copy inserted by the register allocator. Handles GPR/FPR/predicate
// register-to-register moves, spills to stack, reloads from stack, and
// stack-to-stack copies, including NEON (VecD/VecX) and SVE (VecA) vector
// payloads. Always returns 0; the node's size is computed separately by
// MachSpillCopyNode::size(). Note: the do_size parameter is unused here.
uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
  Compile* C = ra_->C;

  // Get registers to move.
  OptoReg::Name src_hi = ra_->get_reg_second(in(1));
  OptoReg::Name src_lo = ra_->get_reg_first(in(1));
  OptoReg::Name dst_hi = ra_->get_reg_second(this);
  OptoReg::Name dst_lo = ra_->get_reg_first(this);

  enum RC src_hi_rc = rc_class(src_hi);
  enum RC src_lo_rc = rc_class(src_lo);
  enum RC dst_hi_rc = rc_class(dst_hi);
  enum RC dst_lo_rc = rc_class(dst_lo);

  assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");

  if (src_hi != OptoReg::Bad && !bottom_type()->isa_vectmask()) {
    assert((src_lo&1)==0 && src_lo+1==src_hi &&
           (dst_lo&1)==0 && dst_lo+1==dst_hi,
           "expected aligned-adjacent pairs");
  }

  if (src_lo == dst_lo && src_hi == dst_hi) {
    return 0;            // Self copy, no move.
  }

  // A 64-bit payload occupies an aligned-adjacent pair of 32-bit slots.
  bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
              (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
  int src_offset = ra_->reg2offset(src_lo);
  int dst_offset = ra_->reg2offset(dst_lo);

  // Vector payloads (vector masks are handled by the scalar/predicate
  // cases further below).
  if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
    uint ireg = ideal_reg();
    if (ireg == Op_VecA && masm) {
      // Scalable (SVE) vector.
      int sve_vector_reg_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
      if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
        // stack->stack
        __ spill_copy_sve_vector_stack_to_stack(src_offset, dst_offset,
                                                sve_vector_reg_size_in_bytes);
      } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
        // register->stack spill
        __ spill_sve_vector(as_FloatRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo),
                            sve_vector_reg_size_in_bytes);
      } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
        // stack->register reload
        __ unspill_sve_vector(as_FloatRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo),
                              sve_vector_reg_size_in_bytes);
      } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
        // register->register move (orr with itself copies the register)
        __ sve_orr(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                   as_FloatRegister(Matcher::_regEncode[src_lo]),
                   as_FloatRegister(Matcher::_regEncode[src_lo]));
      } else {
        ShouldNotReachHere();
      }
    } else if (masm) {
      // NEON 64-bit or 128-bit vector.
      assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
      assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
      if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
        // stack->stack
        assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
        if (ireg == Op_VecD) {
          __ unspill(rscratch1, true, src_offset);
          __ spill(rscratch1, true, dst_offset);
        } else {
          __ spill_copy128(src_offset, dst_offset);
        }
      } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
        __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
               ireg == Op_VecD ? __ T8B : __ T16B,
               as_FloatRegister(Matcher::_regEncode[src_lo]));
      } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
        __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
                 ireg == Op_VecD ? __ D : __ Q,
                 ra_->reg2offset(dst_lo));
      } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
        __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                   ireg == Op_VecD ? __ D : __ Q,
                   ra_->reg2offset(src_lo));
      } else {
        ShouldNotReachHere();
      }
    }
  } else if (masm) {
    // Scalar and predicate moves, dispatched on the source register class.
    switch (src_lo_rc) {
    case rc_int:
      if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
        if (is64) {
            __ mov(as_Register(Matcher::_regEncode[dst_lo]),
                   as_Register(Matcher::_regEncode[src_lo]));
        } else {
            __ movw(as_Register(Matcher::_regEncode[dst_lo]),
                    as_Register(Matcher::_regEncode[src_lo]));
        }
      } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
        if (is64) {
            __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_Register(Matcher::_regEncode[src_lo]));
        } else {
            __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_Register(Matcher::_regEncode[src_lo]));
        }
      } else {                    // gpr --> stack spill
        assert(dst_lo_rc == rc_stack, "spill to bad register class");
        __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
      }
      break;
    case rc_float:
      if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
        if (is64) {
            __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        } else {
            __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        }
      } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
        if (is64) {
            __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        } else {
            __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        }
      } else {                    // fpr --> stack spill
        assert(dst_lo_rc == rc_stack, "spill to bad register class");
        __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
                 is64 ? __ D : __ S, dst_offset);
      }
      break;
    case rc_stack:
      if (dst_lo_rc == rc_int) {  // stack --> gpr load
        __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
      } else if (dst_lo_rc == rc_float) { // stack --> fpr load
        __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                   is64 ? __ D : __ S, src_offset);
      } else if (dst_lo_rc == rc_predicate) {
        __ unspill_sve_predicate(as_PRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo),
                                 Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
      } else {                    // stack --> stack copy
        assert(dst_lo_rc == rc_stack, "spill to bad register class");
        if (ideal_reg() == Op_RegVectMask) {
          __ spill_copy_sve_predicate_stack_to_stack(src_offset, dst_offset,
                                                     Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
        } else {
          __ unspill(rscratch1, is64, src_offset);
          __ spill(rscratch1, is64, dst_offset);
        }
      }
      break;
    case rc_predicate:
      if (dst_lo_rc == rc_predicate) {
        __ sve_mov(as_PRegister(Matcher::_regEncode[dst_lo]), as_PRegister(Matcher::_regEncode[src_lo]));
      } else if (dst_lo_rc == rc_stack) {
        __ spill_sve_predicate(as_PRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo),
                               Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
      } else {
        assert(false, "bad src and dst rc_class combination.");
        ShouldNotReachHere();
      }
      break;
    default:
      assert(false, "bad rc_class for spill");
      ShouldNotReachHere();
    }
  }

  // Append a human-readable description of the copy when formatting.
  if (st) {
    st->print("spill ");
    if (src_lo_rc == rc_stack) {
      st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
    } else {
      st->print("%s -> ", Matcher::regName[src_lo]);
    }
    if (dst_lo_rc == rc_stack) {
      st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
    } else {
      st->print("%s", Matcher::regName[dst_lo]);
    }
    if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
      int vsize = 0;
      switch (ideal_reg()) {
      case Op_VecD:
        vsize = 64;
        break;
      case Op_VecX:
        vsize = 128;
        break;
      case Op_VecA:
        vsize = Matcher::scalable_vector_reg_size(T_BYTE) * 8;
        break;
      default:
        assert(false, "bad register type for spill");
        ShouldNotReachHere();
      }
      st->print("\t# vector spill size = %d", vsize);
    } else if (ideal_reg() == Op_RegVectMask) {
      assert(Matcher::supports_scalable_vector(), "bad register type for spill");
      int vsize = Matcher::scalable_predicate_reg_slots() * 32;
      st->print("\t# predicate spill size = %d", vsize);
    } else {
      st->print("\t# spill size = %d", is64 ? 64 : 32);
    }
  }

  return 0;

}
 2182 
#ifndef PRODUCT
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  // Without a register allocator there are no assigned registers to show.
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    implementation(nullptr, ra_, false, st);
}
#endif

// Emit the copy; formatting is done by implementation() when st != nullptr.
void MachSpillCopyNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  implementation(masm, ra_, false, nullptr);
}

uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  // Variable size; compute from the emitted code.
  return MachNode::size(ra_);
}
 2199 
 2200 //=============================================================================
 2201 
 2202 #ifndef PRODUCT
 2203 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 2204   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 2205   int reg = ra_->get_reg_first(this);
 2206   st->print("add %s, rsp, #%d]\t# box lock",
 2207             Matcher::regName[reg], offset);
 2208 }
 2209 #endif
 2210 
// Materialize the stack address of the box-lock slot into the destination
// register: dst = sp + offset.
void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  // This add will handle any 24-bit signed offset. 24 bits allows an
  // 8 megabyte stack frame.
  __ add(as_Register(reg), sp, offset);
}
 2219 
 2220 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
 2221   // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
 2222   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 2223 
 2224   if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
 2225     return NativeInstruction::instruction_size;
 2226   } else {
 2227     return 2 * NativeInstruction::instruction_size;
 2228   }
 2229 }
 2230 
 2231 //=============================================================================
 2232 
 2233 #ifndef PRODUCT
 2234 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 2235 {
 2236   st->print_cr("# MachUEPNode");
 2237   if (UseCompressedClassPointers) {
 2238     st->print_cr("\tldrw rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 2239     st->print_cr("\tldrw r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
 2240     st->print_cr("\tcmpw rscratch1, r10");
 2241   } else {
 2242     st->print_cr("\tldr rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 2243     st->print_cr("\tldr r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
 2244     st->print_cr("\tcmp rscratch1, r10");
 2245   }
 2246   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
 2247 }
 2248 #endif
 2249 
// Unverified entry point: emit the inline-cache check (see
// MacroAssembler::ic_check), aligned to InteriorEntryAlignment.
void MachUEPNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
{
  __ ic_check(InteriorEntryAlignment);
}

uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  // Variable size (alignment-dependent); compute from the emitted code.
  return MachNode::size(ra_);
}
 2259 
 2260 // REQUIRED EMIT CODE
 2261 
 2262 //=============================================================================
 2263 
 2264 // Emit exception handler code.
// Emit the exception handler stub: a far jump to the exception blob.
// Returns the stub's offset within the code buffer, or 0 when the code
// cache is full (a bailout is recorded in that case).
int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  address base = __ start_a_stub(size_exception_handler());
  if (base == nullptr) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
 2282 
// Emit deopt handler code.
// Returns the stub's offset within the code buffer, or 0 when the code
// cache is full (a bailout is recorded in that case).
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  address base = __ start_a_stub(size_deopt_handler());
  if (base == nullptr) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // Load the current pc into lr before jumping to the unpack blob.
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset == (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
 2302 
 2303 // REQUIRED MATCHER CODE
 2304 
 2305 //=============================================================================
 2306 
 2307 bool Matcher::match_rule_supported(int opcode) {
 2308   if (!has_match_rule(opcode))
 2309     return false;
 2310 
 2311   switch (opcode) {
 2312     case Op_OnSpinWait:
 2313       return VM_Version::supports_on_spin_wait();
 2314     case Op_CacheWB:
 2315     case Op_CacheWBPreSync:
 2316     case Op_CacheWBPostSync:
 2317       if (!VM_Version::supports_data_cache_line_flush()) {
 2318         return false;
 2319       }
 2320       break;
 2321     case Op_ExpandBits:
 2322     case Op_CompressBits:
 2323       if (!VM_Version::supports_svebitperm()) {
 2324         return false;
 2325       }
 2326       break;
 2327     case Op_FmaF:
 2328     case Op_FmaD:
 2329     case Op_FmaVF:
 2330     case Op_FmaVD:
 2331       if (!UseFMA) {
 2332         return false;
 2333       }
 2334       break;
 2335     case Op_FmaHF:
 2336       // UseFMA flag also needs to be checked along with FEAT_FP16
 2337       if (!UseFMA || !is_feat_fp16_supported()) {
 2338         return false;
 2339       }
 2340       break;
 2341     case Op_AddHF:
 2342     case Op_SubHF:
 2343     case Op_MulHF:
 2344     case Op_DivHF:
 2345     case Op_MinHF:
 2346     case Op_MaxHF:
 2347     case Op_SqrtHF:
 2348       // Half-precision floating point scalar operations require FEAT_FP16
 2349       // to be available. FEAT_FP16 is enabled if both "fphp" and "asimdhp"
 2350       // features are supported.
 2351       if (!is_feat_fp16_supported()) {
 2352         return false;
 2353       }
 2354       break;
 2355   }
 2356 
 2357   return true; // Per default match rules are supported.
 2358 }
 2359 
// Register mask for SVE predicate registers.
const RegMask* Matcher::predicate_reg_mask(void) {
  return &_PR_REG_mask;
}

// Vector calling convention is available iff the Vector API support flag
// is enabled.
bool Matcher::supports_vector_calling_convention(void) {
  return EnableVectorSupport;
}

// Register pair used to return a vector value: v0, with the upper bound
// widened for VecX/VecA ideal registers.
OptoRegPair Matcher::vector_return_value(uint ideal_reg) {
  assert(EnableVectorSupport, "sanity");
  int lo = V0_num;
  int hi = V0_H_num;
  if (ideal_reg == Op_VecX || ideal_reg == Op_VecA) {
    hi = V0_K_num;
  }
  return OptoRegPair(hi, lo);
}
 2377 
 2378 // Is this branch offset short enough that a short branch can be used?
 2379 //
 2380 // NOTE: If the platform does not provide any short branch variants, then
 2381 //       this method should return false for offset 0.
 2382 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
 2383   // The passed offset is relative to address of the branch.
 2384 
 2385   return (-32768 <= offset && offset < 32768);
 2386 }
 2387 
 2388 // Vector width in bytes.
// Vector width in bytes for the given element type; returns 0 when the
// usable width is too small to hold a meaningful vector of that type.
int Matcher::vector_width_in_bytes(BasicType bt) {
  // The MaxVectorSize should have been set by detecting SVE max vector register size.
  int size = MIN2((UseSVE > 0) ? (int)FloatRegister::sve_vl_max : (int)FloatRegister::neon_vl, (int)MaxVectorSize);
  // Minimum 2 values in vector
  if (size < 2*type2aelembytes(bt)) size = 0;
  // But never < 4
  if (size < 4) size = 0;
  return size;
}
 2398 
 2399 // Limits on vector size (number of elements) loaded into vector.
 2400 int Matcher::max_vector_size(const BasicType bt) {
 2401   return vector_width_in_bytes(bt)/type2aelembytes(bt);
 2402 }
 2403 
int Matcher::min_vector_size(const BasicType bt) {
  // Usually, the shortest vector length supported by AArch64 ISA and
  // Vector API species is 64 bits. However, we allow 32-bit or 16-bit
  // vectors in a few special cases.
  int size;
  switch(bt) {
    case T_BOOLEAN:
      // Load/store a vector mask with only 2 elements for vector types
      // such as "2I/2F/2L/2D".
      size = 2;
      break;
    case T_BYTE:
      // Generate a "4B" vector, to support vector cast between "8B/16B"
      // and "4S/4I/4L/4F/4D".
      size = 4;
      break;
    case T_SHORT:
      // Generate a "2S" vector, to support vector cast between "4S/8S"
      // and "2I/2L/2F/2D".
      size = 2;
      break;
    default:
      // Limit the min vector length to 64-bit.
      size = 8 / type2aelembytes(bt);
      // The number of elements in a vector should be at least 2.
      size = MAX2(size, 2);
  }

  // The minimum can never exceed the maximum for this element type.
  int max_size = max_vector_size(bt);
  return MIN2(size, max_size);
}
 2435 
// Auto-vectorization uses the same upper bound as the Vector API.
int Matcher::max_vector_size_auto_vectorization(const BasicType bt) {
  return Matcher::max_vector_size(bt);
}

// Actual max scalable vector register length.
int Matcher::scalable_vector_reg_size(const BasicType bt) {
  return Matcher::max_vector_size(bt);
}
 2444 
// Vector ideal reg.
// Map a vector length in bytes to the ideal register type that holds it.
uint Matcher::vector_ideal_reg(int len) {
  // Lengths beyond the NEON maximum require SVE's scalable VecA.
  if (UseSVE > 0 && FloatRegister::neon_vl < len && len <= FloatRegister::sve_vl_max) {
    return Op_VecA;
  }
  switch(len) {
    // For 16-bit/32-bit mask vector, reuse VecD.
    case  2:
    case  4:
    case  8: return Op_VecD;
    case 16: return Op_VecX;
  }
  ShouldNotReachHere();
  return 0;
}
 2460 
// Replace a generic vector operand with the platform operand matching the
// given ideal register type.
MachOper* Matcher::pd_specialize_generic_vector_operand(MachOper* generic_opnd, uint ideal_reg, bool is_temp) {
  assert(Matcher::is_generic_vector(generic_opnd), "not generic");
  switch (ideal_reg) {
    case Op_VecA: return new vecAOper();
    case Op_VecD: return new vecDOper();
    case Op_VecX: return new vecXOper();
  }
  ShouldNotReachHere();
  return nullptr;
}

// AArch64 defines no dedicated reg-to-reg move machine nodes.
bool Matcher::is_reg2reg_move(MachNode* m) {
  return false;
}

// A generic vector operand is identified by the VREG opcode.
bool Matcher::is_generic_vector(MachOper* opnd)  {
  return opnd->opcode() == VREG;
}
 2479 
 2480 // Return whether or not this register is ever used as an argument.
 2481 // This function is used on startup to build the trampoline stubs in
 2482 // generateOptoStub.  Registers not mentioned will be killed by the VM
 2483 // call in the trampoline, and arguments in those registers not be
 2484 // available to the callee.
 2485 bool Matcher::can_be_java_arg(int reg)
 2486 {
 2487   return
 2488     reg ==  R0_num || reg == R0_H_num ||
 2489     reg ==  R1_num || reg == R1_H_num ||
 2490     reg ==  R2_num || reg == R2_H_num ||
 2491     reg ==  R3_num || reg == R3_H_num ||
 2492     reg ==  R4_num || reg == R4_H_num ||
 2493     reg ==  R5_num || reg == R5_H_num ||
 2494     reg ==  R6_num || reg == R6_H_num ||
 2495     reg ==  R7_num || reg == R7_H_num ||
 2496     reg ==  V0_num || reg == V0_H_num ||
 2497     reg ==  V1_num || reg == V1_H_num ||
 2498     reg ==  V2_num || reg == V2_H_num ||
 2499     reg ==  V3_num || reg == V3_H_num ||
 2500     reg ==  V4_num || reg == V4_H_num ||
 2501     reg ==  V5_num || reg == V5_H_num ||
 2502     reg ==  V6_num || reg == V6_H_num ||
 2503     reg ==  V7_num || reg == V7_H_num;
 2504 }
 2505 
// Any register that can carry a Java argument may also be spilled.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}

// Integer register pressure threshold used by the register allocator.
uint Matcher::int_pressure_limit()
{
  // JDK-8183543: When taking the number of available registers as int
  // register pressure threshold, the jtreg test:
  // test/hotspot/jtreg/compiler/regalloc/TestC2IntPressure.java
  // failed due to C2 compilation failure with
  // "COMPILE SKIPPED: failed spill-split-recycle sanity check".
  //
  // A derived pointer is live at CallNode and then is flagged by RA
  // as a spilled LRG. Spilling heuristics(Spill-USE) explicitly skip
  // derived pointers and lastly fail to spill after reaching maximum
  // number of iterations. Lowering the default pressure threshold to
  // (_NO_SPECIAL_REG32_mask.Size() minus 1) forces CallNode to become
  // a high register pressure area of the code so that split_DEF can
  // generate DefinitionSpillCopy for the derived pointer.
  uint default_int_pressure_threshold = _NO_SPECIAL_REG32_mask.Size() - 1;
  if (!PreserveFramePointer) {
    // When PreserveFramePointer is off, frame pointer is allocatable,
    // but different from other SOC registers, it is excluded from
    // fatproj's mask because its save type is No-Save. Decrease 1 to
    // ensure high pressure at fatproj when PreserveFramePointer is off.
    // See check_pressure_at_fatproj().
    default_int_pressure_threshold--;
  }
  // An explicit INTPRESSURE flag overrides the computed default.
  return (INTPRESSURE == -1) ? default_int_pressure_threshold : INTPRESSURE;
}

// Float register pressure threshold used by the register allocator.
uint Matcher::float_pressure_limit()
{
  // _FLOAT_REG_mask is generated by adlc from the float_reg register class.
  return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.Size() : FLOATPRESSURE;
}
 2543 
// No hand-written assembler stub is used for long division by a
// constant on AArch64.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
 2547 
// Projection register masks for combined div/mod nodes.  These must
// never be requested on AArch64 (ShouldNotReachHere), since the
// matcher here does not form DivMod nodes.

// Register for DIVI projection of divmodI.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
 2570 
// Mask of the register used to preserve SP across a method-handle
// invoke; on AArch64 this is the frame pointer register.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
 2574 
 2575 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
 2576   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
 2577     Node* u = addp->fast_out(i);
 2578     if (u->is_LoadStore()) {
 2579       // On AArch64, LoadStoreNodes (i.e. compare and swap
 2580       // instructions) only take register indirect as an operand, so
 2581       // any attempt to use an AddPNode as an input to a LoadStoreNode
 2582       // must fail.
 2583       return false;
 2584     }
 2585     if (u->is_Mem()) {
 2586       int opsize = u->as_Mem()->memory_size();
 2587       assert(opsize > 0, "unexpected memory operand size");
 2588       if (u->as_Mem()->memory_size() != (1<<shift)) {
 2589         return false;
 2590       }
 2591     }
 2592   }
 2593   return true;
 2594 }
 2595 
// Convert BoolTest condition to Assembler condition.
// Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
Assembler::Condition to_assembler_cond(BoolTest::mask cond) {
  Assembler::Condition result;
  switch(cond) {
    // Equality and signed comparisons.
    case BoolTest::eq:
      result = Assembler::EQ; break;
    case BoolTest::ne:
      result = Assembler::NE; break;
    case BoolTest::le:
      result = Assembler::LE; break;
    case BoolTest::ge:
      result = Assembler::GE; break;
    case BoolTest::lt:
      result = Assembler::LT; break;
    case BoolTest::gt:
      result = Assembler::GT; break;
    // Unsigned comparisons map to the carry-based condition codes.
    case BoolTest::ule:
      result = Assembler::LS; break;
    case BoolTest::uge:
      result = Assembler::HS; break;
    case BoolTest::ult:
      result = Assembler::LO; break;
    case BoolTest::ugt:
      result = Assembler::HI; break;
    // Overflow tests.
    case BoolTest::overflow:
      result = Assembler::VS; break;
    case BoolTest::no_overflow:
      result = Assembler::VC; break;
    default:
      ShouldNotReachHere();
      return Assembler::Condition(-1);
  }

  // Check conversion
  // Cross-check against the ADLC-generated cmpOp/cmpOpU operand
  // encodings.  The unsigned variants strip the unsigned_compare bit
  // first, matching how cmpOpUOper stores its condition.
  if (cond & BoolTest::unsigned_compare) {
    assert(cmpOpUOper((BoolTest::mask)((int)cond & ~(BoolTest::unsigned_compare))).ccode() == result, "Invalid conversion");
  } else {
    assert(cmpOpOper(cond).ccode() == result, "Invalid conversion");
  }

  return result;
}
 2639 
// Binary src (Replicate con)
// Decide whether the (Replicate con) input 'm' of node 'n' can be
// matched as an SVE immediate operand, so the matcher may clone it
// instead of materializing the broadcast in a register.
static bool is_valid_sve_arith_imm_pattern(Node* n, Node* m) {
  if (n == nullptr || m == nullptr) {
    return false;
  }

  // Only relevant when SVE is available and 'm' is a vector broadcast.
  if (UseSVE == 0 || m->Opcode() != Op_Replicate) {
    return false;
  }

  // The broadcast input must be an integral constant.
  Node* imm_node = m->in(1);
  if (!imm_node->is_Con()) {
    return false;
  }

  const Type* t = imm_node->bottom_type();
  if (!(t->isa_int() || t->isa_long())) {
    return false;
  }

  switch (n->Opcode()) {
  case Op_AndV:
  case Op_OrV:
  case Op_XorV: {
    // Logical ops: the constant must be encodable as an SVE logical
    // immediate for the vector's element width.
    Assembler::SIMD_RegVariant T = Assembler::elemType_to_regVariant(Matcher::vector_element_basic_type(n));
    uint64_t value = t->isa_long() ? (uint64_t)imm_node->get_long() : (uint64_t)imm_node->get_int();
    return Assembler::operand_valid_for_sve_logical_immediate(Assembler::regVariant_to_elemBits(T), value);
  }
  case Op_AddVB:
    // Byte add: accept [-255, 255]; negative values can be emitted as
    // the corresponding subtract.
    return (imm_node->get_int() <= 255 && imm_node->get_int() >= -255);
  case Op_AddVS:
  case Op_AddVI:
    return Assembler::operand_valid_for_sve_add_sub_immediate((int64_t)imm_node->get_int());
  case Op_AddVL:
    return Assembler::operand_valid_for_sve_add_sub_immediate(imm_node->get_long());
  default:
    return false;
  }
}
 2679 
 2680 // (XorV src (Replicate m1))
 2681 // (XorVMask src (MaskAll m1))
 2682 static bool is_vector_bitwise_not_pattern(Node* n, Node* m) {
 2683   if (n != nullptr && m != nullptr) {
 2684     return (n->Opcode() == Op_XorV || n->Opcode() == Op_XorVMask) &&
 2685            VectorNode::is_all_ones_vector(m);
 2686   }
 2687   return false;
 2688 }
 2689 
 2690 // Should the matcher clone input 'm' of node 'n'?
 2691 bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
 2692   if (is_vshift_con_pattern(n, m) ||
 2693       is_vector_bitwise_not_pattern(n, m) ||
 2694       is_valid_sve_arith_imm_pattern(n, m) ||
 2695       is_encode_and_store_pattern(n, m)) {
 2696     mstack.push(m, Visit);
 2697     return true;
 2698   }
 2699   return false;
 2700 }
 2701 
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {

  // Loads and stores with indirect memory input (e.g., volatile loads and
  // stores) do not subsume the input into complex addressing expressions. If
  // the addressing expression is input to at least one such load or store, do
  // not clone the addressing expression. Query needs_acquiring_load and
  // needs_releasing_store as a proxy for indirect memory input, as it is not
  // possible to directly query for indirect memory input at this stage.
  for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
    Node* n = m->fast_out(i);
    if (n->is_Load() && needs_acquiring_load(n)) {
      return false;
    }
    if (n->is_Store() && needs_releasing_store(n)) {
      return false;
    }
  }

  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  // First shape: (AddP base address (LShiftL (ConvI2L idx) con)) --
  // a scaled (and possibly sign-extended) index.
  Node *off = m->in(AddPNode::Offset);
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    // Second shape: (AddP base address (ConvI2L idx)) -- an unscaled
    // sign-extended index.
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
 2759 
// Emit a volatile (acquire/release) access.  The instructions passed
// as INSN below only support plain register-indirect addressing, so
// any operand carrying an index, scale or displacement is rejected.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
 2767 
 2768 
// Build an Address from the components of a decomposed memory
// operand.  For the *I2L addressing opcodes the 32-bit index register
// must be sign-extended (sxtw) to 64 bits; all other indexed modes
// use a plain left shift by the scale.
static Address mem2address(int opcode, Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
    case INDINDEXI2L:
    case INDINDEXI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      // No index register: simple base + displacement.
      return Address(base, disp);
    } else {
      // Indexed modes cannot also carry a displacement.
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      return Address(base, as_Register(index), scale);
    }
  }
 2794 
 2795 
// Pointer-to-member types for the MacroAssembler emit routines that
// the loadStore() helpers below dispatch through.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_insn2)(Register Rt, Register adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
 2801 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  //
  // size_in_memory is the access size in bytes, used to decide
  // whether a base+offset address needs to be rewritten through
  // rscratch1 when the offset is out of range for the instruction.
  static void loadStore(C2_MacroAssembler* masm, mem_insn insn,
                        Register reg, int opcode,
                        Register base, int index, int scale, int disp,
                        int size_in_memory)
  {
    Address addr = mem2address(opcode, base, index, scale, disp);
    if (addr.getMode() == Address::base_plus_offset) {
      /* Fix up any out-of-range offsets. */
      // legitimize_address may clobber rscratch1, so it must not
      // alias the base or data register.
      assert_different_registers(rscratch1, base);
      assert_different_registers(rscratch1, reg);
      addr = __ legitimize_address(addr, size_in_memory, rscratch1);
    }
    (masm->*insn)(reg, addr);
  }
 2819 
  // Float/double variant of loadStore.  Replicates mem2address's
  // opcode-driven choice between a sign-extended (sxtw) and a plain
  // shifted (lsl) index.
  static void loadStore(C2_MacroAssembler* masm, mem_float_insn insn,
                        FloatRegister reg, int opcode,
                        Register base, int index, int size, int disp,
                        int size_in_memory)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      // Fix up any out-of-range offsets.
      assert_different_registers(rscratch1, base);
      Address addr = Address(base, disp);
      addr = __ legitimize_address(addr, size_in_memory, rscratch1);
      (masm->*insn)(reg, addr);
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm->*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
 2847 
  // Vector variant of loadStore.  T selects the SIMD register width.
  // No out-of-range offset fix-up is performed here: only base+disp
  // or base + (index << size) addressing is emitted.
  static void loadStore(C2_MacroAssembler* masm, mem_vector_insn insn,
                        FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                        int opcode, Register base, int index, int size, int disp)
  {
    if (index == -1) {
      (masm->*insn)(reg, T, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode");
      (masm->*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
    }
  }
 2859 
 2860 %}
 2861 
 2862 
 2863 
 2864 //----------ENCODING BLOCK-----------------------------------------------------
 2865 // This block specifies the encoding classes used by the compiler to
 2866 // output byte streams.  Encoding classes are parameterized macros
 2867 // used by Machine Instruction Nodes in order to generate the bit
 2868 // encoding of the instruction.  Operands specify their base encoding
 2869 // interface with the interface keyword.  There are currently
 2870 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
 2871 // COND_INTER.  REG_INTER causes an operand to generate a function
 2872 // which returns its register number when queried.  CONST_INTER causes
 2873 // an operand to generate a function which returns the value of the
 2874 // constant when queried.  MEMORY_INTER causes an operand to generate
 2875 // four functions which return the Base Register, the Index Register,
 2876 // the Scale Value, and the Offset Value of the operand when queried.
 2877 // COND_INTER causes an operand to generate six functions which return
 2878 // the encoding code (ie - encoding bits for the instruction)
 2879 // associated with each basic boolean condition for a conditional
 2880 // instruction.
 2881 //
 2882 // Instructions specify two basic values for encoding.  Again, a
 2883 // function is available to check if the constant displacement is an
 2884 // oop. They use the ins_encode keyword to specify their encoding
 2885 // classes (which must be a sequence of enc_class names, and their
 2886 // parameters, specified in the encoding block), and they use the
 2887 // opcode keyword to specify, in order, their primary, secondary, and
 2888 // tertiary opcode.  Only the opcode sections which a particular
 2889 // instruction needs for encoding need to be specified.
 2890 encode %{
 2891   // Build emit functions for each basic byte or larger field in the
 2892   // intel encoding scheme (opcode, rm, sib, immediate), and call them
 2893   // from C++ code in the enc_class source block.  Emit functions will
 2894   // live in the main source block for now.  In future, we can
 2895   // generalize this by adding a syntax that specifies the sizes of
 2896   // fields in an order, so that the adlc can build the emit functions
 2897   // automagically
 2898 
 2899   // catch all for unimplemented encodings
  enc_class enc_unimplemented %{
    // Stop with a diagnostic if an instruct rule without a real
    // encoding is ever selected.
    __ unimplemented("C2 catch all");
  %}
 2903 
  // BEGIN Non-volatile memory access
  //
  // The enc_class definitions in this section are emitted from the
  // ad_encode.m4 template; change that template, not this file, when
  // modifying them.  Each one decomposes its memory operand and
  // dispatches through a loadStore() helper, passing the access size
  // in bytes so that out-of-range offsets can be legitimized.

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(masm, &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsb(iRegI dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(masm, &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrb(iRegI dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(masm, &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrb(iRegL dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(masm, &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrshw(iRegI dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(masm, &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsh(iRegI dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(masm, &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrh(iRegI dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(masm, &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrh(iRegL dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(masm, &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrw(iRegI dst, memory4 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(masm, &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrw(iRegL dst, memory4 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(masm, &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsw(iRegL dst, memory4 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(masm, &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldr(iRegL dst, memory8 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(masm, &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrs(vRegF dst, memory4 mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrd(vRegD dst, memory8 mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strb(iRegI src, memory1 mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(masm, &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strb0(memory1 mem) %{
    loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strh(iRegI src, memory2 mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(masm, &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strh0(memory2 mem) %{
    loadStore(masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strw(iRegI src, memory4 mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(masm, &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strw0(memory4 mem) %{
    loadStore(masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_str(iRegL src, memory8 mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(masm, &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_str0(memory8 mem) %{
    loadStore(masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strs(vRegF src, memory4 mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strd(vRegD src, memory8 mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  // NOTE(review): declared with a memory4 operand but emits a 1-byte
  // store (size_in_memory == 1) -- confirm against ad_encode.m4.
  enc_class aarch64_enc_strb0_ordered(memory4 mem) %{
      __ membar(Assembler::StoreStore);
      loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // END Non-volatile memory access
 3110 
  // Vector loads and stores
  //
  // These dispatch through the vector loadStore() overload; the
  // MacroAssembler::H/S/D/Q variant selects the SIMD access width.

  enc_class aarch64_enc_ldrvH(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvS(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvH(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvS(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
 3159 
 3160   // volatile loads and stores
 3161 
 3162   enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
 3163     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3164                  rscratch1, stlrb);
 3165   %}
 3166 
 3167   enc_class aarch64_enc_stlrb0(memory mem) %{
 3168     MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3169                  rscratch1, stlrb);
 3170   %}
 3171 
 3172   enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
 3173     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3174                  rscratch1, stlrh);
 3175   %}
 3176 
 3177   enc_class aarch64_enc_stlrh0(memory mem) %{
 3178     MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3179                  rscratch1, stlrh);
 3180   %}
 3181 
 3182   enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
 3183     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3184                  rscratch1, stlrw);
 3185   %}
 3186 
 3187   enc_class aarch64_enc_stlrw0(memory mem) %{
 3188     MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3189                  rscratch1, stlrw);
 3190   %}
 3191 
 3192   enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
 3193     Register dst_reg = as_Register($dst$$reg);
 3194     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3195              rscratch1, ldarb);
 3196     __ sxtbw(dst_reg, dst_reg);
 3197   %}
 3198 
 3199   enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
 3200     Register dst_reg = as_Register($dst$$reg);
 3201     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3202              rscratch1, ldarb);
 3203     __ sxtb(dst_reg, dst_reg);
 3204   %}
 3205 
 3206   enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
 3207     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3208              rscratch1, ldarb);
 3209   %}
 3210 
 3211   enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
 3212     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3213              rscratch1, ldarb);
 3214   %}
 3215 
 3216   enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
 3217     Register dst_reg = as_Register($dst$$reg);
 3218     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3219              rscratch1, ldarh);
 3220     __ sxthw(dst_reg, dst_reg);
 3221   %}
 3222 
 3223   enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
 3224     Register dst_reg = as_Register($dst$$reg);
 3225     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3226              rscratch1, ldarh);
 3227     __ sxth(dst_reg, dst_reg);
 3228   %}
 3229 
 3230   enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
 3231     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3232              rscratch1, ldarh);
 3233   %}
 3234 
 3235   enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
 3236     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3237              rscratch1, ldarh);
 3238   %}
 3239 
 3240   enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
 3241     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3242              rscratch1, ldarw);
 3243   %}
 3244 
 3245   enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
 3246     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3247              rscratch1, ldarw);
 3248   %}
 3249 
 3250   enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
 3251     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3252              rscratch1, ldar);
 3253   %}
 3254 
  // Volatile float load: there is no FP load-acquire, so acquire into
  // rscratch1 with ldarw and move the bits into the FP register.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // Volatile double load: same trick with a 64-bit ldar + fmovd.
  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
 3266 
  // store-release doubleword from a general register.
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    // (sp is not a valid source for stlr), so copy sp into rscratch2
    // and store that instead. The assert checks this only happens for
    // the expected thread-field store.
    if (src_reg == r31_sp) {
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // store-release doubleword of constant zero
  enc_class aarch64_enc_stlr0(memory mem) %{
    MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // Volatile float store: no FP release store exists, so move the bits
  // to rscratch2 and emit a word store-release.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // Volatile double store: same pattern, 64-bit store-release.
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
 3302 
 3303   // synchronized read/update encodings
 3304 
 3305   enc_class aarch64_enc_ldaxr(iRegL dst, memory8 mem) %{
 3306     Register dst_reg = as_Register($dst$$reg);
 3307     Register base = as_Register($mem$$base);
 3308     int index = $mem$$index;
 3309     int scale = $mem$$scale;
 3310     int disp = $mem$$disp;
 3311     if (index == -1) {
 3312        if (disp != 0) {
 3313         __ lea(rscratch1, Address(base, disp));
 3314         __ ldaxr(dst_reg, rscratch1);
 3315       } else {
 3316         // TODO
 3317         // should we ever get anything other than this case?
 3318         __ ldaxr(dst_reg, base);
 3319       }
 3320     } else {
 3321       Register index_reg = as_Register(index);
 3322       if (disp == 0) {
 3323         __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
 3324         __ ldaxr(dst_reg, rscratch1);
 3325       } else {
 3326         __ lea(rscratch1, Address(base, disp));
 3327         __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
 3328         __ ldaxr(dst_reg, rscratch1);
 3329       }
 3330     }
 3331   %}
 3332 
 3333   enc_class aarch64_enc_stlxr(iRegLNoSp src, memory8 mem) %{
 3334     Register src_reg = as_Register($src$$reg);
 3335     Register base = as_Register($mem$$base);
 3336     int index = $mem$$index;
 3337     int scale = $mem$$scale;
 3338     int disp = $mem$$disp;
 3339     if (index == -1) {
 3340        if (disp != 0) {
 3341         __ lea(rscratch2, Address(base, disp));
 3342         __ stlxr(rscratch1, src_reg, rscratch2);
 3343       } else {
 3344         // TODO
 3345         // should we ever get anything other than this case?
 3346         __ stlxr(rscratch1, src_reg, base);
 3347       }
 3348     } else {
 3349       Register index_reg = as_Register(index);
 3350       if (disp == 0) {
 3351         __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
 3352         __ stlxr(rscratch1, src_reg, rscratch2);
 3353       } else {
 3354         __ lea(rscratch2, Address(base, disp));
 3355         __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
 3356         __ stlxr(rscratch1, src_reg, rscratch2);
 3357       }
 3358     }
 3359     __ cmpw(rscratch1, zr);
 3360   %}
 3361 
  // Compare-and-exchange encodings (release-only; acquire variants follow
  // below). All require a bare base-register address -- the guarantee
  // rejects any index/displacement. Result is reported via the flags
  // (noreg result register), to be consumed by aarch64_enc_cset_eq.

  // 64-bit CAS
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit CAS
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 16-bit CAS
  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 8-bit CAS
  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3389 
 3390 
 3391   // The only difference between aarch64_enc_cmpxchg and
 3392   // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
 3393   // CompareAndSwap sequence to serve as a barrier on acquiring a
 3394   // lock.
  // 64-bit CAS with acquire semantics (see comment above: used where the
  // CompareAndSwap doubles as a lock-acquire barrier).
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit acquiring CAS
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 16-bit acquiring CAS
  enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 8-bit acquiring CAS
  enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3422 
 3423   // auxiliary used for CompareAndSwapX to set result register
 3424   enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
 3425     Register res_reg = as_Register($res$$reg);
 3426     __ cset(res_reg, Assembler::EQ);
 3427   %}
 3428 
 3429   // prefetch encodings
 3430 
 3431   enc_class aarch64_enc_prefetchw(memory mem) %{
 3432     Register base = as_Register($mem$$base);
 3433     int index = $mem$$index;
 3434     int scale = $mem$$scale;
 3435     int disp = $mem$$disp;
 3436     if (index == -1) {
 3437       // Fix up any out-of-range offsets.
 3438       assert_different_registers(rscratch1, base);
 3439       Address addr = Address(base, disp);
 3440       addr = __ legitimize_address(addr, 8, rscratch1);
 3441       __ prfm(addr, PSTL1KEEP);
 3442     } else {
 3443       Register index_reg = as_Register(index);
 3444       if (disp == 0) {
 3445         __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
 3446       } else {
 3447         __ lea(rscratch1, Address(base, disp));
 3448 	__ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
 3449       }
 3450     }
 3451   %}
 3452 
 3453   // mov encodings
 3454 
 3455   enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
 3456     uint32_t con = (uint32_t)$src$$constant;
 3457     Register dst_reg = as_Register($dst$$reg);
 3458     if (con == 0) {
 3459       __ movw(dst_reg, zr);
 3460     } else {
 3461       __ movw(dst_reg, con);
 3462     }
 3463   %}
 3464 
 3465   enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
 3466     Register dst_reg = as_Register($dst$$reg);
 3467     uint64_t con = (uint64_t)$src$$constant;
 3468     if (con == 0) {
 3469       __ mov(dst_reg, zr);
 3470     } else {
 3471       __ mov(dst_reg, con);
 3472     }
 3473   %}
 3474 
  // Materialize a pointer constant. Dispatches on the constant's reloc
  // type: oops and metadata need relocation records; plain addresses use
  // adrp+add when they are valid, reachable addresses, otherwise a full
  // immediate move.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == nullptr || con == (address)1) {
      // null and the one-constant are handled by dedicated encodings below.
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        // Low addresses (below the first page) and addresses adrp cannot
        // reach are moved as plain immediates.
        if (! __ is_valid_AArch64_address(con) ||
            con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          uint64_t offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}

  // pointer constant null
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // pointer constant one
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (uint64_t)1);
  %}

  // narrow-oop constant (must carry an oop relocation)
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == nullptr) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // narrow-oop constant null
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // narrow-klass constant (must carry a metadata relocation)
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == nullptr) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
 3538 
 3539   // arithmetic encodings
 3540 
  // 32-bit add/subtract with immediate. A single encoding serves both
  // AddI and SubI instructs: $primary selects subtract by negating the
  // constant, and a negative effective constant is emitted as the
  // opposite operation on its absolute value (add/sub immediates must
  // be non-negative).
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // 64-bit add/subtract with immediate; same scheme as above. The
  // constant fits an add/sub immediate (immLAddSub), hence int32_t.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
 3566 
 3567   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
 3568    Register dst_reg = as_Register($dst$$reg);
 3569    Register src1_reg = as_Register($src1$$reg);
 3570    Register src2_reg = as_Register($src2$$reg);
 3571     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
 3572   %}
 3573 
 3574   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
 3575    Register dst_reg = as_Register($dst$$reg);
 3576    Register src1_reg = as_Register($src1$$reg);
 3577    Register src2_reg = as_Register($src2$$reg);
 3578     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
 3579   %}
 3580 
 3581   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
 3582    Register dst_reg = as_Register($dst$$reg);
 3583    Register src1_reg = as_Register($src1$$reg);
 3584    Register src2_reg = as_Register($src2$$reg);
 3585     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
 3586   %}
 3587 
 3588   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
 3589    Register dst_reg = as_Register($dst$$reg);
 3590    Register src1_reg = as_Register($src1$$reg);
 3591    Register src2_reg = as_Register($src2$$reg);
 3592     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
 3593   %}
 3594 
 3595   // compare instruction encodings
 3596 
  // 32-bit register-register compare
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32-bit compare against an add/sub-range immediate: flags are set via
  // subsw/addsw into the zero register, choosing the op by the sign of val.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // 32-bit compare against an arbitrary immediate: materialize into
  // rscratch1 first.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    Register reg1 = as_Register($src1$$reg);
    uint32_t val = (uint32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}

  // 64-bit register-register compare
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare against a 12-bit-range immediate. val == -val only
  // for 0 (handled by the first branch) and Long.MIN_VALUE, which cannot
  // be negated and is materialized via orr instead.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      __ orr(rscratch1, zr, (uint64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // 64-bit compare against an arbitrary immediate via rscratch1.
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    Register reg1 = as_Register($src1$$reg);
    uint64_t val = (uint64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}

  // pointer compare (64-bit)
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // narrow-oop compare (32-bit)
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // pointer null-test
  enc_class aarch64_enc_testp(iRegP src) %{
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // narrow-oop null-test
  enc_class aarch64_enc_testn(iRegN src) %{
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
 3668 
 3669   enc_class aarch64_enc_b(label lbl) %{
 3670     Label *L = $lbl$$label;
 3671     __ b(*L);
 3672   %}
 3673 
 3674   enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
 3675     Label *L = $lbl$$label;
 3676     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
 3677   %}
 3678 
 3679   enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
 3680     Label *L = $lbl$$label;
 3681     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
 3682   %}
 3683 
  // Slow-path subtype check. On a hit the slow path falls through with
  // result zeroed (when $primary is set); on a miss it branches to the
  // local miss label, leaving the condition codes set.
  // NOTE(review): the exact meaning of $primary here depends on the
  // instructs that reference this encoding (not visible in this chunk).
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     nullptr, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
 3700 
  // Static Java call. Three cases:
  //  1. _method == nullptr: a call into the runtime (via trampoline).
  //  2. the intrinsic _ensureMaterializedForStackWalk: the call is elided,
  //     replaced by a nop to keep code size stable.
  //  3. a real Java target: trampoline call with an opt-virtual or static
  //     relocation, plus a to-interpreter stub (shared when possible).
  // Any trampoline/stub allocation failure bails out with a CodeCache-full
  // failure recorded on the current ciEnv.
  enc_class aarch64_enc_java_static_call(method meth) %{
    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type));
      if (call == nullptr) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else if (_method->intrinsic_id() == vmIntrinsicID::_ensureMaterializedForStackWalk) {
      // The NOP here is purely to ensure that eliding a call to
      // JVM_EnsureMaterializedForStackWalk doesn't change the code size.
      __ nop();
      __ block_comment("call JVM_EnsureMaterializedForStackWalk (elided)");
    } else {
      int method_index = resolved_method_index(masm);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec));
      if (call == nullptr) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
      if (CodeBuffer::supports_shared_stubs() && _method->can_be_statically_bound()) {
        // Calls of the same statically bound method can share
        // a stub to the interpreter.
        __ code()->shared_stub_to_interp_for(_method, call - __ begin());
      } else {
        // Emit stub for static call
        address stub = CompiledDirectCall::emit_to_interp_stub(masm, call);
        if (stub == nullptr) {
          ciEnv::current()->record_failure("CodeCache is full");
          return;
        }
      }
    }

    __ post_call_nop();

    // Only non uncommon_trap calls need to reinitialize ptrue.
    if (Compile::current()->max_vector_size() > 0 && uncommon_trap_request() == 0) {
      __ reinitialize_ptrue();
    }
  %}
 3746 
  // Dynamic (inline-cache) Java call; bails out if the IC call cannot be
  // emitted because the code cache is full. SVE's ptrue predicate must be
  // reinitialized after any call when vectors are in use.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    int method_index = resolved_method_index(masm);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
    __ post_call_nop();
    if (Compile::current()->max_vector_size() > 0) {
      __ reinitialize_ptrue();
    }
  %}

  // Post-call epilog: under VerifyStackAtCalls the stack-depth check is
  // not implemented on AArch64 and traps instead.
  enc_class aarch64_enc_call_epilog() %{
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
 3766 
  // Call from compiled Java into the runtime.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blr
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target lives in the code cache: reachable via trampoline call.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == nullptr) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
      __ post_call_nop();
    } else {
      Label retaddr;
      // Make the anchor frame walkable: record the return pc in the
      // thread's last_Java_pc before the indirect call.
      __ adr(rscratch2, retaddr);
      __ str(rscratch2, Address(rthread, JavaThread::last_Java_pc_offset()));
      __ lea(rscratch1, RuntimeAddress(entry));
      __ blr(rscratch1);
      __ bind(retaddr);
      __ post_call_nop();
    }
    // SVE predicate register must be reset after any call.
    if (Compile::current()->max_vector_size() > 0) {
      __ reinitialize_ptrue();
    }
  %}
 3795 
  // Jump to the rethrow stub (exception already set up by caller code).
  enc_class aarch64_enc_rethrow() %{
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}

  // Method return; in debug builds verify the SVE ptrue predicate is
  // still intact when vectors are in use.
  enc_class aarch64_enc_ret() %{
#ifdef ASSERT
    if (Compile::current()->max_vector_size() > 0) {
      __ verify_ptrue();
    }
#endif
    __ ret(lr);
  %}

  // Tail call: indirect jump to the target method.
  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}

  // Tail jump used for exception forwarding.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
 3822 
 3823 %}
 3824 
 3825 //----------FRAME--------------------------------------------------------------
 3826 // Definition of frame structure and management information.
 3827 //
 3828 //  S T A C K   L A Y O U T    Allocators stack-slot number
 3829 //                             |   (to get allocators register number
 3830 //  G  Owned by    |        |  v    add OptoReg::stack0())
 3831 //  r   CALLER     |        |
 3832 //  o     |        +--------+      pad to even-align allocators stack-slot
 3833 //  w     V        |  pad0  |        numbers; owned by CALLER
 3834 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
 3835 //  h     ^        |   in   |  5
 3836 //        |        |  args  |  4   Holes in incoming args owned by SELF
 3837 //  |     |        |        |  3
 3838 //  |     |        +--------+
 3839 //  V     |        | old out|      Empty on Intel, window on Sparc
 3840 //        |    old |preserve|      Must be even aligned.
 3841 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
 3842 //        |        |   in   |  3   area for Intel ret address
 3843 //     Owned by    |preserve|      Empty on Sparc.
 3844 //       SELF      +--------+
 3845 //        |        |  pad2  |  2   pad to align old SP
 3846 //        |        +--------+  1
 3847 //        |        | locks  |  0
 3848 //        |        +--------+----> OptoReg::stack0(), even aligned
 3849 //        |        |  pad1  | 11   pad to align new SP
 3850 //        |        +--------+
 3851 //        |        |        | 10
 3852 //        |        | spills |  9   spills
 3853 //        V        |        |  8   (pad0 slot for callee)
 3854 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
 3855 //        ^        |  out   |  7
 3856 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
 3857 //     Owned by    +--------+
 3858 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
 3859 //        |    new |preserve|      Must be even-aligned.
 3860 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
 3861 //        |        |        |
 3862 //
 3863 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
 3864 //         known from SELF's arguments and the Java calling convention.
 3865 //         Region 6-7 is determined per call site.
 3866 // Note 2: If the calling convention leaves holes in the incoming argument
 3867 //         area, those holes are owned by SELF.  Holes in the outgoing area
 3868 //         are owned by the CALLEE.  Holes should not be necessary in the
 3869 //         incoming area, as the Java calling convention is completely under
 3870 //         the control of the AD file.  Doubles can be sorted and packed to
 3871 //         avoid holes.  Holes in the outgoing arguments may be necessary for
 3872 //         varargs C calling conventions.
 3873 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
 3874 //         even aligned with pad0 as needed.
 3875 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
 3876 //           (the latter is true on Intel but is it false on AArch64?)
 3877 //         region 6-11 is even aligned; it may be padded out more so that
 3878 //         the region from SP to FP meets the minimum stack alignment.
 3879 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
 3880 //         alignment.  Region 11, pad1, may be dynamically extended so that
 3881 //         SP meets the minimum alignment.
 3882 
frame %{
  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or Method for I2C.
  inline_cache_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Tables are indexed by ideal register opcode (Op_RegN..Op_RegL):
    // lo gives the first register of the return pair, hi its upper half
    // (OptoReg::Bad for values that fit a single slot).
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                      // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
 3954 
//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
// Values given here are the defaults; individual instruct definitions
// may override them.
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction

// Whether this node is expanded during code emission into a sequence of
// instructions and the first instruction can perform an implicit null check.
ins_attrib ins_is_late_expanded_null_check_candidate(false);
 3976 
//----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct parsing
// in the ADLC because operands constitute user defined types which are used in
// instruction definitions.

//----------Simple Operands----------------------------------------------------

// Integer operands 32 bit
// 32 bit immediate (any value)
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift values for add/sub extension shift (0..4)
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer strictly greater than one
operand immI_gt_1()
%{
  predicate(n->get_int() > 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer less than or equal to four
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Specific 32 bit integer constants (shift amounts and masks) used by
// match rules elsewhere in this file.
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit byte mask (0xff)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit halfword mask (0xffff)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer strictly greater than zero
operand immI_positive()
%{
  predicate(n->get_int() > 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// BoolTest condition for signed compare
operand immI_cmp_cond()
%{
  predicate(!Matcher::is_unsigned_booltest_pred(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// BoolTest condition for unsigned compare
operand immI_cmpU_cond()
%{
  predicate(Matcher::is_unsigned_booltest_pred(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4160 
// 64 bit byte mask (0xff)
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit halfword mask (0xffff)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit word mask (0xffffffff)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit mask of contiguous low-order ones: nonzero, equal to 2^k - 1,
// with the top two bits clear (so k <= 62).
operand immL_bitmask()
%{
  predicate((n->get_long() != 0)
            && ((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit mask of contiguous low-order ones: nonzero, equal to 2^k - 1,
// with the top two bits clear (so k <= 30).
operand immI_bitmask()
%{
  predicate((n->get_int() != 0)
            && ((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit mask of contiguous low-order ones that also fits a positive
// 32 bit int: nonzero, equal to 2^k - 1, and strictly below 2^31.
operand immL_positive_bitmaskI()
%{
  predicate((n->get_long() != 0)
            && ((julong)n->get_long() < 0x80000000ULL)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4226 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 5 bit signed integer
operand immI5()
%{
  predicate(Assembler::is_simm(n->get_int(), 5));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 7 bit unsigned integer
operand immIU7()
%{
  predicate(Assembler::is_uimm(n->get_int(), 7));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for scaled or unscaled immediate loads and stores.
// The second argument to offset_ok_for_immed is the log2 of the access
// size, i.e. the scale applied to a scaled (uimm12) offset.

// Offset for a byte-sized (shift 0) access
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for a 1-byte access: same shift-0 check as immIOffset, kept as
// a distinct operand so memory operands can be named by access size.
operand immIOffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for a 2-byte (halfword) access
operand immIOffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for a 4-byte (word) access
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for an 8-byte (doubleword) access
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for a 16-byte (quadword) access
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset in [-256, 65520]: spans the unscaled simm9 range through
// the largest scaled uimm12 offset for a 16-byte access (4095 * 16).
operand immLOffset()
%{
  predicate(n->get_long() >= -256 && n->get_long() <= 65520);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset for a 1-byte access
operand immLoffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset for a 2-byte access
operand immLoffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset for a 4-byte access
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset for an 8-byte access
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset for a 16-byte access
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4380 
// 5 bit signed long integer
operand immL5()
%{
  predicate(Assembler::is_simm(n->get_long(), 5));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 7 bit unsigned long integer
operand immLU7()
%{
  predicate(Assembler::is_uimm(n->get_long(), 7));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 8 bit signed value.
operand immI8()
%{
  predicate(n->get_int() <= 127 && n->get_int() >= -128);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Int valid for an SVE DUP immediate:
// 8 bit signed value (simm8), or #simm8 LSL 8.
operand immIDupV()
%{
  predicate(Assembler::operand_valid_for_sve_dup_immediate((int64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long valid for an SVE DUP immediate:
// 8 bit signed value (simm8), or #simm8 LSL 8.
operand immLDupV()
%{
  predicate(Assembler::operand_valid_for_sve_dup_immediate(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Half-float (FP16) constant valid for an SVE DUP immediate:
// 8 bit signed value (simm8), or #simm8 LSL 8.
operand immHDupV()
%{
  predicate(Assembler::operand_valid_for_sve_dup_immediate((int64_t)n->geth()));
  match(ConH);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4446 
// 8 bit integer valid for vector add sub immediate
operand immBAddSubV()
%{
  predicate(n->get_int() <= 255 && n->get_int() >= -255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((int64_t)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for vector (SVE) add sub immediate
operand immIAddSubV()
%{
  predicate(Assembler::operand_valid_for_sve_add_sub_immediate((int64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Values valid for logical immediates at various lane sizes.

// 8 bit value valid for an SVE logical immediate with byte lanes
operand immBLog()
%{
  predicate(Assembler::operand_valid_for_sve_logical_immediate(BitsPerByte, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 16 bit value valid for an SVE logical immediate with halfword lanes
operand immSLog()
%{
  predicate(Assembler::operand_valid_for_sve_logical_immediate(BitsPerShort, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4510 
// Integer operands 64 bit
// 64 bit immediate (any value)
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for addv subv immediate
operand immLAddSubV()
%{
  predicate(Assembler::operand_valid_for_sve_add_sub_immediate(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (uint64_t)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4584 
// Pointer operands
// Pointer Immediate (any value)
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// nullptr Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4618 
// Float and Double operands
// Double Immediate (any value)
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: value accepted by
// Assembler::operand_valid_for_float_immediate, i.e. encodable as a
// packed fmov immediate.
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate (any value)
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Half Float (FP16) Immediate
operand immH()
%{
  match(ConH);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: value encodable as a packed fmov immediate
// (widened to double for the check).
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4688 
// Narrow pointer operands
// Narrow Pointer Immediate (compressed oop constant)
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow nullptr Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate (compressed klass constant)
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4719 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4741 
// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4753 
 4754 // Integer 64 bit Register not Special
 4755 operand iRegLNoSp()
 4756 %{
 4757   constraint(ALLOC_IN_RC(no_special_reg));
 4758   match(RegL);
 4759   match(iRegL_R0);
 4760   format %{ %}
 4761   interface(REG_INTER);
 4762 %}
 4763 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// This operand is not allowed to use rfp even if
// rfp is not used to hold the frame pointer.
operand iRegPNoSpNoRfp()
%{
  constraint(ALLOC_IN_RC(no_special_no_rfp_ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Fixed-register pointer operands: each pins allocation to a single
// general-purpose register for use in calling conventions and stubs.

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4892 
// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Integer 32 bit Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4959 
 4960 
// Narrow Pointer Register Operands
// Narrow Pointer Register (compressed oop in a 32 bit register)
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4982 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Generic vector class. This will be used for
// all vector operands, including NEON and SVE.
// The 'dynamic' register class defers the concrete class choice
// (NEON width vs SVE scalable) to runtime configuration.
operand vReg()
%{
  constraint(ALLOC_IN_RC(dynamic));
  match(VecA);
  match(VecD);
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Fixed-register vector operands: each pins allocation to a single
// vector register (scalable VecA class).

operand vReg_V10()
%{
  constraint(ALLOC_IN_RC(v10_veca_reg));
  match(vReg);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V11()
%{
  constraint(ALLOC_IN_RC(v11_veca_reg));
  match(vReg);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V12()
%{
  constraint(ALLOC_IN_RC(v12_veca_reg));
  match(vReg);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V13()
%{
  constraint(ALLOC_IN_RC(v13_veca_reg));
  match(vReg);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V17()
%{
  constraint(ALLOC_IN_RC(v17_veca_reg));
  match(vReg);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V18()
%{
  constraint(ALLOC_IN_RC(v18_veca_reg));
  match(vReg);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V23()
%{
  constraint(ALLOC_IN_RC(v23_veca_reg));
  match(vReg);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V24()
%{
  constraint(ALLOC_IN_RC(v24_veca_reg));
  match(vReg);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5100 
// Scalable vector register operand (SVE)
operand vecA()
%{
  constraint(ALLOC_IN_RC(vectora_reg));
  match(VecA);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64 bit vector register operand (NEON D form)
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128 bit vector register operand (NEON Q form)
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5130 
// Fixed-register double operands: each pins allocation to a single
// FP/SIMD register for use in calling conventions and stubs.

operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V4()
%{
  constraint(ALLOC_IN_RC(v4_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V5()
%{
  constraint(ALLOC_IN_RC(v5_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V6()
%{
  constraint(ALLOC_IN_RC(v6_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V7()
%{
  constraint(ALLOC_IN_RC(v7_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V12()
%{
  constraint(ALLOC_IN_RC(v12_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V13()
%{
  constraint(ALLOC_IN_RC(v13_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5220 
// SVE predicate register
operand pReg()
%{
  constraint(ALLOC_IN_RC(pr_reg));
  match(RegVectMask);
  match(pRegGov);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// SVE predicate register drawn from the governing-predicate class
// (gov_pr), usable where the instruction requires a governing predicate.
operand pRegGov()
%{
  constraint(ALLOC_IN_RC(gov_pr));
  match(RegVectMask);
  match(pReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Governing predicate pinned to p0
operand pRegGov_P0()
%{
  constraint(ALLOC_IN_RC(p0_reg));
  match(RegVectMask);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Governing predicate pinned to p1
operand pRegGov_P1()
%{
  constraint(ALLOC_IN_RC(p1_reg));
  match(RegVectMask);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5258 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// for floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
 5298 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // NOTE(review): old comment said "link_reg" but the register class is thread_reg — confirm
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5321 
//----------Memory Operands----------------------------------------------------

// Memory addressed by a pointer register alone: [$reg].
// In the MEMORY_INTER interface, index(0xffffffff) is the sentinel
// for "no index register".
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// Base plus sign-extended 32 bit index scaled by the access size:
// [$reg, $ireg, sxtw #scale]. The predicate requires that every
// memory use of this AddP has an access size matching the scale.
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// Base plus 64 bit index scaled by the access size:
// [$reg, $lreg, lsl #scale], with the same access-size predicate.
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// Base plus sign-extended 32 bit index, unscaled: [$reg, $ireg, sxtw]
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Base plus 64 bit index, unscaled: [$reg, $lreg]
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
 5395 
// [base, #imm] -- base plus immediate offset operands. The numeric
// suffix names the matching immediate operand (immIOffset<N> /
// immLoffset<N>), which constrains the offset to values encodable for
// an N-byte memory access.
operand indOffI1(iRegP reg, immIOffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI2(iRegP reg, immIOffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Same as the indOffI<N> family but the offset is a long immediate.
operand indOffL1(iRegP reg, immLoffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL2(iRegP reg, immLoffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5535 
// [base] where the base is a long value reinterpreted as a pointer
// (CastX2P), e.g. for raw-memory access.
operand indirectX2P(iRegL reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(CastX2P reg);
  op_cost(0);
  format %{ "[$reg]\t# long -> ptr" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// [base, #imm] with a long-to-pointer base, as above.
operand indOffX2P(iRegL reg, immLOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (CastX2P reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# long -> ptr" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5563 
// Narrow-oop (compressed pointer) memory operands. All of these are
// guarded by CompressedOops::shift() == 0, i.e. they are only legal
// when decoding a narrow oop is a no-op, so the narrow register value
// can be used directly as the base address.
operand indirectN(iRegN reg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow base plus scaled, sign-extended 32-bit index.
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// Narrow base plus scaled 64-bit index.
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// Narrow base plus unscaled, sign-extended 32-bit index.
operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow base plus unscaled 64-bit index.
operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow base plus int immediate offset.
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Narrow base plus long immediate offset.
operand indOffLN(iRegN reg, immLOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5668 
 5669 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
// NOTE(review): only stackSlotP carries an explicit op_cost(100); the
// other stack-slot operands rely on the default -- confirm intentional.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
 5744 
 5745 // Operands for expressing Control Flow
 5746 // NOTE: Label is a predefined operand which should not be redefined in
 5747 //       the AD file. It is generically handled within the ADLC.
 5748 
 5749 //----------Conditional Branch Operands----------------------------------------
 5750 // Comparison Op  - This is the operation of the comparison, and is limited to
 5751 //                  the following set of codes:
 5752 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
 5753 //
 5754 // Other attributes of the comparison, such as unsignedness, are specified
 5755 // by the comparison instruction that sets a condition code flags register.
 5756 // That result is represented by a flags operand whose subtype is appropriate
 5757 // to the unsignedness (etc.) of the comparison.
 5758 //
 5759 // Later, the instruction which matches both the Comparison Op (a Bool) and
 5760 // the flags (produced by the Cmp) specifies the coding of the comparison op
 5761 // by matching a specific subtype of Bool operand below, such as cmpOpU.
 5762 
// used for signed integral comparisons and fp comparisons

// In the COND_INTER interfaces below, the hex value bound to each
// relation is the AArch64 condition code emitted for it (eq=0x0,
// ne=0x1, hs=0x2, lo=0x3, vs=0x6, vc=0x7, hi=0x8, ls=0x9, ge=0xa,
// lt=0xb, gt=0xc, le=0xd).
operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 5800 
// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions

// Restricted to eq/ne tests via the predicate; signed condition codes.
operand cmpOpEqNe()
%{
  match(Bool);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions

// Restricted to lt/ge tests via the predicate; signed condition codes.
operand cmpOpLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions

// Restricted to eq/ne/le/gt tests via the predicate; unsigned
// condition codes.
operand cmpOpUEqNeLeGt()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq ||
            n->as_Bool()->_test._test == BoolTest::ne ||
            n->as_Bool()->_test._test == BoolTest::le ||
            n->as_Bool()->_test._test == BoolTest::gt);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 5873 
// Special operand allowing long args to int ops to be truncated for free

operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}

// Special operand allowing a long value to be used as a pointer for free
operand iRegL2P(iRegL reg) %{

  op_cost(0);

  match(CastX2P reg);

  format %{ "l2p($reg)" %}

  interface(REG_INTER)
%}

// vmem<N>: memory opclasses used by vector load/store rules for
// N-byte accesses.
opclass vmem2(indirect, indIndex, indOffI2, indOffL2);
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
 5902 
//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
// instruction definitions by not requiring the AD writer to specify
// separate instructions for every form of operand when the
// instruction accepts multiple operand types with the same basic
// encoding and format. The classic case of this is memory operands.

// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address

// memory<N>: the address forms legal for an N-byte access (note that
// memory1 and memory2 omit the narrow-oop immediate-offset forms
// indOffIN/indOffLN that memory4 and memory8 include).
opclass memory1(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI1, indOffL1,
                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indirectX2P, indOffX2P);

opclass memory2(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI2, indOffL2,
                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indirectX2P, indOffX2P);

opclass memory4(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI4, indOffL4,
                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P);

opclass memory8(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI8, indOffL8,
                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P);

// All of the memory operands. For the pipeline description.
opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex,
               indOffI1, indOffL1, indOffI2, indOffL2, indOffI4, indOffL4, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P);


// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but it's not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
opclass iRegPorL2P(iRegP, iRegL2P);
 5946 
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architectures pipeline.

// For specific pipelines, eg A53, define the stages of that pipeline
//pipe_desc(ISS, EX1, EX2, WR);
// Map the A53-style stage names (issue / execute-1 / execute-2 /
// write-back) onto the generic 6-stage pipe_desc (S0..S5) below.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
 5956 
// The pipeline model below is used only to attach fixed latencies to
// instruction classes; it is not a faithful model of any actual core.
pipeline %{

attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 4;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}

// We don't use an actual pipeline model so don't care about resources
// or description. We do use pipeline classes to introduce fixed
// latencies

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine

// INS0/INS1 are the two issue slots (INS01 = either); ALU0/ALU1 the
// two integer ALUs (ALU = either); plus multiply-accumulate, divide,
// branch, load/store and NEON/FP units.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);
 5993 
//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

// FP two-operand (dyadic) register-register op, single precision
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP two-operand (dyadic) register-register op, double precision
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, single precision
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, double precision
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP conversions between float/double and to/from integer registers
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// NOTE(review): src here is iRegIorL2I while fp_l2f uses iRegL --
// confirm this asymmetry is intentional.
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP divide: issues only in slot 0 (INS0)
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP conditional select: also reads the flags register
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP immediate move (no source registers)
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP constant load
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
 6199 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
// NOTE(review): dst is written in EX2 but the ALU resource is
// reserved in EX1 -- confirm the stage mismatch is intentional.
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
 6297 
//------- Compare operation -------------------------------

// Compare reg-reg
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-imm
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
 6324 
//------- Conditional instructions ------------------------

// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// EG.  CSEL    X0, X1, zr, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
 6362 
//------- Multiply pipeline operations --------------------

// Multiply reg-reg
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// 64-bit multiply reg-reg
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// 64-bit multiply accumulate
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
 6415 
//------- Divide pipeline operations --------------------

// 32-bit divide
// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// 64-bit divide
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
 6441 
 6442 //------- Load pipeline operations ------------------------
 6443 
 6444 // Load - prefetch
 6445 // Eg.  PFRM    <mem>
 6446 pipe_class iload_prefetch(memory mem)
 6447 %{
 6448   single_instruction;
 6449   mem    : ISS(read);
 6450   INS01  : ISS;
 6451   LDST   : WR;
 6452 %}
 6453 
 6454 // Load - reg, mem
 6455 // Eg.  LDR     x0, <mem>
 6456 pipe_class iload_reg_mem(iRegI dst, memory mem)
 6457 %{
 6458   single_instruction;
 6459   dst    : WR(write);
 6460   mem    : ISS(read);
 6461   INS01  : ISS;
 6462   LDST   : WR;
 6463 %}
 6464 
 6465 // Load - reg, reg
 6466 // Eg.  LDR     x0, [sp, x1]
 6467 pipe_class iload_reg_reg(iRegI dst, iRegI src)
 6468 %{
 6469   single_instruction;
 6470   dst    : WR(write);
 6471   src    : ISS(read);
 6472   INS01  : ISS;
 6473   LDST   : WR;
 6474 %}
 6475 
 6476 //------- Store pipeline operations -----------------------
 6477 
 6478 // Store - zr, mem
 6479 // Eg.  STR     zr, <mem>
 6480 pipe_class istore_mem(memory mem)
 6481 %{
 6482   single_instruction;
 6483   mem    : ISS(read);
 6484   INS01  : ISS;
 6485   LDST   : WR;
 6486 %}
 6487 
 6488 // Store - reg, mem
 6489 // Eg.  STR     x0, <mem>
 6490 pipe_class istore_reg_mem(iRegI src, memory mem)
 6491 %{
 6492   single_instruction;
 6493   mem    : ISS(read);
 6494   src    : EX2(read);
 6495   INS01  : ISS;
 6496   LDST   : WR;
 6497 %}
 6498 
 6499 // Store - reg, reg
 6500 // Eg. STR      x0, [sp, x1]
 6501 pipe_class istore_reg_reg(iRegI dst, iRegI src)
 6502 %{
 6503   single_instruction;
 6504   dst    : ISS(read);
 6505   src    : EX2(read);
 6506   INS01  : ISS;
 6507   LDST   : WR;
 6508 %}
 6509 
//------- Branch pipeline operations ----------------------
 6511 
 6512 // Branch
 6513 pipe_class pipe_branch()
 6514 %{
 6515   single_instruction;
 6516   INS01  : ISS;
 6517   BRANCH : EX1;
 6518 %}
 6519 
 6520 // Conditional branch
 6521 pipe_class pipe_branch_cond(rFlagsReg cr)
 6522 %{
 6523   single_instruction;
 6524   cr     : EX1(read);
 6525   INS01  : ISS;
 6526   BRANCH : EX1;
 6527 %}
 6528 
 6529 // Compare & Branch
 6530 // EG.  CBZ/CBNZ
 6531 pipe_class pipe_cmp_branch(iRegI op1)
 6532 %{
 6533   single_instruction;
 6534   op1    : EX1(read);
 6535   INS01  : ISS;
 6536   BRANCH : EX1;
 6537 %}
 6538 
 6539 //------- Synchronisation operations ----------------------
 6540 
 6541 // Any operation requiring serialization.
 6542 // EG.  DMB/Atomic Ops/Load Acquire/Str Release
 6543 pipe_class pipe_serial()
 6544 %{
 6545   single_instruction;
 6546   force_serialization;
 6547   fixed_latency(16);
 6548   INS01  : ISS(2); // Cannot dual issue with any other instruction
 6549   LDST   : WR;
 6550 %}
 6551 
 6552 // Generic big/slow expanded idiom - also serialized
 6553 pipe_class pipe_slow()
 6554 %{
 6555   instruction_count(10);
 6556   multiple_bundles;
 6557   force_serialization;
 6558   fixed_latency(16);
 6559   INS01  : ISS(2); // Cannot dual issue with any other instruction
 6560   LDST   : WR;
 6561 %}
 6562 
 6563 // Empty pipeline class
 6564 pipe_class pipe_class_empty()
 6565 %{
 6566   single_instruction;
 6567   fixed_latency(0);
 6568 %}
 6569 
 6570 // Default pipeline class.
 6571 pipe_class pipe_class_default()
 6572 %{
 6573   single_instruction;
 6574   fixed_latency(2);
 6575 %}
 6576 
 6577 // Pipeline class for compares.
 6578 pipe_class pipe_class_compare()
 6579 %{
 6580   single_instruction;
 6581   fixed_latency(16);
 6582 %}
 6583 
 6584 // Pipeline class for memory operations.
 6585 pipe_class pipe_class_memory()
 6586 %{
 6587   single_instruction;
 6588   fixed_latency(16);
 6589 %}
 6590 
 6591 // Pipeline class for call.
 6592 pipe_class pipe_class_call()
 6593 %{
 6594   single_instruction;
 6595   fixed_latency(100);
 6596 %}
 6597 
 6598 // Define the class for the Nop node.
 6599 define %{
 6600    MachNop = pipe_class_empty;
 6601 %}
 6602 
 6603 %}
 6604 //----------INSTRUCTIONS-------------------------------------------------------
 6605 //
 6606 // match      -- States which machine-independent subtree may be replaced
 6607 //               by this instruction.
 6608 // ins_cost   -- The estimated cost of this instruction is used by instruction
 6609 //               selection to identify a minimum cost tree of machine
 6610 //               instructions that matches a tree of machine-independent
 6611 //               instructions.
 6612 // format     -- A string providing the disassembly for this instruction.
 6613 //               The value of an instruction's operand may be inserted
 6614 //               by referring to it with a '$' prefix.
 6615 // opcode     -- Three instruction opcodes may be provided.  These are referred
 6616 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
 6618 //               indicate the type of machine instruction, while secondary
 6619 //               and tertiary are often used for prefix options or addressing
 6620 //               modes.
 6621 // ins_encode -- A list of encode classes with parameters. The encode class
 6622 //               name must have been defined in an 'enc_class' specification
 6623 //               in the encode section of the architecture description.
 6624 
 6625 // ============================================================================
 6626 // Memory (Load/Store) Instructions
 6627 
 6628 // Load Instructions
 6629 
 6630 // Load Byte (8 bit signed)
 6631 instruct loadB(iRegINoSp dst, memory1 mem)
 6632 %{
 6633   match(Set dst (LoadB mem));
 6634   predicate(!needs_acquiring_load(n));
 6635 
 6636   ins_cost(4 * INSN_COST);
 6637   format %{ "ldrsbw  $dst, $mem\t# byte" %}
 6638 
 6639   ins_encode(aarch64_enc_ldrsbw(dst, mem));
 6640 
 6641   ins_pipe(iload_reg_mem);
 6642 %}
 6643 
 6644 // Load Byte (8 bit signed) into long
 6645 instruct loadB2L(iRegLNoSp dst, memory1 mem)
 6646 %{
 6647   match(Set dst (ConvI2L (LoadB mem)));
 6648   predicate(!needs_acquiring_load(n->in(1)));
 6649 
 6650   ins_cost(4 * INSN_COST);
 6651   format %{ "ldrsb  $dst, $mem\t# byte" %}
 6652 
 6653   ins_encode(aarch64_enc_ldrsb(dst, mem));
 6654 
 6655   ins_pipe(iload_reg_mem);
 6656 %}
 6657 
 6658 // Load Byte (8 bit unsigned)
 6659 instruct loadUB(iRegINoSp dst, memory1 mem)
 6660 %{
 6661   match(Set dst (LoadUB mem));
 6662   predicate(!needs_acquiring_load(n));
 6663 
 6664   ins_cost(4 * INSN_COST);
 6665   format %{ "ldrbw  $dst, $mem\t# byte" %}
 6666 
 6667   ins_encode(aarch64_enc_ldrb(dst, mem));
 6668 
 6669   ins_pipe(iload_reg_mem);
 6670 %}
 6671 
 6672 // Load Byte (8 bit unsigned) into long
 6673 instruct loadUB2L(iRegLNoSp dst, memory1 mem)
 6674 %{
 6675   match(Set dst (ConvI2L (LoadUB mem)));
 6676   predicate(!needs_acquiring_load(n->in(1)));
 6677 
 6678   ins_cost(4 * INSN_COST);
 6679   format %{ "ldrb  $dst, $mem\t# byte" %}
 6680 
 6681   ins_encode(aarch64_enc_ldrb(dst, mem));
 6682 
 6683   ins_pipe(iload_reg_mem);
 6684 %}
 6685 
 6686 // Load Short (16 bit signed)
 6687 instruct loadS(iRegINoSp dst, memory2 mem)
 6688 %{
 6689   match(Set dst (LoadS mem));
 6690   predicate(!needs_acquiring_load(n));
 6691 
 6692   ins_cost(4 * INSN_COST);
 6693   format %{ "ldrshw  $dst, $mem\t# short" %}
 6694 
 6695   ins_encode(aarch64_enc_ldrshw(dst, mem));
 6696 
 6697   ins_pipe(iload_reg_mem);
 6698 %}
 6699 
 6700 // Load Short (16 bit signed) into long
 6701 instruct loadS2L(iRegLNoSp dst, memory2 mem)
 6702 %{
 6703   match(Set dst (ConvI2L (LoadS mem)));
 6704   predicate(!needs_acquiring_load(n->in(1)));
 6705 
 6706   ins_cost(4 * INSN_COST);
 6707   format %{ "ldrsh  $dst, $mem\t# short" %}
 6708 
 6709   ins_encode(aarch64_enc_ldrsh(dst, mem));
 6710 
 6711   ins_pipe(iload_reg_mem);
 6712 %}
 6713 
 6714 // Load Char (16 bit unsigned)
 6715 instruct loadUS(iRegINoSp dst, memory2 mem)
 6716 %{
 6717   match(Set dst (LoadUS mem));
 6718   predicate(!needs_acquiring_load(n));
 6719 
 6720   ins_cost(4 * INSN_COST);
 6721   format %{ "ldrh  $dst, $mem\t# short" %}
 6722 
 6723   ins_encode(aarch64_enc_ldrh(dst, mem));
 6724 
 6725   ins_pipe(iload_reg_mem);
 6726 %}
 6727 
 6728 // Load Short/Char (16 bit unsigned) into long
 6729 instruct loadUS2L(iRegLNoSp dst, memory2 mem)
 6730 %{
 6731   match(Set dst (ConvI2L (LoadUS mem)));
 6732   predicate(!needs_acquiring_load(n->in(1)));
 6733 
 6734   ins_cost(4 * INSN_COST);
 6735   format %{ "ldrh  $dst, $mem\t# short" %}
 6736 
 6737   ins_encode(aarch64_enc_ldrh(dst, mem));
 6738 
 6739   ins_pipe(iload_reg_mem);
 6740 %}
 6741 
 6742 // Load Integer (32 bit signed)
 6743 instruct loadI(iRegINoSp dst, memory4 mem)
 6744 %{
 6745   match(Set dst (LoadI mem));
 6746   predicate(!needs_acquiring_load(n));
 6747 
 6748   ins_cost(4 * INSN_COST);
 6749   format %{ "ldrw  $dst, $mem\t# int" %}
 6750 
 6751   ins_encode(aarch64_enc_ldrw(dst, mem));
 6752 
 6753   ins_pipe(iload_reg_mem);
 6754 %}
 6755 
 6756 // Load Integer (32 bit signed) into long
 6757 instruct loadI2L(iRegLNoSp dst, memory4 mem)
 6758 %{
 6759   match(Set dst (ConvI2L (LoadI mem)));
 6760   predicate(!needs_acquiring_load(n->in(1)));
 6761 
 6762   ins_cost(4 * INSN_COST);
 6763   format %{ "ldrsw  $dst, $mem\t# int" %}
 6764 
 6765   ins_encode(aarch64_enc_ldrsw(dst, mem));
 6766 
 6767   ins_pipe(iload_reg_mem);
 6768 %}
 6769 
 6770 // Load Integer (32 bit unsigned) into long
 6771 instruct loadUI2L(iRegLNoSp dst, memory4 mem, immL_32bits mask)
 6772 %{
 6773   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
 6774   predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));
 6775 
 6776   ins_cost(4 * INSN_COST);
 6777   format %{ "ldrw  $dst, $mem\t# int" %}
 6778 
 6779   ins_encode(aarch64_enc_ldrw(dst, mem));
 6780 
 6781   ins_pipe(iload_reg_mem);
 6782 %}
 6783 
 6784 // Load Long (64 bit signed)
 6785 instruct loadL(iRegLNoSp dst, memory8 mem)
 6786 %{
 6787   match(Set dst (LoadL mem));
 6788   predicate(!needs_acquiring_load(n));
 6789 
 6790   ins_cost(4 * INSN_COST);
 6791   format %{ "ldr  $dst, $mem\t# int" %}
 6792 
 6793   ins_encode(aarch64_enc_ldr(dst, mem));
 6794 
 6795   ins_pipe(iload_reg_mem);
 6796 %}
 6797 
 6798 // Load Range
 6799 instruct loadRange(iRegINoSp dst, memory4 mem)
 6800 %{
 6801   match(Set dst (LoadRange mem));
 6802 
 6803   ins_cost(4 * INSN_COST);
 6804   format %{ "ldrw  $dst, $mem\t# range" %}
 6805 
 6806   ins_encode(aarch64_enc_ldrw(dst, mem));
 6807 
 6808   ins_pipe(iload_reg_mem);
 6809 %}
 6810 
 6811 // Load Pointer
 6812 instruct loadP(iRegPNoSp dst, memory8 mem)
 6813 %{
 6814   match(Set dst (LoadP mem));
 6815   predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0));
 6816 
 6817   ins_cost(4 * INSN_COST);
 6818   format %{ "ldr  $dst, $mem\t# ptr" %}
 6819 
 6820   ins_encode(aarch64_enc_ldr(dst, mem));
 6821 
 6822   ins_pipe(iload_reg_mem);
 6823 %}
 6824 
 6825 // Load Compressed Pointer
 6826 instruct loadN(iRegNNoSp dst, memory4 mem)
 6827 %{
 6828   match(Set dst (LoadN mem));
 6829   predicate(!needs_acquiring_load(n) && n->as_Load()->barrier_data() == 0);
 6830 
 6831   ins_cost(4 * INSN_COST);
 6832   format %{ "ldrw  $dst, $mem\t# compressed ptr" %}
 6833 
 6834   ins_encode(aarch64_enc_ldrw(dst, mem));
 6835 
 6836   ins_pipe(iload_reg_mem);
 6837 %}
 6838 
 6839 // Load Klass Pointer
 6840 instruct loadKlass(iRegPNoSp dst, memory8 mem)
 6841 %{
 6842   match(Set dst (LoadKlass mem));
 6843   predicate(!needs_acquiring_load(n));
 6844 
 6845   ins_cost(4 * INSN_COST);
 6846   format %{ "ldr  $dst, $mem\t# class" %}
 6847 
 6848   ins_encode(aarch64_enc_ldr(dst, mem));
 6849 
 6850   ins_pipe(iload_reg_mem);
 6851 %}
 6852 
 6853 // Load Narrow Klass Pointer
 6854 instruct loadNKlass(iRegNNoSp dst, memory4 mem)
 6855 %{
 6856   match(Set dst (LoadNKlass mem));
 6857   predicate(!needs_acquiring_load(n) && !UseCompactObjectHeaders);
 6858 
 6859   ins_cost(4 * INSN_COST);
 6860   format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}
 6861 
 6862   ins_encode(aarch64_enc_ldrw(dst, mem));
 6863 
 6864   ins_pipe(iload_reg_mem);
 6865 %}
 6866 
// Load Narrow Klass Pointer when compact object headers are in use: the
// compressed class pointer is packed into the mark word, so load the word
// and shift the klass bits down.
instruct loadNKlassCompactHeaders(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n) && UseCompactObjectHeaders);

  ins_cost(4 * INSN_COST);
  format %{
    "ldrw  $dst, $mem\t# compressed class ptr, shifted\n\t"
    "lsrw  $dst, $dst, markWord::klass_shift_at_offset"
  %}
  ins_encode %{
    // inlined aarch64_enc_ldrw
    loadStore(masm, &MacroAssembler::ldrw, $dst$$Register, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    // Shift the narrow klass down from the upper bits of the loaded word.
    __ lsrw($dst$$Register, $dst$$Register, markWord::klass_shift_at_offset);
  %}
  ins_pipe(iload_reg_mem);
%}
 6885 
 6886 // Load Float
 6887 instruct loadF(vRegF dst, memory4 mem)
 6888 %{
 6889   match(Set dst (LoadF mem));
 6890   predicate(!needs_acquiring_load(n));
 6891 
 6892   ins_cost(4 * INSN_COST);
 6893   format %{ "ldrs  $dst, $mem\t# float" %}
 6894 
 6895   ins_encode( aarch64_enc_ldrs(dst, mem) );
 6896 
 6897   ins_pipe(pipe_class_memory);
 6898 %}
 6899 
 6900 // Load Double
 6901 instruct loadD(vRegD dst, memory8 mem)
 6902 %{
 6903   match(Set dst (LoadD mem));
 6904   predicate(!needs_acquiring_load(n));
 6905 
 6906   ins_cost(4 * INSN_COST);
 6907   format %{ "ldrd  $dst, $mem\t# double" %}
 6908 
 6909   ins_encode( aarch64_enc_ldrd(dst, mem) );
 6910 
 6911   ins_pipe(pipe_class_memory);
 6912 %}
 6913 
 6914 
 6915 // Load Int Constant
 6916 instruct loadConI(iRegINoSp dst, immI src)
 6917 %{
 6918   match(Set dst src);
 6919 
 6920   ins_cost(INSN_COST);
 6921   format %{ "mov $dst, $src\t# int" %}
 6922 
 6923   ins_encode( aarch64_enc_movw_imm(dst, src) );
 6924 
 6925   ins_pipe(ialu_imm);
 6926 %}
 6927 
 6928 // Load Long Constant
 6929 instruct loadConL(iRegLNoSp dst, immL src)
 6930 %{
 6931   match(Set dst src);
 6932 
 6933   ins_cost(INSN_COST);
 6934   format %{ "mov $dst, $src\t# long" %}
 6935 
 6936   ins_encode( aarch64_enc_mov_imm(dst, src) );
 6937 
 6938   ins_pipe(ialu_imm);
 6939 %}
 6940 
 6941 // Load Pointer Constant
 6942 
 6943 instruct loadConP(iRegPNoSp dst, immP con)
 6944 %{
 6945   match(Set dst con);
 6946 
 6947   ins_cost(INSN_COST * 4);
 6948   format %{
 6949     "mov  $dst, $con\t# ptr\n\t"
 6950   %}
 6951 
 6952   ins_encode(aarch64_enc_mov_p(dst, con));
 6953 
 6954   ins_pipe(ialu_imm);
 6955 %}
 6956 
 6957 // Load Null Pointer Constant
 6958 
 6959 instruct loadConP0(iRegPNoSp dst, immP0 con)
 6960 %{
 6961   match(Set dst con);
 6962 
 6963   ins_cost(INSN_COST);
 6964   format %{ "mov  $dst, $con\t# nullptr ptr" %}
 6965 
 6966   ins_encode(aarch64_enc_mov_p0(dst, con));
 6967 
 6968   ins_pipe(ialu_imm);
 6969 %}
 6970 
 6971 // Load Pointer Constant One
 6972 
 6973 instruct loadConP1(iRegPNoSp dst, immP_1 con)
 6974 %{
 6975   match(Set dst con);
 6976 
 6977   ins_cost(INSN_COST);
 6978   format %{ "mov  $dst, $con\t# nullptr ptr" %}
 6979 
 6980   ins_encode(aarch64_enc_mov_p1(dst, con));
 6981 
 6982   ins_pipe(ialu_imm);
 6983 %}
 6984 
 6985 // Load Narrow Pointer Constant
 6986 
 6987 instruct loadConN(iRegNNoSp dst, immN con)
 6988 %{
 6989   match(Set dst con);
 6990 
 6991   ins_cost(INSN_COST * 4);
 6992   format %{ "mov  $dst, $con\t# compressed ptr" %}
 6993 
 6994   ins_encode(aarch64_enc_mov_n(dst, con));
 6995 
 6996   ins_pipe(ialu_imm);
 6997 %}
 6998 
 6999 // Load Narrow Null Pointer Constant
 7000 
 7001 instruct loadConN0(iRegNNoSp dst, immN0 con)
 7002 %{
 7003   match(Set dst con);
 7004 
 7005   ins_cost(INSN_COST);
 7006   format %{ "mov  $dst, $con\t# compressed nullptr ptr" %}
 7007 
 7008   ins_encode(aarch64_enc_mov_n0(dst, con));
 7009 
 7010   ins_pipe(ialu_imm);
 7011 %}
 7012 
 7013 // Load Narrow Klass Constant
 7014 
 7015 instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
 7016 %{
 7017   match(Set dst con);
 7018 
 7019   ins_cost(INSN_COST);
 7020   format %{ "mov  $dst, $con\t# compressed klass ptr" %}
 7021 
 7022   ins_encode(aarch64_enc_mov_nk(dst, con));
 7023 
 7024   ins_pipe(ialu_imm);
 7025 %}
 7026 
 7027 // Load Packed Float Constant
 7028 
 7029 instruct loadConF_packed(vRegF dst, immFPacked con) %{
 7030   match(Set dst con);
 7031   ins_cost(INSN_COST * 4);
 7032   format %{ "fmovs  $dst, $con"%}
 7033   ins_encode %{
 7034     __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
 7035   %}
 7036 
 7037   ins_pipe(fp_imm_s);
 7038 %}
 7039 
 7040 // Load Float Constant
 7041 
 7042 instruct loadConF(vRegF dst, immF con) %{
 7043   match(Set dst con);
 7044 
 7045   ins_cost(INSN_COST * 4);
 7046 
 7047   format %{
 7048     "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
 7049   %}
 7050 
 7051   ins_encode %{
 7052     __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
 7053   %}
 7054 
 7055   ins_pipe(fp_load_constant_s);
 7056 %}
 7057 
 7058 // Load Packed Double Constant
 7059 
 7060 instruct loadConD_packed(vRegD dst, immDPacked con) %{
 7061   match(Set dst con);
 7062   ins_cost(INSN_COST);
 7063   format %{ "fmovd  $dst, $con"%}
 7064   ins_encode %{
 7065     __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
 7066   %}
 7067 
 7068   ins_pipe(fp_imm_d);
 7069 %}
 7070 
 7071 // Load Double Constant
 7072 
 7073 instruct loadConD(vRegD dst, immD con) %{
 7074   match(Set dst con);
 7075 
 7076   ins_cost(INSN_COST * 5);
 7077   format %{
 7078     "ldrd $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
 7079   %}
 7080 
 7081   ins_encode %{
 7082     __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
 7083   %}
 7084 
 7085   ins_pipe(fp_load_constant_d);
 7086 %}
 7087 
 7088 // Load Half Float Constant
 7089 instruct loadConH(vRegF dst, immH con) %{
 7090   match(Set dst con);
 7091   format %{ "mov    rscratch1, $con\n\t"
 7092             "fmov   $dst, rscratch1"
 7093          %}
 7094   ins_encode %{
 7095     __ movw(rscratch1, (uint32_t)$con$$constant);
 7096     __ fmovs($dst$$FloatRegister, rscratch1);
 7097   %}
 7098   ins_pipe(pipe_class_default);
 7099 %}
 7100 
 7101 // Store Instructions
 7102 
 7103 // Store Byte
 7104 instruct storeB(iRegIorL2I src, memory1 mem)
 7105 %{
 7106   match(Set mem (StoreB mem src));
 7107   predicate(!needs_releasing_store(n));
 7108 
 7109   ins_cost(INSN_COST);
 7110   format %{ "strb  $src, $mem\t# byte" %}
 7111 
 7112   ins_encode(aarch64_enc_strb(src, mem));
 7113 
 7114   ins_pipe(istore_reg_mem);
 7115 %}
 7116 
 7117 
 7118 instruct storeimmB0(immI0 zero, memory1 mem)
 7119 %{
 7120   match(Set mem (StoreB mem zero));
 7121   predicate(!needs_releasing_store(n));
 7122 
 7123   ins_cost(INSN_COST);
 7124   format %{ "strb rscractch2, $mem\t# byte" %}
 7125 
 7126   ins_encode(aarch64_enc_strb0(mem));
 7127 
 7128   ins_pipe(istore_mem);
 7129 %}
 7130 
 7131 // Store Char/Short
 7132 instruct storeC(iRegIorL2I src, memory2 mem)
 7133 %{
 7134   match(Set mem (StoreC mem src));
 7135   predicate(!needs_releasing_store(n));
 7136 
 7137   ins_cost(INSN_COST);
 7138   format %{ "strh  $src, $mem\t# short" %}
 7139 
 7140   ins_encode(aarch64_enc_strh(src, mem));
 7141 
 7142   ins_pipe(istore_reg_mem);
 7143 %}
 7144 
 7145 instruct storeimmC0(immI0 zero, memory2 mem)
 7146 %{
 7147   match(Set mem (StoreC mem zero));
 7148   predicate(!needs_releasing_store(n));
 7149 
 7150   ins_cost(INSN_COST);
 7151   format %{ "strh  zr, $mem\t# short" %}
 7152 
 7153   ins_encode(aarch64_enc_strh0(mem));
 7154 
 7155   ins_pipe(istore_mem);
 7156 %}
 7157 
 7158 // Store Integer
 7159 
 7160 instruct storeI(iRegIorL2I src, memory4 mem)
 7161 %{
 7162   match(Set mem(StoreI mem src));
 7163   predicate(!needs_releasing_store(n));
 7164 
 7165   ins_cost(INSN_COST);
 7166   format %{ "strw  $src, $mem\t# int" %}
 7167 
 7168   ins_encode(aarch64_enc_strw(src, mem));
 7169 
 7170   ins_pipe(istore_reg_mem);
 7171 %}
 7172 
 7173 instruct storeimmI0(immI0 zero, memory4 mem)
 7174 %{
 7175   match(Set mem(StoreI mem zero));
 7176   predicate(!needs_releasing_store(n));
 7177 
 7178   ins_cost(INSN_COST);
 7179   format %{ "strw  zr, $mem\t# int" %}
 7180 
 7181   ins_encode(aarch64_enc_strw0(mem));
 7182 
 7183   ins_pipe(istore_mem);
 7184 %}
 7185 
 7186 // Store Long (64 bit signed)
 7187 instruct storeL(iRegL src, memory8 mem)
 7188 %{
 7189   match(Set mem (StoreL mem src));
 7190   predicate(!needs_releasing_store(n));
 7191 
 7192   ins_cost(INSN_COST);
 7193   format %{ "str  $src, $mem\t# int" %}
 7194 
 7195   ins_encode(aarch64_enc_str(src, mem));
 7196 
 7197   ins_pipe(istore_reg_mem);
 7198 %}
 7199 
 7200 // Store Long (64 bit signed)
 7201 instruct storeimmL0(immL0 zero, memory8 mem)
 7202 %{
 7203   match(Set mem (StoreL mem zero));
 7204   predicate(!needs_releasing_store(n));
 7205 
 7206   ins_cost(INSN_COST);
 7207   format %{ "str  zr, $mem\t# int" %}
 7208 
 7209   ins_encode(aarch64_enc_str0(mem));
 7210 
 7211   ins_pipe(istore_mem);
 7212 %}
 7213 
 7214 // Store Pointer
 7215 instruct storeP(iRegP src, memory8 mem)
 7216 %{
 7217   match(Set mem (StoreP mem src));
 7218   predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);
 7219 
 7220   ins_cost(INSN_COST);
 7221   format %{ "str  $src, $mem\t# ptr" %}
 7222 
 7223   ins_encode(aarch64_enc_str(src, mem));
 7224 
 7225   ins_pipe(istore_reg_mem);
 7226 %}
 7227 
 7228 // Store Pointer
 7229 instruct storeimmP0(immP0 zero, memory8 mem)
 7230 %{
 7231   match(Set mem (StoreP mem zero));
 7232   predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);
 7233 
 7234   ins_cost(INSN_COST);
 7235   format %{ "str zr, $mem\t# ptr" %}
 7236 
 7237   ins_encode(aarch64_enc_str0(mem));
 7238 
 7239   ins_pipe(istore_mem);
 7240 %}
 7241 
 7242 // Store Compressed Pointer
 7243 instruct storeN(iRegN src, memory4 mem)
 7244 %{
 7245   match(Set mem (StoreN mem src));
 7246   predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);
 7247 
 7248   ins_cost(INSN_COST);
 7249   format %{ "strw  $src, $mem\t# compressed ptr" %}
 7250 
 7251   ins_encode(aarch64_enc_strw(src, mem));
 7252 
 7253   ins_pipe(istore_reg_mem);
 7254 %}
 7255 
 7256 instruct storeImmN0(immN0 zero, memory4 mem)
 7257 %{
 7258   match(Set mem (StoreN mem zero));
 7259   predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);
 7260 
 7261   ins_cost(INSN_COST);
 7262   format %{ "strw  zr, $mem\t# compressed ptr" %}
 7263 
 7264   ins_encode(aarch64_enc_strw0(mem));
 7265 
 7266   ins_pipe(istore_mem);
 7267 %}
 7268 
 7269 // Store Float
 7270 instruct storeF(vRegF src, memory4 mem)
 7271 %{
 7272   match(Set mem (StoreF mem src));
 7273   predicate(!needs_releasing_store(n));
 7274 
 7275   ins_cost(INSN_COST);
 7276   format %{ "strs  $src, $mem\t# float" %}
 7277 
 7278   ins_encode( aarch64_enc_strs(src, mem) );
 7279 
 7280   ins_pipe(pipe_class_memory);
 7281 %}
 7282 
 7283 // TODO
 7284 // implement storeImmF0 and storeFImmPacked
 7285 
 7286 // Store Double
 7287 instruct storeD(vRegD src, memory8 mem)
 7288 %{
 7289   match(Set mem (StoreD mem src));
 7290   predicate(!needs_releasing_store(n));
 7291 
 7292   ins_cost(INSN_COST);
 7293   format %{ "strd  $src, $mem\t# double" %}
 7294 
 7295   ins_encode( aarch64_enc_strd(src, mem) );
 7296 
 7297   ins_pipe(pipe_class_memory);
 7298 %}
 7299 
 7300 // Store Compressed Klass Pointer
 7301 instruct storeNKlass(iRegN src, memory4 mem)
 7302 %{
 7303   predicate(!needs_releasing_store(n));
 7304   match(Set mem (StoreNKlass mem src));
 7305 
 7306   ins_cost(INSN_COST);
 7307   format %{ "strw  $src, $mem\t# compressed klass ptr" %}
 7308 
 7309   ins_encode(aarch64_enc_strw(src, mem));
 7310 
 7311   ins_pipe(istore_reg_mem);
 7312 %}
 7313 
 7314 // TODO
 7315 // implement storeImmD0 and storeDImmPacked
 7316 
 7317 // prefetch instructions
 7318 // Must be safe to execute with invalid address (cannot fault).
 7319 
 7320 instruct prefetchalloc( memory8 mem ) %{
 7321   match(PrefetchAllocation mem);
 7322 
 7323   ins_cost(INSN_COST);
 7324   format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}
 7325 
 7326   ins_encode( aarch64_enc_prefetchw(mem) );
 7327 
 7328   ins_pipe(iload_prefetch);
 7329 %}
 7330 
 7331 //  ---------------- volatile loads and stores ----------------
 7332 
 7333 // Load Byte (8 bit signed)
 7334 instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
 7335 %{
 7336   match(Set dst (LoadB mem));
 7337 
 7338   ins_cost(VOLATILE_REF_COST);
 7339   format %{ "ldarsb  $dst, $mem\t# byte" %}
 7340 
 7341   ins_encode(aarch64_enc_ldarsb(dst, mem));
 7342 
 7343   ins_pipe(pipe_serial);
 7344 %}
 7345 
 7346 // Load Byte (8 bit signed) into long
 7347 instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7348 %{
 7349   match(Set dst (ConvI2L (LoadB mem)));
 7350 
 7351   ins_cost(VOLATILE_REF_COST);
 7352   format %{ "ldarsb  $dst, $mem\t# byte" %}
 7353 
 7354   ins_encode(aarch64_enc_ldarsb(dst, mem));
 7355 
 7356   ins_pipe(pipe_serial);
 7357 %}
 7358 
 7359 // Load Byte (8 bit unsigned)
 7360 instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
 7361 %{
 7362   match(Set dst (LoadUB mem));
 7363 
 7364   ins_cost(VOLATILE_REF_COST);
 7365   format %{ "ldarb  $dst, $mem\t# byte" %}
 7366 
 7367   ins_encode(aarch64_enc_ldarb(dst, mem));
 7368 
 7369   ins_pipe(pipe_serial);
 7370 %}
 7371 
 7372 // Load Byte (8 bit unsigned) into long
 7373 instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7374 %{
 7375   match(Set dst (ConvI2L (LoadUB mem)));
 7376 
 7377   ins_cost(VOLATILE_REF_COST);
 7378   format %{ "ldarb  $dst, $mem\t# byte" %}
 7379 
 7380   ins_encode(aarch64_enc_ldarb(dst, mem));
 7381 
 7382   ins_pipe(pipe_serial);
 7383 %}
 7384 
 7385 // Load Short (16 bit signed)
 7386 instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
 7387 %{
 7388   match(Set dst (LoadS mem));
 7389 
 7390   ins_cost(VOLATILE_REF_COST);
 7391   format %{ "ldarshw  $dst, $mem\t# short" %}
 7392 
 7393   ins_encode(aarch64_enc_ldarshw(dst, mem));
 7394 
 7395   ins_pipe(pipe_serial);
 7396 %}
 7397 
// Load Char (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}
 7409 
 7410 // Load Short/Char (16 bit unsigned) into long
 7411 instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7412 %{
 7413   match(Set dst (ConvI2L (LoadUS mem)));
 7414 
 7415   ins_cost(VOLATILE_REF_COST);
 7416   format %{ "ldarh  $dst, $mem\t# short" %}
 7417 
 7418   ins_encode(aarch64_enc_ldarh(dst, mem));
 7419 
 7420   ins_pipe(pipe_serial);
 7421 %}
 7422 
 7423 // Load Short/Char (16 bit signed) into long
 7424 instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7425 %{
 7426   match(Set dst (ConvI2L (LoadS mem)));
 7427 
 7428   ins_cost(VOLATILE_REF_COST);
 7429   format %{ "ldarh  $dst, $mem\t# short" %}
 7430 
 7431   ins_encode(aarch64_enc_ldarsh(dst, mem));
 7432 
 7433   ins_pipe(pipe_serial);
 7434 %}
 7435 
 7436 // Load Integer (32 bit signed)
 7437 instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
 7438 %{
 7439   match(Set dst (LoadI mem));
 7440 
 7441   ins_cost(VOLATILE_REF_COST);
 7442   format %{ "ldarw  $dst, $mem\t# int" %}
 7443 
 7444   ins_encode(aarch64_enc_ldarw(dst, mem));
 7445 
 7446   ins_pipe(pipe_serial);
 7447 %}
 7448 
 7449 // Load Integer (32 bit unsigned) into long
 7450 instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
 7451 %{
 7452   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
 7453 
 7454   ins_cost(VOLATILE_REF_COST);
 7455   format %{ "ldarw  $dst, $mem\t# int" %}
 7456 
 7457   ins_encode(aarch64_enc_ldarw(dst, mem));
 7458 
 7459   ins_pipe(pipe_serial);
 7460 %}
 7461 
 7462 // Load Long (64 bit signed)
 7463 instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7464 %{
 7465   match(Set dst (LoadL mem));
 7466 
 7467   ins_cost(VOLATILE_REF_COST);
 7468   format %{ "ldar  $dst, $mem\t# int" %}
 7469 
 7470   ins_encode(aarch64_enc_ldar(dst, mem));
 7471 
 7472   ins_pipe(pipe_serial);
 7473 %}
 7474 
 7475 // Load Pointer
 7476 instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
 7477 %{
 7478   match(Set dst (LoadP mem));
 7479   predicate(n->as_Load()->barrier_data() == 0);
 7480 
 7481   ins_cost(VOLATILE_REF_COST);
 7482   format %{ "ldar  $dst, $mem\t# ptr" %}
 7483 
 7484   ins_encode(aarch64_enc_ldar(dst, mem));
 7485 
 7486   ins_pipe(pipe_serial);
 7487 %}
 7488 
 7489 // Load Compressed Pointer
 7490 instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
 7491 %{
 7492   match(Set dst (LoadN mem));
 7493   predicate(n->as_Load()->barrier_data() == 0);
 7494 
 7495   ins_cost(VOLATILE_REF_COST);
 7496   format %{ "ldarw  $dst, $mem\t# compressed ptr" %}
 7497 
 7498   ins_encode(aarch64_enc_ldarw(dst, mem));
 7499 
 7500   ins_pipe(pipe_serial);
 7501 %}
 7502 
 7503 // Load Float
 7504 instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
 7505 %{
 7506   match(Set dst (LoadF mem));
 7507 
 7508   ins_cost(VOLATILE_REF_COST);
 7509   format %{ "ldars  $dst, $mem\t# float" %}
 7510 
 7511   ins_encode( aarch64_enc_fldars(dst, mem) );
 7512 
 7513   ins_pipe(pipe_serial);
 7514 %}
 7515 
 7516 // Load Double
 7517 instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
 7518 %{
 7519   match(Set dst (LoadD mem));
 7520 
 7521   ins_cost(VOLATILE_REF_COST);
 7522   format %{ "ldard  $dst, $mem\t# double" %}
 7523 
 7524   ins_encode( aarch64_enc_fldard(dst, mem) );
 7525 
 7526   ins_pipe(pipe_serial);
 7527 %}
 7528 
// Store Byte
// Volatile byte store: stlrb (store-release) provides the required
// release ordering without an explicit barrier.
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Immediate-zero variant: stores wzr directly, saving a register.
instruct storeimmB0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb0(mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
// Volatile 16-bit store via stlrh (store-release halfword).
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Immediate-zero variant of the volatile char/short store.
instruct storeimmC0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh0(mem));

  ins_pipe(pipe_class_memory);
%}

// Store Integer

// Volatile 32-bit store via stlrw (store-release word).
instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Immediate-zero variant of the volatile int store.
instruct storeimmI0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw0(mem));

  ins_pipe(pipe_class_memory);
%}
 7604 
 7605 // Store Long (64 bit signed)
 7606 instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
 7607 %{
 7608   match(Set mem (StoreL mem src));
 7609 
 7610   ins_cost(VOLATILE_REF_COST);
 7611   format %{ "stlr  $src, $mem\t# int" %}
 7612 
 7613   ins_encode(aarch64_enc_stlr(src, mem));
 7614 
 7615   ins_pipe(pipe_class_memory);
 7616 %}
 7617 
 7618 instruct storeimmL0_volatile(immL0 zero, /* sync_memory*/indirect mem)
 7619 %{
 7620   match(Set mem (StoreL mem zero));
 7621 
 7622   ins_cost(VOLATILE_REF_COST);
 7623   format %{ "stlr  zr, $mem\t# int" %}
 7624 
 7625   ins_encode(aarch64_enc_stlr0(mem));
 7626 
 7627   ins_pipe(pipe_class_memory);
 7628 %}
 7629 
// Store Pointer
// Volatile oop store. Predicated on barrier_data() == 0 so GC barrier
// implementations (e.g. ZGC) can supply their own rules instead.
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Immediate-null variant of the volatile pointer store: stores xzr.
instruct storeimmP0_volatile(immP0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr0(mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
// Volatile narrow-oop store (32-bit stlrw); same barrier_data gating
// as the full-width pointer store above.
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Immediate-null variant of the volatile narrow-oop store: stores wzr.
instruct storeimmN0_volatile(immN0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  zr, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw0(mem));

  ins_pipe(pipe_class_memory);
%}
 7683 
// Store Float
// Volatile float store: store-release from an FP/SIMD register.
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
// Volatile double store: 64-bit store-release from an FP/SIMD register.
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 7712 
 7713 //  ---------------- end of volatile loads and stores ----------------
 7714 
// Write back (flush) the cache line containing addr. Only selected when
// the CPU supports data cache line flush (used by NVRAM/pmem support).
instruct cacheWB(indirect addr)
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWB addr);

  ins_cost(100);
  format %{"cache wb $addr" %}
  ins_encode %{
    // The address must be a plain base register: no index, no displacement.
    assert($addr->index_position() < 0, "should be");
    assert($addr$$disp == 0, "should be");
    __ cache_wb(Address($addr$$base$$Register, 0));
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Ordering barrier emitted before a sequence of cache writebacks.
instruct cacheWBPreSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPreSync);

  ins_cost(100);
  format %{"cache wb presync" %}
  ins_encode %{
    __ cache_wbsync(true);
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Ordering barrier emitted after a sequence of cache writebacks.
instruct cacheWBPostSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPostSync);

  ins_cost(100);
  format %{"cache wb postsync" %}
  ins_encode %{
    __ cache_wbsync(false);
  %}
  ins_pipe(pipe_slow); // XXX
%}
 7755 
 7756 // ============================================================================
 7757 // BSWAP Instructions
 7758 
 7759 instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
 7760   match(Set dst (ReverseBytesI src));
 7761 
 7762   ins_cost(INSN_COST);
 7763   format %{ "revw  $dst, $src" %}
 7764 
 7765   ins_encode %{
 7766     __ revw(as_Register($dst$$reg), as_Register($src$$reg));
 7767   %}
 7768 
 7769   ins_pipe(ialu_reg);
 7770 %}
 7771 
 7772 instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
 7773   match(Set dst (ReverseBytesL src));
 7774 
 7775   ins_cost(INSN_COST);
 7776   format %{ "rev  $dst, $src" %}
 7777 
 7778   ins_encode %{
 7779     __ rev(as_Register($dst$$reg), as_Register($src$$reg));
 7780   %}
 7781 
 7782   ins_pipe(ialu_reg);
 7783 %}
 7784 
 7785 instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
 7786   match(Set dst (ReverseBytesUS src));
 7787 
 7788   ins_cost(INSN_COST);
 7789   format %{ "rev16w  $dst, $src" %}
 7790 
 7791   ins_encode %{
 7792     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
 7793   %}
 7794 
 7795   ins_pipe(ialu_reg);
 7796 %}
 7797 
 7798 instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
 7799   match(Set dst (ReverseBytesS src));
 7800 
 7801   ins_cost(INSN_COST);
 7802   format %{ "rev16w  $dst, $src\n\t"
 7803             "sbfmw $dst, $dst, #0, #15" %}
 7804 
 7805   ins_encode %{
 7806     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
 7807     __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
 7808   %}
 7809 
 7810   ins_pipe(ialu_reg);
 7811 %}
 7812 
 7813 // ============================================================================
 7814 // Zero Count Instructions
 7815 
 7816 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
 7817   match(Set dst (CountLeadingZerosI src));
 7818 
 7819   ins_cost(INSN_COST);
 7820   format %{ "clzw  $dst, $src" %}
 7821   ins_encode %{
 7822     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
 7823   %}
 7824 
 7825   ins_pipe(ialu_reg);
 7826 %}
 7827 
 7828 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
 7829   match(Set dst (CountLeadingZerosL src));
 7830 
 7831   ins_cost(INSN_COST);
 7832   format %{ "clz   $dst, $src" %}
 7833   ins_encode %{
 7834     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
 7835   %}
 7836 
 7837   ins_pipe(ialu_reg);
 7838 %}
 7839 
 7840 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
 7841   match(Set dst (CountTrailingZerosI src));
 7842 
 7843   ins_cost(INSN_COST * 2);
 7844   format %{ "rbitw  $dst, $src\n\t"
 7845             "clzw   $dst, $dst" %}
 7846   ins_encode %{
 7847     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
 7848     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
 7849   %}
 7850 
 7851   ins_pipe(ialu_reg);
 7852 %}
 7853 
 7854 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
 7855   match(Set dst (CountTrailingZerosL src));
 7856 
 7857   ins_cost(INSN_COST * 2);
 7858   format %{ "rbit   $dst, $src\n\t"
 7859             "clz    $dst, $dst" %}
 7860   ins_encode %{
 7861     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
 7862     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
 7863   %}
 7864 
 7865   ins_pipe(ialu_reg);
 7866 %}
 7867 
//---------- Population Count Instructions -------------------------------------
//
// There is no scalar popcount on AArch64; route the value through a SIMD
// register and use the vector cnt (per-byte popcount) + addv (horizontal add).

// Integer.bitCount(int).
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "fmovs  $tmp, $src\t# vector (1S)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ fmovs($tmp$$FloatRegister, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Memory form: load the int straight into the SIMD register, avoiding the
// GPR->FPR move.
instruct popCountI_mem(iRegINoSp dst, memory4 mem, vRegF tmp) %{
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(masm, &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Memory form of Long.bitCount: load the long directly into the SIMD register.
instruct popCountL_mem(iRegINoSp dst, memory8 mem, vRegD tmp) %{
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(masm, &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
 7951 
 7952 // ============================================================================
 7953 // VerifyVectorAlignment Instruction
 7954 
 7955 instruct verify_vector_alignment(iRegP addr, immL_positive_bitmaskI mask, rFlagsReg cr) %{
 7956   match(Set addr (VerifyVectorAlignment addr mask));
 7957   effect(KILL cr);
 7958   format %{ "verify_vector_alignment $addr $mask \t! verify alignment" %}
 7959   ins_encode %{
 7960     Label Lskip;
 7961     // check if masked bits of addr are zero
 7962     __ tst($addr$$Register, $mask$$constant);
 7963     __ br(Assembler::EQ, Lskip);
 7964     __ stop("verify_vector_alignment found a misaligned vector memory access");
 7965     __ bind(Lskip);
 7966   %}
 7967   ins_pipe(pipe_slow);
 7968 %}
 7969 
 7970 // ============================================================================
 7971 // MemBar Instruction
 7972 
 7973 instruct load_fence() %{
 7974   match(LoadFence);
 7975   ins_cost(VOLATILE_REF_COST);
 7976 
 7977   format %{ "load_fence" %}
 7978 
 7979   ins_encode %{
 7980     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
 7981   %}
 7982   ins_pipe(pipe_serial);
 7983 %}
 7984 
 7985 instruct unnecessary_membar_acquire() %{
 7986   predicate(unnecessary_acquire(n));
 7987   match(MemBarAcquire);
 7988   ins_cost(0);
 7989 
 7990   format %{ "membar_acquire (elided)" %}
 7991 
 7992   ins_encode %{
 7993     __ block_comment("membar_acquire (elided)");
 7994   %}
 7995 
 7996   ins_pipe(pipe_class_empty);
 7997 %}
 7998 
 7999 instruct membar_acquire() %{
 8000   match(MemBarAcquire);
 8001   ins_cost(VOLATILE_REF_COST);
 8002 
 8003   format %{ "membar_acquire\n\t"
 8004             "dmb ishld" %}
 8005 
 8006   ins_encode %{
 8007     __ block_comment("membar_acquire");
 8008     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
 8009   %}
 8010 
 8011   ins_pipe(pipe_serial);
 8012 %}
 8013 
 8014 
 8015 instruct membar_acquire_lock() %{
 8016   match(MemBarAcquireLock);
 8017   ins_cost(VOLATILE_REF_COST);
 8018 
 8019   format %{ "membar_acquire_lock (elided)" %}
 8020 
 8021   ins_encode %{
 8022     __ block_comment("membar_acquire_lock (elided)");
 8023   %}
 8024 
 8025   ins_pipe(pipe_serial);
 8026 %}
 8027 
 8028 instruct store_fence() %{
 8029   match(StoreFence);
 8030   ins_cost(VOLATILE_REF_COST);
 8031 
 8032   format %{ "store_fence" %}
 8033 
 8034   ins_encode %{
 8035     __ membar(Assembler::LoadStore|Assembler::StoreStore);
 8036   %}
 8037   ins_pipe(pipe_serial);
 8038 %}
 8039 
 8040 instruct unnecessary_membar_release() %{
 8041   predicate(unnecessary_release(n));
 8042   match(MemBarRelease);
 8043   ins_cost(0);
 8044 
 8045   format %{ "membar_release (elided)" %}
 8046 
 8047   ins_encode %{
 8048     __ block_comment("membar_release (elided)");
 8049   %}
 8050   ins_pipe(pipe_serial);
 8051 %}
 8052 
 8053 instruct membar_release() %{
 8054   match(MemBarRelease);
 8055   ins_cost(VOLATILE_REF_COST);
 8056 
 8057   format %{ "membar_release\n\t"
 8058             "dmb ishst\n\tdmb ishld" %}
 8059 
 8060   ins_encode %{
 8061     __ block_comment("membar_release");
 8062     // These will be merged if AlwaysMergeDMB is enabled.
 8063     __ membar(Assembler::StoreStore);
 8064     __ membar(Assembler::LoadStore);
 8065   %}
 8066   ins_pipe(pipe_serial);
 8067 %}
 8068 
 8069 instruct membar_storestore() %{
 8070   match(MemBarStoreStore);
 8071   match(StoreStoreFence);
 8072   ins_cost(VOLATILE_REF_COST);
 8073 
 8074   format %{ "MEMBAR-store-store" %}
 8075 
 8076   ins_encode %{
 8077     __ membar(Assembler::StoreStore);
 8078   %}
 8079   ins_pipe(pipe_serial);
 8080 %}
 8081 
 8082 instruct membar_release_lock() %{
 8083   match(MemBarReleaseLock);
 8084   ins_cost(VOLATILE_REF_COST);
 8085 
 8086   format %{ "membar_release_lock (elided)" %}
 8087 
 8088   ins_encode %{
 8089     __ block_comment("membar_release_lock (elided)");
 8090   %}
 8091 
 8092   ins_pipe(pipe_serial);
 8093 %}
 8094 
 8095 instruct unnecessary_membar_volatile() %{
 8096   predicate(unnecessary_volatile(n));
 8097   match(MemBarVolatile);
 8098   ins_cost(0);
 8099 
 8100   format %{ "membar_volatile (elided)" %}
 8101 
 8102   ins_encode %{
 8103     __ block_comment("membar_volatile (elided)");
 8104   %}
 8105 
 8106   ins_pipe(pipe_serial);
 8107 %}
 8108 
 8109 instruct membar_volatile() %{
 8110   match(MemBarVolatile);
 8111   ins_cost(VOLATILE_REF_COST*100);
 8112 
 8113   format %{ "membar_volatile\n\t"
 8114              "dmb ish"%}
 8115 
 8116   ins_encode %{
 8117     __ block_comment("membar_volatile");
 8118     __ membar(Assembler::StoreLoad);
 8119   %}
 8120 
 8121   ins_pipe(pipe_serial);
 8122 %}
 8123 
 8124 // ============================================================================
 8125 // Cast/Convert Instructions
 8126 
 8127 instruct castX2P(iRegPNoSp dst, iRegL src) %{
 8128   match(Set dst (CastX2P src));
 8129 
 8130   ins_cost(INSN_COST);
 8131   format %{ "mov $dst, $src\t# long -> ptr" %}
 8132 
 8133   ins_encode %{
 8134     if ($dst$$reg != $src$$reg) {
 8135       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 8136     }
 8137   %}
 8138 
 8139   ins_pipe(ialu_reg);
 8140 %}
 8141 
 8142 instruct castP2X(iRegLNoSp dst, iRegP src) %{
 8143   match(Set dst (CastP2X src));
 8144 
 8145   ins_cost(INSN_COST);
 8146   format %{ "mov $dst, $src\t# ptr -> long" %}
 8147 
 8148   ins_encode %{
 8149     if ($dst$$reg != $src$$reg) {
 8150       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 8151     }
 8152   %}
 8153 
 8154   ins_pipe(ialu_reg);
 8155 %}
 8156 
 8157 // Convert oop into int for vectors alignment masking
 8158 instruct convP2I(iRegINoSp dst, iRegP src) %{
 8159   match(Set dst (ConvL2I (CastP2X src)));
 8160 
 8161   ins_cost(INSN_COST);
 8162   format %{ "movw $dst, $src\t# ptr -> int" %}
 8163   ins_encode %{
 8164     __ movw($dst$$Register, $src$$Register);
 8165   %}
 8166 
 8167   ins_pipe(ialu_reg);
 8168 %}
 8169 
 8170 // Convert compressed oop into int for vectors alignment masking
 8171 // in case of 32bit oops (heap < 4Gb).
 8172 instruct convN2I(iRegINoSp dst, iRegN src)
 8173 %{
 8174   predicate(CompressedOops::shift() == 0);
 8175   match(Set dst (ConvL2I (CastP2X (DecodeN src))));
 8176 
 8177   ins_cost(INSN_COST);
 8178   format %{ "mov dst, $src\t# compressed ptr -> int" %}
 8179   ins_encode %{
 8180     __ movw($dst$$Register, $src$$Register);
 8181   %}
 8182 
 8183   ins_pipe(ialu_reg);
 8184 %}
 8185 

// Convert oop pointer into compressed form
// Nullable encode: encode_heap_oop must handle a null src, and may use
// the flags, hence the KILL cr effect.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Not-null encode: no null check needed.
// NOTE(review): cr is declared but there is no KILL effect here -- presumably
// encode_heap_oop_not_null leaves the flags untouched; confirm before relying
// on flags across this instruction.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Nullable decode of a compressed oop.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Not-null (or constant) decode of a compressed oop.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
 8240 
// n.b. AArch64 implementations of encode_klass_not_null and
// decode_klass_not_null do not modify the flags register so, unlike
// Intel, we don't kill CR as a side effect here

// Compress a klass pointer (always non-null).
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// Decompress a narrow klass pointer (always non-null). The in-place
// (single-register) overload is used when dst and src coincide.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
 8278 
// The Cast* nodes below exist only to carry type information for the
// optimizer; they pin a value's type without changing its bits, so the
// unchecked variants emit no code at all (size(0), empty encoding).

instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castII(iRegI dst)
%{
  predicate(VerifyConstraintCasts == 0);
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// Debug variant: when VerifyConstraintCasts is enabled, emit a runtime
// range check that the value really lies within the node's int type.
instruct castII_checked(iRegI dst, rFlagsReg cr)
%{
  predicate(VerifyConstraintCasts > 0);
  match(Set dst (CastII dst));
  effect(KILL cr);

  format %{ "# castII_checked of $dst" %}
  ins_encode %{
    __ verify_int_in_range(_idx, bottom_type()->is_int(), $dst$$Register, rscratch1);
  %}
  ins_pipe(pipe_slow);
%}

instruct castLL(iRegL dst)
%{
  predicate(VerifyConstraintCasts == 0);
  match(Set dst (CastLL dst));

  size(0);
  format %{ "# castLL of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// Debug variant of castLL; see castII_checked.
instruct castLL_checked(iRegL dst, rFlagsReg cr)
%{
  predicate(VerifyConstraintCasts > 0);
  match(Set dst (CastLL dst));
  effect(KILL cr);

  format %{ "# castLL_checked of $dst" %}
  ins_encode %{
    __ verify_long_in_range(_idx, bottom_type()->is_long(), $dst$$Register, rscratch1);
  %}
  ins_pipe(pipe_slow);
%}

instruct castHH(vRegF dst)
%{
  match(Set dst (CastHH dst));
  size(0);
  format %{ "# castHH of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castFF(vRegF dst)
%{
  match(Set dst (CastFF dst));

  size(0);
  format %{ "# castFF of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castDD(vRegD dst)
%{
  match(Set dst (CastDD dst));

  size(0);
  format %{ "# castDD of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castVV(vReg dst)
%{
  match(Set dst (CastVV dst));

  size(0);
  format %{ "# castVV of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// Same as castVV but for a vector value held in a predicate register.
instruct castVVMask(pRegGov dst)
%{
  match(Set dst (CastVV dst));

  size(0);
  format %{ "# castVV of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
 8402 
 8403 // ============================================================================
 8404 // Atomic operation instructions
 8405 //
 8406 
 8407 // standard CompareAndSwapX when we are using barriers
 8408 // these have higher priority than the rules selected by a predicate
 8409 
 8410 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
 8411 // can't match them
 8412 
 8413 instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 8414 
 8415   match(Set res (CompareAndSwapB mem (Binary oldval newval)));
 8416   ins_cost(2 * VOLATILE_REF_COST);
 8417 
 8418   effect(KILL cr);
 8419 
 8420   format %{
 8421     "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 8422     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8423   %}
 8424 
 8425   ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
 8426             aarch64_enc_cset_eq(res));
 8427 
 8428   ins_pipe(pipe_slow);
 8429 %}
 8430 
 8431 instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 8432 
 8433   match(Set res (CompareAndSwapS mem (Binary oldval newval)));
 8434   ins_cost(2 * VOLATILE_REF_COST);
 8435 
 8436   effect(KILL cr);
 8437 
 8438   format %{
 8439     "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 8440     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8441   %}
 8442 
 8443   ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
 8444             aarch64_enc_cset_eq(res));
 8445 
 8446   ins_pipe(pipe_slow);
 8447 %}
 8448 
 8449 instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 8450 
 8451   match(Set res (CompareAndSwapI mem (Binary oldval newval)));
 8452   ins_cost(2 * VOLATILE_REF_COST);
 8453 
 8454   effect(KILL cr);
 8455 
 8456  format %{
 8457     "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 8458     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8459  %}
 8460 
 8461  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
 8462             aarch64_enc_cset_eq(res));
 8463 
 8464   ins_pipe(pipe_slow);
 8465 %}
 8466 
 8467 instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
 8468 
 8469   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
 8470   ins_cost(2 * VOLATILE_REF_COST);
 8471 
 8472   effect(KILL cr);
 8473 
 8474  format %{
 8475     "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
 8476     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8477  %}
 8478 
 8479  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
 8480             aarch64_enc_cset_eq(res));
 8481 
 8482   ins_pipe(pipe_slow);
 8483 %}
 8484 
 8485 instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
 8486 
 8487   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
 8488   predicate(n->as_LoadStore()->barrier_data() == 0);
 8489   ins_cost(2 * VOLATILE_REF_COST);
 8490 
 8491   effect(KILL cr);
 8492 
 8493  format %{
 8494     "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
 8495     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8496  %}
 8497 
 8498  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
 8499             aarch64_enc_cset_eq(res));
 8500 
 8501   ins_pipe(pipe_slow);
 8502 %}
 8503 
 8504 instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
 8505 
 8506   match(Set res (CompareAndSwapN mem (Binary oldval newval)));
 8507   predicate(n->as_LoadStore()->barrier_data() == 0);
 8508   ins_cost(2 * VOLATILE_REF_COST);
 8509 
 8510   effect(KILL cr);
 8511 
 8512  format %{
 8513     "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
 8514     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8515  %}
 8516 
 8517  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
 8518             aarch64_enc_cset_eq(res));
 8519 
 8520   ins_pipe(pipe_slow);
 8521 %}
 8522 
// alternative CompareAndSwapX when we are eliding barriers
//
// These acquiring variants are selected (needs_acquiring_load_exclusive)
// when the surrounding barriers have been elided, so the exclusive load
// inside the cmpxchg must itself carry acquire semantics. Note the lower
// ins_cost, which makes them preferred over the plain forms when legal.

instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring oop CAS; also requires barrier_data() == 0 (see compareAndSwapP).
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring narrow-oop CAS; also requires barrier_data() == 0.
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8638 
 8639 
 8640 // ---------------------------------------------------------------------
 8641 
 8642 // BEGIN This section of the file is automatically generated. Do not edit --------------
 8643 
 8644 // Sundry CAS operations.  Note that release is always true,
 8645 // regardless of the memory ordering of the CAS.  This is because we
 8646 // need the volatile case to be sequentially consistent but there is
 8647 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
 8648 // can't check the type of memory ordering here, so we always emit a
 8649 // STLXR.
 8650 
 8651 // This section is generated from cas.m4
 8652 
 8653 
// NOTE(review): the ", weak)" text in the CompareAndExchange* formats below
// is misleading -- these are strong CAS operations (every encoding passes
// /*weak*/ false).  Because this whole section is regenerated from cas.m4,
// the wording fix belongs in cas.m4, not here.  The byte/short forms
// sign-extend the returned value (sxtbw/sxthw) to match Java semantics.

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 8759 
// NOTE(review): acquiring variants of CompareAndExchange*, matched only when
// needs_acquiring_load_exclusive(n) holds and priced at VOLATILE_REF_COST so
// the matcher prefers them over the relaxed forms (2 * VOLATILE_REF_COST)
// in that case.  As with the relaxed forms, the ", weak)" wording in the
// format text is misleading (the encodings pass /*weak*/ false); any fix
// belongs in cas.m4, from which this section is regenerated.

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 8869 
// NOTE(review): genuinely weak CAS forms: the encodings pass /*weak*/ true
// and discard the loaded value (noreg); the boolean $res is produced from
// the EQ flag via csetw instead.  Do not change anything here -- this
// section is regenerated from cas.m4.

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 8985 
// NOTE(review): acquiring variants of the weak CAS forms, matched when
// needs_acquiring_load_exclusive(n) holds; priced at VOLATILE_REF_COST so
// they win over the relaxed forms (2 * VOLATILE_REF_COST) there.  As above,
// the encodings pass /*weak*/ true and discard the loaded value (noreg);
// $res comes from the EQ flag.  Regenerated from cas.m4 -- do not edit here.

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9105 
 9106 // END This section of the file is automatically generated. Do not edit --------------
 9107 // ---------------------------------------------------------------------
 9108 
// Atomic exchange (GetAndSet*): $prev <-- old value at [$mem];
// [$mem] <-- $newv.  Relaxed memory-order forms.  The narrow-oop (N) and
// pointer (P) patterns only match when the node carries no GC barrier data
// (barriered accesses are presumably matched by GC-specific rules not
// visible in this chunk).
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9150 
// Acquiring exchange forms: matched when needs_acquiring_load_exclusive(n)
// holds, and priced at VOLATILE_REF_COST (vs 2 * VOLATILE_REF_COST above)
// so the matcher prefers them in that case.  They emit the atomic_xchgal[w]
// variants -- "-al" presumably means acquire+release semantics; confirm
// against MacroAssembler before relying on this.
instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetI mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetL mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetN mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9194 
 9195 
// Atomic fetch-and-add (GetAndAdd{L,I}), relaxed forms.  The operand named
// $newval receives the node's result (presumably the pre-add value, per
// GetAndAdd semantics -- confirm against MacroAssembler::atomic_add[w]).
// Register increments (get_and_addX) and add/sub-immediate increments
// (get_and_addXi) each have a *_no_res twin that matches when the result
// is unused and discards the old value (noreg); the result-producing forms
// cost 1 more, so the matcher prefers the no-result forms when applicable.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9279 
 9280 instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
 9281   predicate(needs_acquiring_load_exclusive(n));
 9282   match(Set newval (GetAndAddL mem incr));
 9283   ins_cost(VOLATILE_REF_COST + 1);
 9284   format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
 9285   ins_encode %{
 9286     __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
 9287   %}
 9288   ins_pipe(pipe_serial);
 9289 %}
 9290 
 9291 instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
 9292   predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
 9293   match(Set dummy (GetAndAddL mem incr));
 9294   ins_cost(VOLATILE_REF_COST);
 9295   format %{ "get_and_addL_acq [$mem], $incr" %}
 9296   ins_encode %{
 9297     __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
 9298   %}
 9299   ins_pipe(pipe_serial);
 9300 %}
 9301 
 9302 instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
 9303   predicate(needs_acquiring_load_exclusive(n));
 9304   match(Set newval (GetAndAddL mem incr));
 9305   ins_cost(VOLATILE_REF_COST + 1);
 9306   format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
 9307   ins_encode %{
 9308     __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
 9309   %}
 9310   ins_pipe(pipe_serial);
 9311 %}
 9312 
// GetAndAddL, acquiring flavour, constant increment, result unused.
instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9323 
// GetAndAddI, acquiring flavour: 32-bit atomic add with load-acquire
// semantics (atomic_addalw); returns the old value in newval.
instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9334 
// GetAndAddI, acquiring flavour, result unused: old value discarded via noreg.
instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9345 
// GetAndAddI, acquiring flavour, constant (add/sub-encodable) increment.
instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9356 
// GetAndAddI, acquiring flavour, constant increment, result unused.
instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9367 
 9368 // Manifest a CmpU result in an integer register.
 9369 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Three-way unsigned int compare: dst gets -1/0/+1.
// cmpw sets flags; csetw makes dst 1 when not-equal; cnegw negates
// when src1 is unsigned-lower (LO), yielding -1.
instruct cmpU3_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg flags)
%{
  match(Set dst (CmpU3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmpw $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpU3(reg)"
  %}
  ins_encode %{
    __ cmpw($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}
 9389 
// Three-way unsigned int compare against an add/sub-encodable immediate.
// subsw against zr is the immediate form of the compare.
instruct cmpU3_reg_imm(iRegINoSp dst, iRegI src1, immIAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpU3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subsw zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpU3(imm)"
  %}
  ins_encode %{
    __ subsw(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}
 9409 
 9410 // Manifest a CmpUL result in an integer register.
 9411 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Three-way unsigned long compare: dst gets -1/0/+1 (64-bit cmp,
// 32-bit result manipulation).
instruct cmpUL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpUL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmp $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpUL3(reg)"
  %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}
 9431 
// Three-way unsigned long compare against an add/sub-encodable immediate.
// immLAddSub guarantees the constant fits the 64-bit subs immediate form.
instruct cmpUL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpUL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subs zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpUL3(imm)"
  %}
  ins_encode %{
    __ subs(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}
 9451 
 9452 // Manifest a CmpL result in an integer register.
 9453 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Three-way signed long compare: like CmpUL3 but negates on the signed
// less-than condition (LT) instead of unsigned lower (LO).
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmp $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lt\t# CmpL3(reg)"
  %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
 9473 
// Three-way signed long compare against an add/sub-encodable immediate.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subs zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lt\t# CmpL3(imm)"
  %}
  ins_encode %{
    __ subs(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
 9493 
 9494 // ============================================================================
 9495 // Conditional Move Instructions
 9496 
 9497 // n.b. we have identical rules for both a signed compare op (cmpOp)
 9498 // and an unsigned compare op (cmpOpU). it would be nice if we could
 9499 // define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
 9504 // which throws a ShouldNotHappen. So, we have to provide two flavours
 9505 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
 9506 
// Conditional move, int, signed compare: dst = cond ? src2 : src1.
// Note the operand order: cselw selects src2 when the condition holds.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9522 
// Conditional move, int, unsigned compare flavour (cmpOpU/rFlagsRegU);
// encoding is identical to the signed rule.
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9538 
 9539 // special cases where one arg is zero
 9540 
 9541 // n.b. this is selected in preference to the rule above because it
 9542 // avoids loading constant 0 into a source register
 9543 
 9544 // TODO
 9545 // we ought only to be able to cull one of these variants as the ideal
 9546 // transforms ought always to order the zero consistently (to left/right?)
 9547 
// Conditional move, int, signed compare, first arg zero: use zr as the
// not-taken source instead of materializing constant 0.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9563 
// Conditional move, int, unsigned compare, first arg zero.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9579 
// Conditional move, int, signed compare, second arg zero: select zr when
// the condition holds.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9595 
// Conditional move, int, unsigned compare, second arg zero.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9611 
 9612 // special case for creating a boolean 0 or 1
 9613 
 9614 // n.b. this is selected in preference to the rule above because it
 9615 // avoids loading constants 0 and 1 into a source register
 9616 
// Boolean materialization, signed compare: CMoveI with constants 1/0 is
// csincw dst, zr, zr, cond (dst = cond ? 0 : 0+1) — no source registers needed.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
 9635 
// Boolean materialization, unsigned compare flavour of the rule above.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
 9654 
// Conditional move, long, signed compare: dst = cond ? src2 : src1 (64-bit csel).
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9670 
// Conditional move, long, unsigned compare flavour.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9686 
 9687 // special cases where one arg is zero
 9688 
// Conditional move, long, signed compare, second arg zero: select zr.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9704 
// Conditional move, long, unsigned compare, second arg zero.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9720 
// Conditional move, long, signed compare, first arg zero.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9736 
// Conditional move, long, unsigned compare, first arg zero.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9752 
// Conditional move, pointer, signed compare: dst = cond ? src2 : src1.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9768 
// Conditional move, pointer, unsigned compare flavour.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9784 
 9785 // special cases where one arg is zero
 9786 
// Conditional move, pointer, signed compare, second arg null: select zr.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9802 
// Conditional move, pointer, unsigned compare, second arg null.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9818 
// Conditional move, pointer, signed compare, first arg null.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9834 
// Conditional move, pointer, unsigned compare, first arg null.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9850 
// Conditional move, compressed pointer (narrow oop), signed compare:
// 32-bit cselw since narrow oops are 32 bits wide.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9866 
 9867 instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
 9868   match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));
 9869 
 9870   ins_cost(INSN_COST * 2);
 9871   format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}
 9872 
 9873   ins_encode %{
 9874     __ cselw(as_Register($dst$$reg),
 9875              as_Register($src2$$reg),
 9876              as_Register($src1$$reg),
 9877              (Assembler::Condition)$cmp$$cmpcode);
 9878   %}
 9879 
 9880   ins_pipe(icond_reg_reg);
 9881 %}
 9882 
 9883 // special cases where one arg is zero
 9884 
// Conditional move, compressed pointer, signed compare, second arg null.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9900 
// Conditional move, compressed pointer, unsigned compare, second arg null.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9916 
// Conditional move, compressed pointer, signed compare, first arg null.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9932 
// Conditional move, compressed pointer, unsigned compare, first arg null.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9948 
// Conditional move, float, signed compare: fcsels selects src2 when the
// condition holds (note src2/src1 operand order in the encoding).
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
 9966 
// Conditional move, float, unsigned compare flavour.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
 9984 
 9985 instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
 9986 %{
 9987   match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));
 9988 
 9989   ins_cost(INSN_COST * 3);
 9990 
 9991   format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
 9992   ins_encode %{
 9993     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
 9994     __ fcseld(as_FloatRegister($dst$$reg),
 9995               as_FloatRegister($src2$$reg),
 9996               as_FloatRegister($src1$$reg),
 9997               cond);
 9998   %}
 9999 
10000   ins_pipe(fp_cond_reg_reg_d);
10001 %}
10002 
// Conditional move, double, unsigned compare flavour.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // Format annotation fixed: this is the CMoveD (double) rule; the comment
  // previously said "cmove float".
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10020 
10021 // ============================================================================
10022 // Arithmetic Instructions
10023 //
10024 
10025 // Integer Addition
10026 
10027 // TODO
10028 // these currently employ operations which do not set CR and hence are
10029 // not flagged as killing CR but we would like to isolate the cases
10030 // where we want to set flags from those where we don't. need to work
10031 // out how to do that.
10032 
// Integer addition, register + register: 32-bit addw.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10047 
// Integer addition, register + add/sub-encodable immediate.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10061 
// Integer addition of an immediate to the low 32 bits of a long
// (ConvL2I folded away: addw reads only the low word of src1).
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10075 
10076 // Pointer Addition
// Pointer addition, register + long offset register: 64-bit add.
instruct addP_reg_reg(iRegPNoSp dst, iRegPorL2P src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10091 
// Pointer addition with an int offset: the ConvI2L is folded into the
// add's sxtw (sign-extend word) operand extension, saving an instruction.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegPorL2P src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}
10106 
// Pointer addition of a scaled long index: the shift is folded into the
// address-generation form (lea with lsl).
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegPorL2P src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10121 
// Pointer addition of a scaled, sign-extended int index: ConvI2L plus
// LShiftL fold into a single lea with sxtw extension and scale.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegPorL2P src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10136 
// Left shift of a sign-extended int: ConvI2L + LShiftL collapse into a
// single sbfiz (signed bit-field insert in zero). The width operand is
// capped at 32 since only 32 source bits are meaningful.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN2(32, (int)((-$scale$$constant) & 63)));
  %}

  ins_pipe(ialu_reg_shift);
%}
10151 
10152 // Pointer Immediate Addition
10153 // n.b. this needs to be more expensive than using an indirect memory
10154 // operand
// Pointer addition, register + add/sub-encodable immediate.
instruct addP_reg_imm(iRegPNoSp dst, iRegPorL2P src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10168 
10169 // Long Addition
// Long addition, register + register: 64-bit add.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10185 
// Long Immediate Addition -- no constant pool entries required.
// Long addition, register + add/sub-encodable immediate.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10200 
10201 // Integer Subtraction
// Integer subtraction, register - register: 32-bit subw.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10216 
10217 // Immediate Subtraction
// Integer subtraction, register - add/sub-encodable immediate.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10231 
10232 // Long Subtraction
// Long subtraction, register - register: 64-bit sub.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10248 
// Long Immediate Subtraction -- no constant pool entries required.
// Long subtraction, register - add/sub-encodable immediate.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // Fixed format string: was "sub$dst, ..." (missing separator after the
  // mnemonic), which produced garbled -XX:+PrintOptoAssembly output.
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10263 
10264 // Integer Negation (special case for sub)
10265 
// dst = -src, matched from (0 - src); emits 32-bit "negw".
// NOTE(review): `cr` is declared but unused by the encoding — presumably
// matcher/allocator bookkeeping; confirm against other neg rules.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10279 
10280 // Long Negation
10281 
// dst = -src, matched from (0 - src); emits 64-bit "neg".
// NOTE(review): `cr` is declared but unused by the encoding — see negI_reg.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10295 
10296 // Integer Multiply
10297 
// dst = src1 * src2; emits 32-bit "mulw".
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10312 
// Widening multiply: matches (long)src1 * (long)src2 where both operands
// are sign-extended ints, and emits a single "smull".
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10327 
10328 // Long Multiply
10329 
// dst = src1 * src2; emits 64-bit "mul".
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10344 
// High 64 bits of the signed 128-bit product of src1 and src2 ("smulh").
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10360 
// High 64 bits of the unsigned 128-bit product of src1 and src2 ("umulh").
instruct umulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (UMulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "umulh   $dst, $src1, $src2\t# umulhi" %}

  ins_encode %{
    __ umulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10376 
10377 // Combined Integer Multiply & Add/Sub
10378 
// Fused multiply-add: dst = src3 + src1 * src2, in one instruction.
// Note the format prints "madd" although the 32-bit "maddw" is emitted.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10394 
// Fused multiply-subtract: dst = src3 - src1 * src2, in one instruction.
// Note the format prints "msub" although the 32-bit "msubw" is emitted.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10410 
10411 // Combined Integer Multiply & Neg
10412 
// Multiply-negate: matches (0 - src1) * src2 and emits one "mnegw".
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
  match(Set dst (MulI (SubI zero src1) src2));

  ins_cost(INSN_COST * 3);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mnegw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10427 
10428 // Combined Long Multiply & Add/Sub
10429 
// Fused 64-bit multiply-add: dst = src3 + src1 * src2 ("madd").
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10445 
// Fused 64-bit multiply-subtract: dst = src3 - src1 * src2 ("msub").
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10461 
10462 // Combined Long Multiply & Neg
10463 
// 64-bit multiply-negate: matches (0 - src1) * src2 and emits one "mneg".
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mneg(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10478 
10479 // Combine Integer Signed Multiply & Add/Sub/Neg Long
10480 
// Widening multiply-add: dst = src3 + (long)src1 * (long)src2 ("smaddl"),
// with both int operands sign-extended by the instruction itself.
instruct smaddL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (AddL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smaddl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smaddl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10496 
// Widening multiply-subtract: dst = src3 - (long)src1 * (long)src2 ("smsubl").
instruct smsubL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (SubL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smsubl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smsubl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10512 
// Widening multiply-negate: dst = -((long)src1 * (long)src2) ("smnegl").
instruct smnegL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero (ConvI2L src1)) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smnegl  $dst, $src1, $src2" %}

  ins_encode %{
    __ smnegl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10527 
10528 // Combined Multiply-Add Shorts into Integer (dst = src1 * src2 + src3 * src4)
10529 
// dst = src1 * src2 + src3 * src4 in two instructions, staging the first
// product in rscratch1.
instruct muladdS2I(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3, iRegIorL2I src4) %{
  match(Set dst (MulAddS2I (Binary src1 src2) (Binary src3 src4)));

  ins_cost(INSN_COST * 5);
  format %{ "mulw  rscratch1, $src1, $src2\n\t"
            "maddw $dst, $src3, $src4, rscratch1" %}

  ins_encode %{
    __ mulw(rscratch1, as_Register($src1$$reg), as_Register($src2$$reg));
    __ maddw(as_Register($dst$$reg), as_Register($src3$$reg), as_Register($src4$$reg), rscratch1); %}

  ins_pipe(imac_reg_reg);
%}
10543 
10544 // Integer Divide
10545 
// Signed 32-bit divide ("sdivw") via the shared divw encoder.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10555 
10556 // Long Divide
10557 
// Signed 64-bit divide ("sdiv") via the shared div encoder.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10567 
10568 // Integer Remainder
10569 
// Signed 32-bit remainder: no hardware rem, so divide then multiply-subtract
// (dst = src1 - (src1/src2)*src2), staging the quotient in rscratch1.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10580 
10581 // Long Remainder
10582 
// Signed 64-bit remainder: divide then multiply-subtract
// (dst = src1 - (src1/src2)*src2), staging the quotient in rscratch1.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Fixed: second format line now continues with "\n\t" (was bare "\n"),
  // matching modI/UmodI so multi-line disassembly stays aligned.
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10593 
10594 // Unsigned Integer Divide
10595 
// Unsigned 32-bit divide ("udivw").
instruct UdivI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (UDivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "udivw  $dst, $src1, $src2" %}

  ins_encode %{
    __ udivw($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(idiv_reg_reg);
%}
10608 
// Unsigned Long Divide
10610 
// Unsigned 64-bit divide ("udiv").
instruct UdivL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (UDivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "udiv   $dst, $src1, $src2" %}

  ins_encode %{
    __ udiv($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(ldiv_reg_reg);
%}
10623 
10624 // Unsigned Integer Remainder
10625 
// Unsigned 32-bit remainder via udivw + msubw, staging the quotient in
// rscratch1 (dst = src1 - (src1/src2)*src2).
instruct UmodI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (UModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "udivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode %{
    __ udivw(rscratch1, $src1$$Register, $src2$$Register);
    __ msubw($dst$$Register, rscratch1, $src2$$Register, $src1$$Register);
  %}

  ins_pipe(idiv_reg_reg);
%}
10640 
10641 // Unsigned Long Remainder
10642 
// Unsigned 64-bit remainder via udiv + msub, staging the quotient in
// rscratch1 (dst = src1 - (src1/src2)*src2).
instruct UModL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (UModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Fixed: second format line now continues with "\n\t" (was bare "\n"),
  // matching UmodI/modI so multi-line disassembly stays aligned.
  format %{ "udiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode %{
    __ udiv(rscratch1, $src1$$Register, $src2$$Register);
    __ msub($dst$$Register, rscratch1, $src2$$Register, $src1$$Register);
  %}

  ins_pipe(ldiv_reg_reg);
%}
10657 
10658 // Integer Shifts
10659 
10660 // Shift Left Register
// Variable left shift: dst = src1 << src2 ("lslvw").
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10675 
10676 // Shift Left Immediate
// Immediate left shift; the shift count is masked to the low 5 bits, matching
// Java's int-shift semantics.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10691 
10692 // Shift Right Logical Register
// Variable unsigned right shift: dst = src1 >>> src2 ("lsrvw").
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10707 
10708 // Shift Right Logical Immediate
// Immediate unsigned right shift; count masked to the low 5 bits.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10723 
10724 // Shift Right Arithmetic Register
// Variable arithmetic right shift: dst = src1 >> src2 ("asrvw").
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10739 
10740 // Shift Right Arithmetic Immediate
// Immediate arithmetic right shift; count masked to the low 5 bits.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10755 
10756 // Combined Int Mask and Right Shift (using UBFM)
10757 // TODO
10758 
10759 // Long Shifts
10760 
10761 // Shift Left Register
// Variable 64-bit left shift: dst = src1 << src2 ("lslv").
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10776 
10777 // Shift Left Immediate
// Immediate 64-bit left shift; the count is masked to the low 6 bits,
// matching Java's long-shift semantics.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10792 
10793 // Shift Right Logical Register
// Variable 64-bit unsigned right shift: dst = src1 >>> src2 ("lsrv").
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10808 
10809 // Shift Right Logical Immediate
// Immediate 64-bit unsigned right shift; count masked to the low 6 bits.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10824 
10825 // A special-case pattern for card table stores.
// A special-case pattern for card table stores: the CastP2X reinterprets the
// pointer as a long, then the same masked "lsr" is applied.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10840 
10841 // Shift Right Arithmetic Register
// Variable 64-bit arithmetic right shift: dst = src1 >> src2 ("asrv").
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10856 
10857 // Shift Right Arithmetic Immediate
// Immediate 64-bit arithmetic right shift; count masked to the low 6 bits.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10872 
10873 // BEGIN This section of the file is automatically generated. Do not edit --------------
10874 // This section is generated from aarch64_ad.m4
10875 
10876 // This pattern is automatically generated from aarch64_ad.m4
10877 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = ~src1 (xor with -1), emitted as "eon dst, src1, zr".
// NOTE(review): generated section — lasting comment changes belong in aarch64_ad.m4.
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
10894 
10895 // This pattern is automatically generated from aarch64_ad.m4
10896 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = ~src1 (xor with -1), emitted as "eonw dst, src1, zr".
// NOTE(review): generated section — lasting comment changes belong in aarch64_ad.m4.
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
10913 
10914 // This pattern is automatically generated from aarch64_ad.m4
10915 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = -(src1 >>> src2), folded into negw with an LSR-shifted operand.
instruct NegI_reg_URShift_reg(iRegINoSp dst,
                              immI0 zero, iRegIorL2I src1, immI src2) %{
  match(Set dst (SubI zero (URShiftI src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "negw  $dst, $src1, LSR $src2" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSR, $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10930 
10931 // This pattern is automatically generated from aarch64_ad.m4
10932 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = -(src1 >> src2), folded into negw with an ASR-shifted operand.
instruct NegI_reg_RShift_reg(iRegINoSp dst,
                              immI0 zero, iRegIorL2I src1, immI src2) %{
  match(Set dst (SubI zero (RShiftI src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "negw  $dst, $src1, ASR $src2" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::ASR, $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10947 
10948 // This pattern is automatically generated from aarch64_ad.m4
10949 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = -(src1 << src2), folded into negw with an LSL-shifted operand.
instruct NegI_reg_LShift_reg(iRegINoSp dst,
                              immI0 zero, iRegIorL2I src1, immI src2) %{
  match(Set dst (SubI zero (LShiftI src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "negw  $dst, $src1, LSL $src2" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSL, $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10964 
10965 // This pattern is automatically generated from aarch64_ad.m4
10966 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = -(src1 >>> src2) (64-bit), folded into neg with an LSR-shifted operand.
instruct NegL_reg_URShift_reg(iRegLNoSp dst,
                              immL0 zero, iRegL src1, immI src2) %{
  match(Set dst (SubL zero (URShiftL src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "neg  $dst, $src1, LSR $src2" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSR, $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10981 
10982 // This pattern is automatically generated from aarch64_ad.m4
10983 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = -(src1 >> src2) (64-bit), folded into neg with an ASR-shifted operand.
instruct NegL_reg_RShift_reg(iRegLNoSp dst,
                              immL0 zero, iRegL src1, immI src2) %{
  match(Set dst (SubL zero (RShiftL src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "neg  $dst, $src1, ASR $src2" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::ASR, $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10998 
10999 // This pattern is automatically generated from aarch64_ad.m4
11000 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = -(src1 << src2) (64-bit), folded into neg with an LSL-shifted operand.
instruct NegL_reg_LShift_reg(iRegLNoSp dst,
                              immL0 zero, iRegL src1, immI src2) %{
  match(Set dst (SubL zero (LShiftL src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "neg  $dst, $src1, LSL $src2" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSL, $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11015 
11016 // This pattern is automatically generated from aarch64_ad.m4
11017 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 & ~src2 (and-with-complement), emitted as "bicw".
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11033 
11034 // This pattern is automatically generated from aarch64_ad.m4
11035 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 & ~src2 (64-bit and-with-complement), emitted as "bic".
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11051 
11052 // This pattern is automatically generated from aarch64_ad.m4
11053 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 | ~src2 (or-with-complement), emitted as "ornw".
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11069 
11070 // This pattern is automatically generated from aarch64_ad.m4
11071 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 | ~src2 (64-bit or-with-complement), emitted as "orn".
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11087 
11088 // This pattern is automatically generated from aarch64_ad.m4
11089 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = ~(src1 ^ src2) (xor then complement), emitted as "eonw".
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11105 
11106 // This pattern is automatically generated from aarch64_ad.m4
11107 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = ~(src1 ^ src2) (64-bit xor then complement), emitted as "eon".
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11123 
11124 // This pattern is automatically generated from aarch64_ad.m4
11125 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11126 // val & (-1 ^ (val >>> shift)) ==> bicw
// dst = src1 & ~(src2 >>> src3), folded into one "bicw" with an LSR operand.
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11144 
11145 // This pattern is automatically generated from aarch64_ad.m4
11146 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11147 // val & (-1 ^ (val >>> shift)) ==> bic
// dst = src1 & ~(src2 >>> src3) (64-bit), folded into one "bic" with LSR.
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11165 
11166 // This pattern is automatically generated from aarch64_ad.m4
11167 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11168 // val & (-1 ^ (val >> shift)) ==> bicw
// dst = src1 & ~(src2 >> src3), folded into one "bicw" with an ASR operand.
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11186 
11187 // This pattern is automatically generated from aarch64_ad.m4
11188 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11189 // val & (-1 ^ (val >> shift)) ==> bic
// dst = src1 & ~(src2 >> src3) (64-bit), folded into one "bic" with ASR.
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11207 
11208 // This pattern is automatically generated from aarch64_ad.m4
11209 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11210 // val & (-1 ^ (val ror shift)) ==> bicw
// dst = src1 & ~(src2 ror src3), folded into one "bicw" with a ROR operand.
instruct AndI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11228 
11229 // This pattern is automatically generated from aarch64_ad.m4
11230 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11231 // val & (-1 ^ (val ror shift)) ==> bic
// dst = src1 & ~(src2 ror src3) (64-bit), folded into one "bic" with ROR.
instruct AndL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11249 
11250 // This pattern is automatically generated from aarch64_ad.m4
11251 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11252 // val & (-1 ^ (val << shift)) ==> bicw
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // src4 is the constant -1 (immI_M1): (XorI x -1) == ~x, so the whole
  // and-not-shift expression folds into a single BICW with an LSL operand.
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f); // shift amount mod 32 (int shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11270 
11271 // This pattern is automatically generated from aarch64_ad.m4
11272 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11273 // val & (-1 ^ (val << shift)) ==> bic
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // src4 is the constant -1 (immL_M1): (XorL x -1) == ~x, so the whole
  // and-not-shift expression folds into a single BIC with an LSL operand.
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f); // shift amount mod 64 (long shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11291 
11292 // This pattern is automatically generated from aarch64_ad.m4
11293 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11294 // val ^ (-1 ^ (val >>> shift)) ==> eonw
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // src4 is the constant -1 (immI_M1) on the OUTER xor:
  // -1 ^ (x ^ y) == ~(x ^ y), i.e. EONW (exclusive-OR-NOT) of src1 and shifted src2.
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f); // shift amount mod 32 (int shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11312 
11313 // This pattern is automatically generated from aarch64_ad.m4
11314 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11315 // val ^ (-1 ^ (val >>> shift)) ==> eon
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // src4 is the constant -1 (immL_M1) on the OUTER xor:
  // -1 ^ (x ^ y) == ~(x ^ y), i.e. EON (exclusive-OR-NOT) of src1 and shifted src2.
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f); // shift amount mod 64 (long shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11333 
11334 // This pattern is automatically generated from aarch64_ad.m4
11335 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11336 // val ^ (-1 ^ (val >> shift)) ==> eonw
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // src4 is the constant -1 (immI_M1) on the OUTER xor:
  // -1 ^ (x ^ y) == ~(x ^ y), i.e. EONW of src1 and ASR-shifted src2.
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f); // shift amount mod 32 (int shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11354 
11355 // This pattern is automatically generated from aarch64_ad.m4
11356 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11357 // val ^ (-1 ^ (val >> shift)) ==> eon
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // src4 is the constant -1 (immL_M1) on the OUTER xor:
  // -1 ^ (x ^ y) == ~(x ^ y), i.e. EON of src1 and ASR-shifted src2.
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f); // shift amount mod 64 (long shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11375 
11376 // This pattern is automatically generated from aarch64_ad.m4
11377 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11378 // val ^ (-1 ^ (val ror shift)) ==> eonw
instruct XorI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // src4 is the constant -1 (immI_M1) on the OUTER xor:
  // -1 ^ (x ^ y) == ~(x ^ y), i.e. EONW of src1 and ROR-rotated src2.
  match(Set dst (XorI src4 (XorI(RotateRight src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f); // rotate amount mod 32 (int shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11396 
11397 // This pattern is automatically generated from aarch64_ad.m4
11398 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11399 // val ^ (-1 ^ (val ror shift)) ==> eon
instruct XorL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // src4 is the constant -1 (immL_M1) on the OUTER xor:
  // -1 ^ (x ^ y) == ~(x ^ y), i.e. EON of src1 and ROR-rotated src2.
  match(Set dst (XorL src4 (XorL(RotateRight src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f); // rotate amount mod 64 (long shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11417 
11418 // This pattern is automatically generated from aarch64_ad.m4
11419 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11420 // val ^ (-1 ^ (val << shift)) ==> eonw
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // src4 is the constant -1 (immI_M1) on the OUTER xor:
  // -1 ^ (x ^ y) == ~(x ^ y), i.e. EONW of src1 and LSL-shifted src2.
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f); // shift amount mod 32 (int shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11438 
11439 // This pattern is automatically generated from aarch64_ad.m4
11440 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11441 // val ^ (-1 ^ (val << shift)) ==> eon
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // src4 is the constant -1 (immL_M1) on the OUTER xor:
  // -1 ^ (x ^ y) == ~(x ^ y), i.e. EON of src1 and LSL-shifted src2.
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f); // shift amount mod 64 (long shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11459 
11460 // This pattern is automatically generated from aarch64_ad.m4
11461 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11462 // val | (-1 ^ (val >>> shift)) ==> ornw
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // src4 is the constant -1 (immI_M1): (XorI x -1) == ~x, so the whole
  // or-not-shift expression folds into a single ORNW with an LSR operand.
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f); // shift amount mod 32 (int shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11480 
11481 // This pattern is automatically generated from aarch64_ad.m4
11482 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11483 // val | (-1 ^ (val >>> shift)) ==> orn
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // src4 is the constant -1 (immL_M1): (XorL x -1) == ~x, so the whole
  // or-not-shift expression folds into a single ORN with an LSR operand.
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f); // shift amount mod 64 (long shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11501 
11502 // This pattern is automatically generated from aarch64_ad.m4
11503 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11504 // val | (-1 ^ (val >> shift)) ==> ornw
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // src4 is the constant -1 (immI_M1): (XorI x -1) == ~x, so the whole
  // or-not-shift expression folds into a single ORNW with an ASR operand.
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f); // shift amount mod 32 (int shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11522 
11523 // This pattern is automatically generated from aarch64_ad.m4
11524 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11525 // val | (-1 ^ (val >> shift)) ==> orn
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // src4 is the constant -1 (immL_M1): (XorL x -1) == ~x, so the whole
  // or-not-shift expression folds into a single ORN with an ASR operand.
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f); // shift amount mod 64 (long shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11543 
11544 // This pattern is automatically generated from aarch64_ad.m4
11545 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11546 // val | (-1 ^ (val ror shift)) ==> ornw
instruct OrI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // src4 is the constant -1 (immI_M1): (XorI x -1) == ~x, so the whole
  // or-not-rotate expression folds into a single ORNW with a ROR operand.
  match(Set dst (OrI src1 (XorI(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f); // rotate amount mod 32 (int shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11564 
11565 // This pattern is automatically generated from aarch64_ad.m4
11566 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11567 // val | (-1 ^ (val ror shift)) ==> orn
instruct OrL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // src4 is the constant -1 (immL_M1): (XorL x -1) == ~x, so the whole
  // or-not-rotate expression folds into a single ORN with a ROR operand.
  match(Set dst (OrL src1 (XorL(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f); // rotate amount mod 64 (long shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11585 
11586 // This pattern is automatically generated from aarch64_ad.m4
11587 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11588 // val | (-1 ^ (val << shift)) ==> ornw
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // src4 is the constant -1 (immI_M1): (XorI x -1) == ~x, so the whole
  // or-not-shift expression folds into a single ORNW with an LSL operand.
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f); // shift amount mod 32 (int shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11606 
11607 // This pattern is automatically generated from aarch64_ad.m4
11608 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11609 // val | (-1 ^ (val << shift)) ==> orn
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // src4 is the constant -1 (immL_M1): (XorL x -1) == ~x, so the whole
  // or-not-shift expression folds into a single ORN with an LSL operand.
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f); // shift amount mod 64 (long shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11627 
11628 // This pattern is automatically generated from aarch64_ad.m4
11629 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 & (val2 >>> shift) ==> andw (shifted-register form, one instruction)
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f); // shift amount mod 32 (int shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11648 
11649 // This pattern is automatically generated from aarch64_ad.m4
11650 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 & (val2 >>> shift) ==> andr (64-bit AND, shifted-register form)
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f); // shift amount mod 64 (long shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11669 
11670 // This pattern is automatically generated from aarch64_ad.m4
11671 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 & (val2 >> shift) ==> andw (shifted-register form, one instruction)
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f); // shift amount mod 32 (int shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11690 
11691 // This pattern is automatically generated from aarch64_ad.m4
11692 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 & (val2 >> shift) ==> andr (64-bit AND, shifted-register form)
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f); // shift amount mod 64 (long shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11711 
11712 // This pattern is automatically generated from aarch64_ad.m4
11713 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 & (val2 << shift) ==> andw (shifted-register form, one instruction)
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f); // shift amount mod 32 (int shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11732 
11733 // This pattern is automatically generated from aarch64_ad.m4
11734 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 & (val2 << shift) ==> andr (64-bit AND, shifted-register form)
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f); // shift amount mod 64 (long shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11753 
11754 // This pattern is automatically generated from aarch64_ad.m4
11755 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 & (val2 ror shift) ==> andw (rotated-register form, one instruction)
instruct AndI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f); // rotate amount mod 32 (int shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11774 
11775 // This pattern is automatically generated from aarch64_ad.m4
11776 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 & (val2 ror shift) ==> andr (64-bit AND, rotated-register form)
instruct AndL_reg_RotateRight_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f); // rotate amount mod 64 (long shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11795 
11796 // This pattern is automatically generated from aarch64_ad.m4
11797 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 ^ (val2 >>> shift) ==> eorw (shifted-register form, one instruction)
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f); // shift amount mod 32 (int shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11816 
11817 // This pattern is automatically generated from aarch64_ad.m4
11818 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 ^ (val2 >>> shift) ==> eor (64-bit, shifted-register form)
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f); // shift amount mod 64 (long shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11837 
11838 // This pattern is automatically generated from aarch64_ad.m4
11839 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 ^ (val2 >> shift) ==> eorw (shifted-register form, one instruction)
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f); // shift amount mod 32 (int shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11858 
11859 // This pattern is automatically generated from aarch64_ad.m4
11860 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 ^ (val2 >> shift) ==> eor (64-bit, shifted-register form)
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f); // shift amount mod 64 (long shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11879 
11880 // This pattern is automatically generated from aarch64_ad.m4
11881 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 ^ (val2 << shift) ==> eorw (shifted-register form, one instruction)
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f); // shift amount mod 32 (int shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11900 
11901 // This pattern is automatically generated from aarch64_ad.m4
11902 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 ^ (val2 << shift) ==> eor (64-bit, shifted-register form)
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f); // shift amount mod 64 (long shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11921 
11922 // This pattern is automatically generated from aarch64_ad.m4
11923 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 ^ (val2 ror shift) ==> eorw (rotated-register form, one instruction)
instruct XorI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f); // rotate amount mod 32 (int shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11942 
11943 // This pattern is automatically generated from aarch64_ad.m4
11944 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 ^ (val2 ror shift) ==> eor (64-bit, rotated-register form)
instruct XorL_reg_RotateRight_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f); // rotate amount mod 64 (long shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11963 
11964 // This pattern is automatically generated from aarch64_ad.m4
11965 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 | (val2 >>> shift) ==> orrw (shifted-register form, one instruction)
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f); // shift amount mod 32 (int shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11984 
11985 // This pattern is automatically generated from aarch64_ad.m4
11986 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 | (val2 >>> shift) ==> orr (64-bit, shifted-register form)
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f); // shift amount mod 64 (long shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12005 
12006 // This pattern is automatically generated from aarch64_ad.m4
12007 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 | (val2 >> shift) ==> orrw (shifted-register form, one instruction)
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f); // shift amount mod 32 (int shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12026 
12027 // This pattern is automatically generated from aarch64_ad.m4
12028 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 | (val2 >> shift) ==> orr (64-bit, shifted-register form)
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f); // shift amount mod 64 (long shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12047 
12048 // This pattern is automatically generated from aarch64_ad.m4
12049 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 | (val2 << shift) ==> orrw (shifted-register form, one instruction)
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f); // shift amount mod 32 (int shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12068 
12069 // This pattern is automatically generated from aarch64_ad.m4
12070 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 | (val2 << shift) ==> orr (64-bit, shifted-register form)
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f); // shift amount mod 64 (long shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12089 
12090 // This pattern is automatically generated from aarch64_ad.m4
12091 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 | (val2 ror shift) ==> orrw (rotated-register form, one instruction)
instruct OrI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f); // rotate amount mod 32 (int shift semantics)
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12110 
12111 // This pattern is automatically generated from aarch64_ad.m4
12112 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12113 instruct OrL_reg_RotateRight_reg(iRegLNoSp dst,
12114                          iRegL src1, iRegL src2,
12115                          immI src3) %{
12116   match(Set dst (OrL src1 (RotateRight src2 src3)));
12117 
12118   ins_cost(1.9 * INSN_COST);
12119   format %{ "orr  $dst, $src1, $src2, ROR $src3" %}
12120 
12121   ins_encode %{
12122     __ orr(as_Register($dst$$reg),
12123               as_Register($src1$$reg),
12124               as_Register($src2$$reg),
12125               Assembler::ROR,
12126               $src3$$constant & 0x3f);
12127   %}
12128 
12129   ins_pipe(ialu_reg_reg_shift);
12130 %}
12131 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- change aarch64_ad.m4, not this file.
// dst = src1 + (src2 >>> src3), 32-bit: addw with LSR-shifted second operand;
// shift amount masked to 5 bits.
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 + (src2 >>> src3), 64-bit: add with LSR-shifted second operand;
// shift amount masked to 6 bits.
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 + (src2 >> src3), 32-bit: addw with ASR-shifted second operand.
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 + (src2 >> src3), 64-bit: add with ASR-shifted second operand.
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 + (src2 << src3), 32-bit: addw with LSL-shifted second operand.
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 + (src2 << src3), 64-bit: add with LSL-shifted second operand.
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12257 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- change aarch64_ad.m4, not this file.
// dst = src1 - (src2 >>> src3), 32-bit: subw with LSR-shifted second operand;
// shift amount masked to 5 bits.
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (src2 >>> src3), 64-bit: sub with LSR-shifted second operand;
// shift amount masked to 6 bits.
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (src2 >> src3), 32-bit: subw with ASR-shifted second operand.
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (src2 >> src3), 64-bit: sub with ASR-shifted second operand.
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (src2 << src3), 32-bit: subw with LSL-shifted second operand.
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (src2 << src3), 64-bit: sub with LSL-shifted second operand.
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12383 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// (src << lshift) >> rshift (arithmetic) collapses to one signed
// bitfield move: sbfm with r = (rshift - lshift) & 63 and
// s = 63 - lshift, per the SBFM immr/imms encoding.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of sbfmL: shift counts masked to 5 bits, sbfmw used.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12429 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// (src << lshift) >>> rshift (logical) collapses to one unsigned
// bitfield move: ubfm with r = (rshift - lshift) & 63 and
// s = 63 - lshift (same r/s derivation as sbfmL above).
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of ubfmL: shift counts masked to 5 bits, ubfmw used.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12475 
// Bitfield extract with shift & mask

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src >>> rshift) & mask, where mask is 2^k - 1 (immI_bitmask), is a
// ubfxw extracting a field of width = exact_log2(mask + 1) at bit rshift.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));
  // Make sure we are not going to exceed what ubfxw can do.
  // (i.e. the extracted field must lie within the 32-bit word:
  // width + rshift <= 32)
  predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit variant of ubfxwI: (src >>> rshift) & mask becomes ubfx.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));
  // Make sure we are not going to exceed what ubfx can do.
  // (width + rshift <= 64)
  predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12517 
12518 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The 64-bit ubfx zero-fills the upper bits, so the ConvI2L is absorbed
// into the same single instruction.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
  // Make sure we are not going to exceed what ubfxw can do.
  // (width + rshift <= 32)
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12541 
12542 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// (src & mask) << lshift inserts a field of width exact_log2(mask + 1)
// at bit position lshift; the predicate checks the field fits in 32 bits.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// 64-bit variant of ubfizwI; the predicate checks the field fits in 64 bits.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  predicate((exact_log2_long(n->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12586 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// ConvI2L of ((src & mask) << lshift) folded into one ubfizw.  Note the
// predicate bound is <= 31 here (strictly inside the int range), tighter
// than the plain ubfizwI pattern above.
instruct ubfizwIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (LShiftI (AndI src mask) lshift)));
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// ConvL2I of ((src & mask) << lshift) folded into one 64-bit ubfiz; the
// <= 31 bound keeps the inserted field within the low 32 bits so the
// narrowing conversion loses nothing.
instruct ubfizLConvL2I(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
%{
  match(Set dst (ConvL2I (LShiftL (AndL src mask) lshift)));
  predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12630 
12631 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// If there is a convert I to L block between an AndI and a LShiftL, we can also match ubfiz
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
  // width + lshift must fit in 64 bits
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12652 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// If there is a convert L to I block between an AndL and a LShiftI, we can also match ubfiz
instruct ubfizLConvL2Ix(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
%{
  match(Set dst (LShiftI (ConvL2I (AndL src mask)) lshift));
  // width + lshift must stay within the 32-bit result
  predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 31)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12673 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Can skip int2long conversions after AND with small bitmask
// ConvI2L(src & msk) is a ubfiz with lshift 0: insert the masked field
// at bit 0 and zero-extend to 64 bits in one instruction.
instruct ubfizIConvI2LAndI(iRegLNoSp dst, iRegI src, immI_bitmask msk)
%{
  match(Set dst (ConvI2L (AndI src msk)));
  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, 0, exact_log2($msk + 1) " %}
  ins_encode %{
    __ ubfiz(as_Register($dst$$reg), as_Register($src$$reg), 0, exact_log2($msk$$constant + 1));
  %}
  ins_pipe(ialu_reg_shift);
%}
12688 
12689 
// Rotations

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src1 << lshift) | (src2 >>> rshift) is an extract when the two shift
// amounts sum to the register width: the predicate requires
// (lshift + rshift) % 64 == 0, so a single extr by rshift suffices.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit variant of extrOrL: (lshift + rshift) % 32 == 0, encoded as extrw.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12726 
12727 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Same extract idiom as extrOrL but rooted at AddL: when the shifted
// halves cannot overlap ((lshift + rshift) % 64 == 0), add equals or,
// so a single extr is valid.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit variant of extrAddL, encoded as extrw.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12762 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate right by immediate: extrw with the same register as both
// sources is ROR #imm (the standard A64 alias).
instruct rorI_imm(iRegINoSp dst, iRegI src, immI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ extrw(as_Register($dst$$reg), as_Register($src$$reg), as_Register($src$$reg),
               $shift$$constant & 0x1f);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit rotate right by immediate via extr with src as both sources.
instruct rorL_imm(iRegLNoSp dst, iRegL src, immI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ extr(as_Register($dst$$reg), as_Register($src$$reg), as_Register($src$$reg),
               $shift$$constant & 0x3f);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12794 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate right by a register amount: rorvw (32-bit variable rotate).
instruct rorI_reg(iRegINoSp dst, iRegI src, iRegI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg), as_Register($shift$$reg));
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit rotate right by a register amount: rorv.
instruct rorL_reg(iRegLNoSp dst, iRegL src, iRegI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ rorv(as_Register($dst$$reg), as_Register($src$$reg), as_Register($shift$$reg));
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12824 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate left by a register amount: A64 has no rol, so negate the
// shift (subw from zr into rscratch1) and rotate right by that amount.
instruct rolI_reg(iRegINoSp dst, iRegI src, iRegI shift)
%{
  match(Set dst (RotateLeft src shift));

  ins_cost(INSN_COST);
  format %{ "rol    $dst, $src, $shift" %}

  ins_encode %{
     __ subw(rscratch1, zr, as_Register($shift$$reg));
     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg), rscratch1);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit rotate left: negate the shift amount, then rorv.
instruct rolL_reg(iRegLNoSp dst, iRegL src, iRegI shift)
%{
  match(Set dst (RotateLeft src shift));

  ins_cost(INSN_COST);
  format %{ "rol    $dst, $src, $shift" %}

  ins_encode %{
     __ subw(rscratch1, zr, as_Register($shift$$reg));
     __ rorv(as_Register($dst$$reg), as_Register($src$$reg), rscratch1);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12856 
12857 
// Add/subtract (extended)

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 + ConvI2L(src2): add with sxtw-extended second operand, folding
// the int-to-long conversion into the add.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 - ConvI2L(src2): sub with sxtw-extended second operand.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12889 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 << 16) >> 16 (arithmetic) sign-extends the low halfword, so the
// whole add matches as add ..., sxth.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 << 24) >> 24 (arithmetic) sign-extends the low byte: add ..., sxtb.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 << 24) >>> 24 (logical) zero-extends the low byte: add ..., uxtb.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12934 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// AddL where src2 is sign-extended from 16 bits via (src2 << 48) >> 48;
// folded into a 64-bit add with the sxth extend option.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// AddL where src2 is sign-extended from 32 bits via (src2 << 32) >> 32;
// folded into add with the sxtw extend option.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// AddL where src2 is sign-extended from 8 bits via (src2 << 56) >> 56;
// folded into add with the sxtb extend option.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// AddL where src2 is zero-extended from 8 bits via (src2 << 56) >>> 56
// (URShiftL); folded into add with the uxtb extend option.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12994 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// AddI where src2 is zero-extended via a mask: src2 & 0xFF == uxtb(src2);
// folded into a 32-bit addw with the uxtb extend option.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// AddI with src2 & 0xFFFF == uxth(src2); folded into addw, uxth.
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// AddL with src2 & 0xFF == uxtb(src2); folded into 64-bit add, uxtb.
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// AddL with src2 & 0xFFFF == uxth(src2); folded into add, uxth.
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// AddL with src2 & 0xFFFFFFFF == uxtw(src2); folded into add, uxtw.
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
13069 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterparts of the mask-based extends above.
// SubI with src2 & 0xFF == uxtb(src2); folded into 32-bit subw, uxtb.
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// SubI with src2 & 0xFFFF == uxth(src2); folded into subw, uxth.
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// SubL with src2 & 0xFF == uxtb(src2); folded into 64-bit sub, uxtb.
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// SubL with src2 & 0xFFFF == uxth(src2); folded into sub, uxth.
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// SubL with src2 & 0xFFFFFFFF == uxtw(src2); folded into sub, uxtw.
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
13144 
13145 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Extend-then-shift forms: the A64 extended-register add/sub also applies a
// small left shift after the extend, so (sxtb(src2) << lshift2) folds into one
// instruction.  lshift2 is constrained by the immIExt operand (defined
// elsewhere in this file) to the range the encoding allows.
// AddL: dst = src1 + (sign_extend_8(src2) << lshift2).
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// AddL: dst = src1 + (sign_extend_16(src2) << lshift2).
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// AddL: dst = src1 + (sign_extend_32(src2) << lshift2).
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// SubL: dst = src1 - (sign_extend_8(src2) << lshift2).
instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// SubL: dst = src1 - (sign_extend_16(src2) << lshift2).
instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// SubL: dst = src1 - (sign_extend_32(src2) << lshift2).
instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13235 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit extend-then-shift forms (addw/subw counterparts of the AddExtL/SubExtL
// *_shift patterns above).
// AddI: dst = src1 + (sign_extend_8(src2) << lshift2), 32-bit addw.
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// AddI: dst = src1 + (sign_extend_16(src2) << lshift2), 32-bit addw.
instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// SubI: dst = src1 - (sign_extend_8(src2) << lshift2), 32-bit subw.
instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// SubI: dst = src1 - (sign_extend_16(src2) << lshift2), 32-bit subw.
instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13295 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// AddL where src2 is an int widened by ConvI2L and then left-shifted:
// dst = src1 + (sign_extend_32to64(src2) << lshift), folded into add, sxtw #lshift.
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterpart: dst = src1 - (sign_extend_32to64(src2) << lshift).
instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13325 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Mask-based zero-extend followed by a left shift, 64-bit forms:
// (src2 & mask) << lshift folds into one extended-register add/sub with shift.
// AddL: dst = src1 + (zero_extend_8(src2) << lshift).
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// AddL: dst = src1 + (zero_extend_16(src2) << lshift).
instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// AddL: dst = src1 + (zero_extend_32(src2) << lshift).
instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// SubL: dst = src1 - (zero_extend_8(src2) << lshift).
instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// SubL: dst = src1 - (zero_extend_16(src2) << lshift).
instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// SubL: dst = src1 - (zero_extend_32(src2) << lshift).
instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13415 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit counterparts of the masked extend+shift patterns above (addw/subw).
// AddI: dst = src1 + (zero_extend_8(src2) << lshift).
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// AddI: dst = src1 + (zero_extend_16(src2) << lshift).
instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// SubI: dst = src1 - (zero_extend_8(src2) << lshift).
instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// SubI: dst = src1 - (zero_extend_16(src2) << lshift).
instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13475 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Effect-only helper (no match rule): referenced by the min/max expand rules
// below.  cselw: dst = LT(cr) ? src1 : src2.
instruct cmovI_reg_reg_lt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE src2, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 lt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             $src2$$Register,
             Assembler::LT);
  %}
  ins_pipe(icond_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Effect-only helper: dst = GT(cr) ? src1 : src2.
instruct cmovI_reg_reg_gt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE src2, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 gt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             $src2$$Register,
             Assembler::GT);
  %}
  ins_pipe(icond_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Effect-only helper: dst = LT(cr) ? src1 : 0 (zr as the second source).
instruct cmovI_reg_imm0_lt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, zr lt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Effect-only helper: dst = GT(cr) ? src1 : 0.
instruct cmovI_reg_imm0_gt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, zr gt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GT);
  %}
  ins_pipe(icond_reg);
%}
13543 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Effect-only helper: csincw against zr gives dst = LE(cr) ? src1 : (0 + 1),
// i.e. selects the constant 1 on the false path.
instruct cmovI_reg_imm1_le(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, $src1, zr le\t"  %}

  ins_encode %{
    __ csincw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LE);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Effect-only helper: dst = GT(cr) ? src1 : 1 (csincw of zr).
instruct cmovI_reg_imm1_gt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, $src1, zr gt\t"  %}

  ins_encode %{
    __ csincw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Effect-only helper: csinvw against zr gives dst = LT(cr) ? src1 : ~0,
// i.e. selects the constant -1 on the false path.
instruct cmovI_reg_immM1_lt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csinvw $dst, $src1, zr lt\t"  %}

  ins_encode %{
    __ csinvw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Effect-only helper: dst = GE(cr) ? src1 : -1 (csinvw of zr).
instruct cmovI_reg_immM1_ge(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csinvw $dst, $src1, zr ge\t"  %}

  ins_encode %{
    __ csinvw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GE);
  %}
  ins_pipe(icond_reg);
%}
13611 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// MinI against small constants, expanded as compare-against-zero
// (compI_reg_imm0, defined elsewhere in this file) plus a conditional-select
// helper.  min(src, 0): src if src < 0, else 0.
instruct minI_reg_imm0(iRegINoSp dst, iRegIorL2I src, immI0 imm)
%{
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Commuted operand order; min is commutative, so the expansion is identical.
instruct minI_imm0_reg(iRegINoSp dst, immI0 imm, iRegIorL2I src)
%{
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(src, 1): compare src with 0; src if src <= 0, else 1 (csincw of zr).
instruct minI_reg_imm1(iRegINoSp dst, iRegIorL2I src, immI_1 imm)
%{
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_le(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Commuted operand order of minI_reg_imm1.
instruct minI_imm1_reg(iRegINoSp dst, immI_1 imm, iRegIorL2I src)
%{
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_le(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(src, -1): src if src < 0 (then src <= -1), else -1 (csinvw of zr).
instruct minI_reg_immM1(iRegINoSp dst, iRegIorL2I src, immI_M1 imm)
%{
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Commuted operand order of minI_reg_immM1.
instruct minI_immM1_reg(iRegINoSp dst, immI_M1 imm, iRegIorL2I src)
%{
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_lt(dst, src, cr);
  %}
%}
13689 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// MaxI against small constants, mirroring the minI_* expansions above with the
// inverted conditions.  max(src, 0): src if src > 0, else 0.
instruct maxI_reg_imm0(iRegINoSp dst, iRegIorL2I src, immI0 imm)
%{
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Commuted operand order; max is commutative, so the expansion is identical.
instruct maxI_imm0_reg(iRegINoSp dst, immI0 imm, iRegIorL2I src)
%{
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(src, 1): compare src with 0; src if src > 0, else 1 (csincw of zr).
instruct maxI_reg_imm1(iRegINoSp dst, iRegIorL2I src, immI_1 imm)
%{
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Commuted operand order of maxI_reg_imm1.
instruct maxI_imm1_reg(iRegINoSp dst, immI_1 imm, iRegIorL2I src)
%{
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(src, -1): src if src >= 0 (then src > -1), else -1 (csinvw of zr).
instruct maxI_reg_immM1(iRegINoSp dst, iRegIorL2I src, immI_M1 imm)
%{
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_ge(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Commuted operand order of maxI_reg_immM1.
instruct maxI_immM1_reg(iRegINoSp dst, immI_M1 imm, iRegIorL2I src)
%{
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_ge(dst, src, cr);
  %}
%}
13767 
13768 // This pattern is automatically generated from aarch64_ad.m4
13769 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13770 instruct bits_reverse_I(iRegINoSp dst, iRegIorL2I src)
13771 %{
13772   match(Set dst (ReverseI src));
13773   ins_cost(INSN_COST);
13774   format %{ "rbitw  $dst, $src" %}
13775   ins_encode %{
13776     __ rbitw($dst$$Register, $src$$Register);
13777   %}
13778   ins_pipe(ialu_reg);
13779 %}
13780 
13781 // This pattern is automatically generated from aarch64_ad.m4
13782 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13783 instruct bits_reverse_L(iRegLNoSp dst, iRegL src)
13784 %{
13785   match(Set dst (ReverseL src));
13786   ins_cost(INSN_COST);
13787   format %{ "rbit  $dst, $src" %}
13788   ins_encode %{
13789     __ rbit($dst$$Register, $src$$Register);
13790   %}
13791   ins_pipe(ialu_reg);
13792 %}
13793 
13794 
13795 // END This section of the file is automatically generated. Do not edit --------------
13796 
13797 
13798 // ============================================================================
13799 // Floating Point Arithmetic Instructions
13800 
// Scalar half-precision float add: dst = src1 + src2 (faddh).
instruct addHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddHF src1 src2));
  format %{ "faddh $dst, $src1, $src2" %}
  ins_encode %{
    __ faddh($dst$$FloatRegister,
             $src1$$FloatRegister,
             $src2$$FloatRegister);
  %}
  // Scheduled on the single-precision FP pipe; no dedicated HF pipe class here.
  ins_pipe(fp_dop_reg_reg_s);
%}
13811 
// Scalar single-precision float add: dst = src1 + src2 (fadds).
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
13826 
// Scalar double-precision float add: dst = src1 + src2 (faddd).
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13841 
// Scalar half-precision float subtract: dst = src1 - src2 (fsubh).
instruct subHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubHF src1 src2));
  format %{ "fsubh $dst, $src1, $src2" %}
  ins_encode %{
    __ fsubh($dst$$FloatRegister,
             $src1$$FloatRegister,
             $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}
13852 
// Scalar single-precision float subtract: dst = src1 - src2 (fsubs).
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
13867 
// Scalar double-precision float subtract: dst = src1 - src2 (fsubd).
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13882 
// Scalar half-precision float multiply: dst = src1 * src2 (fmulh).
instruct mulHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulHF src1 src2));
  format %{ "fmulh $dst, $src1, $src2" %}
  ins_encode %{
    __ fmulh($dst$$FloatRegister,
             $src1$$FloatRegister,
             $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}
13893 
// Scalar single-precision float multiply: dst = src1 * src2 (fmuls).
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
13908 
// Scalar double-precision float multiply: dst = src1 * src2 (fmuld).
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13923 
// src1 * src2 + src3 (half-precision float), fused: matches FmaHF (fmaddh).
// Only selected when UseFMA is on (asserted in the encoding).
instruct maddHF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  match(Set dst (FmaHF src3 (Binary src1 src2)));
  format %{ "fmaddh $dst, $src1, $src2, $src3" %}
  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmaddh($dst$$FloatRegister,
              $src1$$FloatRegister,
              $src2$$FloatRegister,
              $src3$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}
13937 
// src1 * src2 + src3, single-precision fused multiply-add (fmadds).
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13954 
// src1 * src2 + src3, double-precision fused multiply-add (fmaddd).
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13971 
// src1 * (-src2) + src3, single-precision fused multiply-subtract (fmsubs).
// "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3"
// by the matcher, so this single rule covers both operand orders.
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13989 
// src1 * (-src2) + src3, double-precision fused multiply-subtract (fmsubd).
// "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3"
// by the matcher, so this single rule covers both operand orders.
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
14007 
// src1 * (-src2) - src3, single-precision negated fused multiply-add (fnmadds).
// "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
// by the matcher, so this single rule covers both operand orders.
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
14025 
// src1 * (-src2) - src3, double-precision negated fused multiply-add (fnmaddd).
// "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
// by the matcher, so this single rule covers both operand orders.
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
14043 
// src1 * src2 - src3, single-precision (fnmsubs).
// NOTE(review): the "zero" operand is not referenced by the match rule or the
// encoding; it looks like a leftover from an older rule shape -- confirm.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
14060 
// src1 * src2 - src3, double-precision (FNMSUB, double form).
// NOTE(review): the "zero" operand is not referenced by the match rule or the
// encoding; it looks like a leftover from an older rule shape -- confirm.
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    // n.b. insn name should be fnmsubd
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
14078 
// Math.max(HH)H (half-precision float): dst = max(src1, src2) via fmaxh,
// which implements IEEE max semantics in hardware.
instruct maxHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxHF src1 src2));
  format %{ "fmaxh $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxh($dst$$FloatRegister,
             $src1$$FloatRegister,
             $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}
14090 
// Math.min(HH)H (half-precision float): dst = min(src1, src2) via fminh.
instruct minHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinHF src1 src2));
  format %{ "fminh $dst, $src1, $src2" %}
  ins_encode %{
    __ fminh($dst$$FloatRegister,
             $src1$$FloatRegister,
             $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}
14102 
// Math.max(FF)F: single-precision max via fmaxs.
instruct maxF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxF src1 src2));

  format %{ "fmaxs   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
14116 
// Math.min(FF)F: single-precision min via fmins.
instruct minF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinF src1 src2));

  format %{ "fmins   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
14130 
// Math.max(DD)D: double-precision max via fmaxd.
instruct maxD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MaxD src1 src2));

  format %{ "fmaxd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
14144 
// Math.min(DD)D: double-precision min via fmind.
instruct minD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MinD src1 src2));

  format %{ "fmind   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
14158 
// Scalar half-precision float divide: dst = src1 / src2 (fdivh).
instruct divHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivHF src1  src2));
  format %{ "fdivh $dst, $src1, $src2" %}
  ins_encode %{
    __ fdivh($dst$$FloatRegister,
             $src1$$FloatRegister,
             $src2$$FloatRegister);
  %}
  ins_pipe(fp_div_s);
%}
14169 
// Scalar single-precision float divide: dst = src1 / src2 (fdivs).
// High cost reflects the long latency of hardware FP divide.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}
14184 
// Scalar double-precision float divide: dst = src1 / src2 (fdivd).
// Costed higher than the single-precision divide.
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
14199 
// Single-precision float negate: dst = -src (fnegs).
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  format %{ "fneg   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
14213 
// Double-precision float negate: dst = -src (fnegd).
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
14227 
// Integer absolute value: compare src against zero, then conditionally
// negate when negative (cmpw + cnegw). The compare clobbers the flags,
// hence the KILL cr effect.
instruct absI_reg(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (AbsI src));

  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $src, zr\n\t"
            "cnegw $dst, $src, Assembler::LT\t# int abs"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cnegw(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
14244 
// Long absolute value: compare src against zero, then conditionally
// negate when negative (cmp + cneg). The compare clobbers the flags,
// hence the KILL cr effect.
instruct absL_reg(iRegLNoSp dst, iRegL src, rFlagsReg cr)
%{
  match(Set dst (AbsL src));

  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmp  $src, zr\n\t"
            "cneg $dst, $src, Assembler::LT\t# long abs"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cneg(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
14261 
// Single-precision float absolute value (fabss).
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
14274 
// Double-precision float absolute value (fabsd).
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
14287 
// Fused |src1 - src2| (single precision): matches AbsF over SubF and emits
// a single fabds instead of a subtract followed by an abs.
instruct absdF_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AbsF (SubF src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "fabds   $dst, $src1, $src2" %}
  ins_encode %{
    __ fabds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
14301 
// Fused |src1 - src2| (double precision): matches AbsD over SubD and emits
// a single fabdd instead of a subtract followed by an abs.
instruct absdD_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AbsD (SubD src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "fabdd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fabdd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
14315 
// Double-precision square root (fsqrtd).
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Fix: this is a double-precision op, so schedule it on the
  // double-precision divide/sqrt pipe (was fp_div_s -- the pipe
  // classes were swapped with sqrtF_reg).
  ins_pipe(fp_div_d);
%}
14328 
// Single-precision square root (fsqrts).
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (SqrtF src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Fix: this is a single-precision op, so schedule it on the
  // single-precision divide/sqrt pipe (was fp_div_d -- the pipe
  // classes were swapped with sqrtD_reg).
  ins_pipe(fp_div_s);
%}
14341 
// Half-precision square root (fsqrth).
instruct sqrtHF_reg(vRegF dst, vRegF src) %{
  match(Set dst (SqrtHF src));
  format %{ "fsqrth $dst, $src" %}
  ins_encode %{
    __ fsqrth($dst$$FloatRegister,
              $src$$FloatRegister);
  %}
  ins_pipe(fp_div_s);
%}
14351 
// Math.rint, floor, ceil: double-precision rounding, selected by the
// compile-time constant rmode. frintn = round-to-nearest-even (rint),
// frintm = toward minus infinity (floor), frintp = toward plus infinity (ceil).
instruct roundD_reg(vRegD dst, vRegD src, immI rmode) %{
  match(Set dst (RoundDoubleMode src rmode));
  format %{ "frint  $dst, $src, $rmode" %}
  ins_encode %{
    // NOTE(review): no default case -- an rmode outside the three named
    // constants would silently emit nothing; presumably the matcher only
    // produces these three values. Confirm against RoundDoubleModeNode.
    switch ($rmode$$constant) {
      case RoundDoubleModeNode::rmode_rint:
        __ frintnd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_floor:
        __ frintmd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_ceil:
        __ frintpd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
    }
  %}
  ins_pipe(fp_uop_d);
%}
14374 
// Math.copySign(double): dst gets src2's sign bit and src1's magnitude.
// fnegd of the 0.0 in "zero" produces -0.0, i.e. a register with only the
// sign bit set; bsl then selects the sign bit from src2 and all remaining
// bits from src1. dst is TEMP_DEF because it is written before inputs are
// fully consumed.
instruct copySignD_reg(vRegD dst, vRegD src1, vRegD src2, vRegD zero) %{
  match(Set dst (CopySignD src1 (Binary src2 zero)));
  effect(TEMP_DEF dst, USE src1, USE src2, USE zero);
  format %{ "CopySignD  $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg),
                  zero = as_FloatRegister($zero$$reg);
    __ fnegd(dst, zero);
    __ bsl(dst, __ T8B, src2, src1);
  %}
  ins_pipe(fp_uop_d);
%}
14389 
// Math.copySign(float): dst gets src2's sign bit and src1's magnitude.
// movi builds the 0x80000000 sign mask (0x80 shifted left 24); bsl then
// selects the sign bit from src2 and all remaining bits from src1.
instruct copySignF_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (CopySignF src1 src2));
  effect(TEMP_DEF dst, USE src1, USE src2);
  format %{ "CopySignF  $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg);
    __ movi(dst, __ T2S, 0x80, 24);
    __ bsl(dst, __ T8B, src2, src1);
  %}
  ins_pipe(fp_uop_d);
%}
14403 
// Math.signum(double): returns +-1.0 with src's sign, or src itself when
// src is +-0.0 or NaN. "zero" and "one" hold the constants 0.0 and 1.0.
instruct signumD_reg(vRegD dst, vRegD src, vRegD zero, vRegD one) %{
  match(Set dst (SignumD src (Binary zero one)));
  effect(TEMP_DEF dst, USE src, USE zero, USE one);
  format %{ "signumD  $dst, $src" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg),
                  dst = as_FloatRegister($dst$$reg),
                  zero = as_FloatRegister($zero$$reg),
                  one = as_FloatRegister($one$$reg);
    __ facgtd(dst, src, zero); // dst=0 for +-0.0 and NaN. 0xFFF..F otherwise
    __ ushrd(dst, dst, 1);     // dst=0 for +-0.0 and NaN. 0x7FF..F otherwise
    // Bit selection instruction gets bit from "one" for each enabled bit in
    // "dst", otherwise gets a bit from "src". For "src" that contains +-0.0 or
    // NaN the whole "src" will be copied because "dst" is zero. For all other
    // "src" values dst is 0x7FF..F, which means only the sign bit is copied
    // from "src", and all other bits are copied from 1.0.
    __ bsl(dst, __ T8B, one, src);
  %}
  ins_pipe(fp_uop_d);
%}
14424 
// Math.signum(float): returns +-1.0f with src's sign, or src itself when
// src is +-0.0f or NaN. Mirrors signumD_reg using the 32-bit lane forms.
instruct signumF_reg(vRegF dst, vRegF src, vRegF zero, vRegF one) %{
  match(Set dst (SignumF src (Binary zero one)));
  effect(TEMP_DEF dst, USE src, USE zero, USE one);
  format %{ "signumF  $dst, $src" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg),
                  dst = as_FloatRegister($dst$$reg),
                  zero = as_FloatRegister($zero$$reg),
                  one = as_FloatRegister($one$$reg);
    __ facgts(dst, src, zero);    // dst=0 for +-0.0 and NaN. 0xFFF..F otherwise
    __ ushr(dst, __ T2S, dst, 1); // dst=0 for +-0.0 and NaN. 0x7FF..F otherwise
    // Bit selection instruction gets bit from "one" for each enabled bit in
    // "dst", otherwise gets a bit from "src". For "src" that contains +-0.0 or
    // NaN the whole "src" will be copied because "dst" is zero. For all other
    // "src" values dst is 0x7FF..F, which means only the sign bit is copied
    // from "src", and all other bits are copied from 1.0.
    __ bsl(dst, __ T8B, one, src);
  %}
  ins_pipe(fp_uop_d);
%}
14445 
// Thread.onSpinWait() hint: emits the platform spin-wait sequence
// via MacroAssembler::spin_wait().
instruct onspinwait() %{
  match(OnSpinWait);
  ins_cost(INSN_COST);

  format %{ "onspinwait" %}

  ins_encode %{
    __ spin_wait();
  %}
  ins_pipe(pipe_class_empty);
%}
14457 
14458 // ============================================================================
14459 // Logical Instructions
14460 
14461 // Integer Logical Instructions
14462 
14463 // And Instructions
14464 
14465 
// Integer bitwise AND, register-register (andw).
// NOTE(review): the cr operand is not referenced and no KILL effect is
// declared; andw does not set flags -- cr looks vestigial. Confirm.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14480 
// Integer bitwise AND with a logical immediate (andw).
// NOTE(review): the cr operand is not referenced and no KILL effect is
// declared; andw does not set flags -- cr looks vestigial. Confirm.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Fix: format previously said "andsw" (the flag-setting form), but the
  // encoding emits plain andw; the debug output now matches the emitted insn.
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14495 
14496 // Or Instructions
14497 
// Integer bitwise OR, register-register (orrw).
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14512 
// Integer bitwise OR with a logical immediate (orrw).
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14527 
14528 // Xor Instructions
14529 
// Integer bitwise XOR, register-register (eorw).
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14544 
// Integer bitwise XOR with a logical immediate (eorw).
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14559 
14560 // Long Logical Instructions
14561 // TODO
14562 
// Long bitwise AND, register-register (andr, 64-bit form).
// NOTE(review): the cr operand is not referenced and no KILL effect is
// declared -- looks vestigial, as in the int forms. Confirm.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Fix: format comment said "# int" (copy-paste from the int rule);
  // this is the 64-bit long form.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14577 
// Long bitwise AND with a logical immediate (andr, 64-bit form).
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Fix: format comment said "# int" (copy-paste from the int rule).
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14592 
14593 // Or Instructions
14594 
// Long bitwise OR, register-register (orr, 64-bit form).
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  // Fix: format comment said "# int" (copy-paste from the int rule).
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14609 
// Long bitwise OR with a logical immediate (orr, 64-bit form).
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  // Fix: format comment said "# int" (copy-paste from the int rule).
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14624 
14625 // Xor Instructions
14626 
// Long bitwise XOR, register-register (eor, 64-bit form).
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  // Fix: format comment said "# int" (copy-paste from the int rule).
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14641 
// Long bitwise XOR with a logical immediate (eor, 64-bit form).
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  // Fix: format comment said "# int" (copy-paste from the int rule).
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14656 
// Sign-extend int to long: sbfm with bit range 0..31 is the canonical
// encoding of sxtw.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
14668 
// Zero-extend int to long: matches ConvI2L masked with 0xFFFFFFFF and emits
// a single ubfm (uxtw) instead of sign-extend + and.
// This pattern occurs in bigmath arithmetic.
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14682 
// Truncate long to int: a 32-bit register move (movw) keeps the low
// 32 bits and zeroes the upper half.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
14695 
// Narrow double to float (fcvtd).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}
14708 
// Widen float to double (fcvts).
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}
14721 
14722 instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
14723   match(Set dst (ConvF2I src));
14724 
14725   ins_cost(INSN_COST * 5);
14726   format %{ "fcvtzsw  $dst, $src \t// f2i" %}
14727 
14728   ins_encode %{
14729     __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
14730   %}
14731 
14732   ins_pipe(fp_f2i);
14733 %}
14734 
14735 instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
14736   match(Set dst (ConvF2L src));
14737 
14738   ins_cost(INSN_COST * 5);
14739   format %{ "fcvtzs  $dst, $src \t// f2l" %}
14740 
14741   ins_encode %{
14742     __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
14743   %}
14744 
14745   ins_pipe(fp_f2l);
14746 %}
14747 
14748 instruct convF2HF_reg_reg(iRegINoSp dst, vRegF src, vRegF tmp) %{
14749   match(Set dst (ConvF2HF src));
14750   format %{ "fcvt $tmp, $src\t# convert single to half precision\n\t"
14751             "smov $dst, $tmp\t# move result from $tmp to $dst"
14752   %}
14753   effect(TEMP tmp);
14754   ins_encode %{
14755       __ flt_to_flt16($dst$$Register, $src$$FloatRegister, $tmp$$FloatRegister);
14756   %}
14757   ins_pipe(pipe_slow);
14758 %}
14759 
14760 instruct convHF2F_reg_reg(vRegF dst, iRegINoSp src, vRegF tmp) %{
14761   match(Set dst (ConvHF2F src));
14762   format %{ "mov $tmp, $src\t# move source from $src to $tmp\n\t"
14763             "fcvt $dst, $tmp\t# convert half to single precision"
14764   %}
14765   effect(TEMP tmp);
14766   ins_encode %{
14767       __ flt16_to_flt($dst$$FloatRegister, $src$$Register, $tmp$$FloatRegister);
14768   %}
14769   ins_pipe(pipe_slow);
14770 %}
14771 
14772 instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
14773   match(Set dst (ConvI2F src));
14774 
14775   ins_cost(INSN_COST * 5);
14776   format %{ "scvtfws  $dst, $src \t// i2f" %}
14777 
14778   ins_encode %{
14779     __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
14780   %}
14781 
14782   ins_pipe(fp_i2f);
14783 %}
14784 
14785 instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
14786   match(Set dst (ConvL2F src));
14787 
14788   ins_cost(INSN_COST * 5);
14789   format %{ "scvtfs  $dst, $src \t// l2f" %}
14790 
14791   ins_encode %{
14792     __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
14793   %}
14794 
14795   ins_pipe(fp_l2f);
14796 %}
14797 
14798 instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
14799   match(Set dst (ConvD2I src));
14800 
14801   ins_cost(INSN_COST * 5);
14802   format %{ "fcvtzdw  $dst, $src \t// d2i" %}
14803 
14804   ins_encode %{
14805     __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
14806   %}
14807 
14808   ins_pipe(fp_d2i);
14809 %}
14810 
14811 instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
14812   match(Set dst (ConvD2L src));
14813 
14814   ins_cost(INSN_COST * 5);
14815   format %{ "fcvtzd  $dst, $src \t// d2l" %}
14816 
14817   ins_encode %{
14818     __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
14819   %}
14820 
14821   ins_pipe(fp_d2l);
14822 %}
14823 
14824 instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
14825   match(Set dst (ConvI2D src));
14826 
14827   ins_cost(INSN_COST * 5);
14828   format %{ "scvtfwd  $dst, $src \t// i2d" %}
14829 
14830   ins_encode %{
14831     __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
14832   %}
14833 
14834   ins_pipe(fp_i2d);
14835 %}
14836 
14837 instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
14838   match(Set dst (ConvL2D src));
14839 
14840   ins_cost(INSN_COST * 5);
14841   format %{ "scvtfd  $dst, $src \t// l2d" %}
14842 
14843   ins_encode %{
14844     __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
14845   %}
14846 
14847   ins_pipe(fp_l2d);
14848 %}
14849 
14850 instruct round_double_reg(iRegLNoSp dst, vRegD src, vRegD ftmp, rFlagsReg cr)
14851 %{
14852   match(Set dst (RoundD src));
14853   effect(TEMP_DEF dst, TEMP ftmp, KILL cr);
14854   format %{ "java_round_double $dst,$src"%}
14855   ins_encode %{
14856     __ java_round_double($dst$$Register, as_FloatRegister($src$$reg),
14857                          as_FloatRegister($ftmp$$reg));
14858   %}
14859   ins_pipe(pipe_slow);
14860 %}
14861 
14862 instruct round_float_reg(iRegINoSp dst, vRegF src, vRegF ftmp, rFlagsReg cr)
14863 %{
14864   match(Set dst (RoundF src));
14865   effect(TEMP_DEF dst, TEMP ftmp, KILL cr);
14866   format %{ "java_round_float $dst,$src"%}
14867   ins_encode %{
14868     __ java_round_float($dst$$Register, as_FloatRegister($src$$reg),
14869                         as_FloatRegister($ftmp$$reg));
14870   %}
14871   ins_pipe(pipe_slow);
14872 %}
14873 
// stack <-> reg and reg <-> reg shuffles with no conversion
//
// These implement the Move*2* ideal nodes: raw bit moves between the integer
// and FP register files, optionally via a stack slot, with no value change.

// Load the raw bits of a stack float slot into an int register.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load the raw bits of a stack int slot into an FP register.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Load the raw bits of a stack double slot into a long register.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load the raw bits of a stack long slot into an FP (double) register.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Store an FP (float) register's raw bits to a stack int slot.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Store an int register's raw bits to a stack float slot.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
14983 
// Store an FP (double) register's raw bits to a stack long slot.
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Operand order fixed: a store names the source register first, matching
  // the emitted "strd src, [sp, #disp]" and the sibling reg->stack formats.
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
15001 
// Store a long register's raw bits to a stack double slot.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}

// Direct FP -> GP bit move (float to int), no memory round trip.
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// Direct GP -> FP bit move (int to float).
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// Direct FP -> GP bit move (double to long).
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// Direct GP -> FP bit move (long to double).
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
15091 
15092 // ============================================================================
15093 // clearing of an array
15094 
15095 instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
15096 %{
15097   match(Set dummy (ClearArray cnt base));
15098   effect(USE_KILL cnt, USE_KILL base, KILL cr);
15099 
15100   ins_cost(4 * INSN_COST);
15101   format %{ "ClearArray $cnt, $base" %}
15102 
15103   ins_encode %{
15104     address tpc = __ zero_words($base$$Register, $cnt$$Register);
15105     if (tpc == nullptr) {
15106       ciEnv::current()->record_failure("CodeCache is full");
15107       return;
15108     }
15109   %}
15110 
15111   ins_pipe(pipe_class_memory);
15112 %}
15113 
15114 instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, iRegL_R11 temp, Universe dummy, rFlagsReg cr)
15115 %{
15116   predicate((uint64_t)n->in(2)->get_long()
15117             < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
15118   match(Set dummy (ClearArray cnt base));
15119   effect(TEMP temp, USE_KILL base, KILL cr);
15120 
15121   ins_cost(4 * INSN_COST);
15122   format %{ "ClearArray $cnt, $base" %}
15123 
15124   ins_encode %{
15125     address tpc = __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
15126     if (tpc == nullptr) {
15127       ciEnv::current()->record_failure("CodeCache is full");
15128       return;
15129     }
15130   %}
15131 
15132   ins_pipe(pipe_class_memory);
15133 %}
15134 
15135 // ============================================================================
15136 // Overflow Math Instructions
15137 
15138 instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
15139 %{
15140   match(Set cr (OverflowAddI op1 op2));
15141 
15142   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
15143   ins_cost(INSN_COST);
15144   ins_encode %{
15145     __ cmnw($op1$$Register, $op2$$Register);
15146   %}
15147 
15148   ins_pipe(icmp_reg_reg);
15149 %}
15150 
15151 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
15152 %{
15153   match(Set cr (OverflowAddI op1 op2));
15154 
15155   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
15156   ins_cost(INSN_COST);
15157   ins_encode %{
15158     __ cmnw($op1$$Register, $op2$$constant);
15159   %}
15160 
15161   ins_pipe(icmp_reg_imm);
15162 %}
15163 
15164 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
15165 %{
15166   match(Set cr (OverflowAddL op1 op2));
15167 
15168   format %{ "cmn   $op1, $op2\t# overflow check long" %}
15169   ins_cost(INSN_COST);
15170   ins_encode %{
15171     __ cmn($op1$$Register, $op2$$Register);
15172   %}
15173 
15174   ins_pipe(icmp_reg_reg);
15175 %}
15176 
15177 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
15178 %{
15179   match(Set cr (OverflowAddL op1 op2));
15180 
15181   format %{ "adds  zr, $op1, $op2\t# overflow check long" %}
15182   ins_cost(INSN_COST);
15183   ins_encode %{
15184     __ adds(zr, $op1$$Register, $op2$$constant);
15185   %}
15186 
15187   ins_pipe(icmp_reg_imm);
15188 %}
15189 
15190 instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
15191 %{
15192   match(Set cr (OverflowSubI op1 op2));
15193 
15194   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
15195   ins_cost(INSN_COST);
15196   ins_encode %{
15197     __ cmpw($op1$$Register, $op2$$Register);
15198   %}
15199 
15200   ins_pipe(icmp_reg_reg);
15201 %}
15202 
15203 instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
15204 %{
15205   match(Set cr (OverflowSubI op1 op2));
15206 
15207   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
15208   ins_cost(INSN_COST);
15209   ins_encode %{
15210     __ cmpw($op1$$Register, $op2$$constant);
15211   %}
15212 
15213   ins_pipe(icmp_reg_imm);
15214 %}
15215 
15216 instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
15217 %{
15218   match(Set cr (OverflowSubL op1 op2));
15219 
15220   format %{ "cmp   $op1, $op2\t# overflow check long" %}
15221   ins_cost(INSN_COST);
15222   ins_encode %{
15223     __ cmp($op1$$Register, $op2$$Register);
15224   %}
15225 
15226   ins_pipe(icmp_reg_reg);
15227 %}
15228 
15229 instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
15230 %{
15231   match(Set cr (OverflowSubL op1 op2));
15232 
15233   format %{ "cmp   $op1, $op2\t# overflow check long" %}
15234   ins_cost(INSN_COST);
15235   ins_encode %{
15236     __ subs(zr, $op1$$Register, $op2$$constant);
15237   %}
15238 
15239   ins_pipe(icmp_reg_imm);
15240 %}
15241 
15242 instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
15243 %{
15244   match(Set cr (OverflowSubI zero op1));
15245 
15246   format %{ "cmpw  zr, $op1\t# overflow check int" %}
15247   ins_cost(INSN_COST);
15248   ins_encode %{
15249     __ cmpw(zr, $op1$$Register);
15250   %}
15251 
15252   ins_pipe(icmp_reg_imm);
15253 %}
15254 
15255 instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
15256 %{
15257   match(Set cr (OverflowSubL zero op1));
15258 
15259   format %{ "cmp   zr, $op1\t# overflow check long" %}
15260   ins_cost(INSN_COST);
15261   ins_encode %{
15262     __ cmp(zr, $op1$$Register);
15263   %}
15264 
15265   ins_pipe(icmp_reg_imm);
15266 %}
15267 
15268 instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
15269 %{
15270   match(Set cr (OverflowMulI op1 op2));
15271 
15272   format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
15273             "cmp   rscratch1, rscratch1, sxtw\n\t"
15274             "movw  rscratch1, #0x80000000\n\t"
15275             "cselw rscratch1, rscratch1, zr, NE\n\t"
15276             "cmpw  rscratch1, #1" %}
15277   ins_cost(5 * INSN_COST);
15278   ins_encode %{
15279     __ smull(rscratch1, $op1$$Register, $op2$$Register);
15280     __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
15281     __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
15282     __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
15283     __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
15284   %}
15285 
15286   ins_pipe(pipe_slow);
15287 %}
15288 
15289 instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
15290 %{
15291   match(If cmp (OverflowMulI op1 op2));
15292   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
15293             || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
15294   effect(USE labl, KILL cr);
15295 
15296   format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
15297             "cmp   rscratch1, rscratch1, sxtw\n\t"
15298             "b$cmp   $labl" %}
15299   ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
15300   ins_encode %{
15301     Label* L = $labl$$label;
15302     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
15303     __ smull(rscratch1, $op1$$Register, $op2$$Register);
15304     __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
15305     __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
15306   %}
15307 
15308   ins_pipe(pipe_serial);
15309 %}
15310 
15311 instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
15312 %{
15313   match(Set cr (OverflowMulL op1 op2));
15314 
15315   format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
15316             "smulh rscratch2, $op1, $op2\n\t"
15317             "cmp   rscratch2, rscratch1, ASR #63\n\t"
15318             "movw  rscratch1, #0x80000000\n\t"
15319             "cselw rscratch1, rscratch1, zr, NE\n\t"
15320             "cmpw  rscratch1, #1" %}
15321   ins_cost(6 * INSN_COST);
15322   ins_encode %{
15323     __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
15324     __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
15325     __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
15326     __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
15327     __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
15328     __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
15329   %}
15330 
15331   ins_pipe(pipe_slow);
15332 %}
15333 
15334 instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
15335 %{
15336   match(If cmp (OverflowMulL op1 op2));
15337   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
15338             || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
15339   effect(USE labl, KILL cr);
15340 
15341   format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
15342             "smulh rscratch2, $op1, $op2\n\t"
15343             "cmp   rscratch2, rscratch1, ASR #63\n\t"
15344             "b$cmp $labl" %}
15345   ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
15346   ins_encode %{
15347     Label* L = $labl$$label;
15348     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
15349     __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
15350     __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
15351     __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
15352     __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
15353   %}
15354 
15355   ins_pipe(pipe_serial);
15356 %}
15357 
15358 // ============================================================================
15359 // Compare Instructions
15360 
15361 instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
15362 %{
15363   match(Set cr (CmpI op1 op2));
15364 
15365   effect(DEF cr, USE op1, USE op2);
15366 
15367   ins_cost(INSN_COST);
15368   format %{ "cmpw  $op1, $op2" %}
15369 
15370   ins_encode(aarch64_enc_cmpw(op1, op2));
15371 
15372   ins_pipe(icmp_reg_reg);
15373 %}
15374 
15375 instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
15376 %{
15377   match(Set cr (CmpI op1 zero));
15378 
15379   effect(DEF cr, USE op1);
15380 
15381   ins_cost(INSN_COST);
15382   format %{ "cmpw $op1, 0" %}
15383 
15384   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
15385 
15386   ins_pipe(icmp_reg_imm);
15387 %}
15388 
15389 instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
15390 %{
15391   match(Set cr (CmpI op1 op2));
15392 
15393   effect(DEF cr, USE op1);
15394 
15395   ins_cost(INSN_COST);
15396   format %{ "cmpw  $op1, $op2" %}
15397 
15398   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
15399 
15400   ins_pipe(icmp_reg_imm);
15401 %}
15402 
15403 instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
15404 %{
15405   match(Set cr (CmpI op1 op2));
15406 
15407   effect(DEF cr, USE op1);
15408 
15409   ins_cost(INSN_COST * 2);
15410   format %{ "cmpw  $op1, $op2" %}
15411 
15412   ins_encode(aarch64_enc_cmpw_imm(op1, op2));
15413 
15414   ins_pipe(icmp_reg_imm);
15415 %}
15416 
15417 // Unsigned compare Instructions; really, same as signed compare
15418 // except it should only be used to feed an If or a CMovI which takes a
15419 // cmpOpU.
15420 
15421 instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
15422 %{
15423   match(Set cr (CmpU op1 op2));
15424 
15425   effect(DEF cr, USE op1, USE op2);
15426 
15427   ins_cost(INSN_COST);
15428   format %{ "cmpw  $op1, $op2\t# unsigned" %}
15429 
15430   ins_encode(aarch64_enc_cmpw(op1, op2));
15431 
15432   ins_pipe(icmp_reg_reg);
15433 %}
15434 
15435 instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
15436 %{
15437   match(Set cr (CmpU op1 zero));
15438 
15439   effect(DEF cr, USE op1);
15440 
15441   ins_cost(INSN_COST);
15442   format %{ "cmpw $op1, #0\t# unsigned" %}
15443 
15444   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
15445 
15446   ins_pipe(icmp_reg_imm);
15447 %}
15448 
15449 instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
15450 %{
15451   match(Set cr (CmpU op1 op2));
15452 
15453   effect(DEF cr, USE op1);
15454 
15455   ins_cost(INSN_COST);
15456   format %{ "cmpw  $op1, $op2\t# unsigned" %}
15457 
15458   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
15459 
15460   ins_pipe(icmp_reg_imm);
15461 %}
15462 
15463 instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
15464 %{
15465   match(Set cr (CmpU op1 op2));
15466 
15467   effect(DEF cr, USE op1);
15468 
15469   ins_cost(INSN_COST * 2);
15470   format %{ "cmpw  $op1, $op2\t# unsigned" %}
15471 
15472   ins_encode(aarch64_enc_cmpw_imm(op1, op2));
15473 
15474   ins_pipe(icmp_reg_imm);
15475 %}
15476 
15477 instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
15478 %{
15479   match(Set cr (CmpL op1 op2));
15480 
15481   effect(DEF cr, USE op1, USE op2);
15482 
15483   ins_cost(INSN_COST);
15484   format %{ "cmp  $op1, $op2" %}
15485 
15486   ins_encode(aarch64_enc_cmp(op1, op2));
15487 
15488   ins_pipe(icmp_reg_reg);
15489 %}
15490 
15491 instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
15492 %{
15493   match(Set cr (CmpL op1 zero));
15494 
15495   effect(DEF cr, USE op1);
15496 
15497   ins_cost(INSN_COST);
15498   format %{ "tst  $op1" %}
15499 
15500   ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));
15501 
15502   ins_pipe(icmp_reg_imm);
15503 %}
15504 
15505 instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
15506 %{
15507   match(Set cr (CmpL op1 op2));
15508 
15509   effect(DEF cr, USE op1);
15510 
15511   ins_cost(INSN_COST);
15512   format %{ "cmp  $op1, $op2" %}
15513 
15514   ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));
15515 
15516   ins_pipe(icmp_reg_imm);
15517 %}
15518 
15519 instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
15520 %{
15521   match(Set cr (CmpL op1 op2));
15522 
15523   effect(DEF cr, USE op1);
15524 
15525   ins_cost(INSN_COST * 2);
15526   format %{ "cmp  $op1, $op2" %}
15527 
15528   ins_encode(aarch64_enc_cmp_imm(op1, op2));
15529 
15530   ins_pipe(icmp_reg_imm);
15531 %}
15532 
15533 instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
15534 %{
15535   match(Set cr (CmpUL op1 op2));
15536 
15537   effect(DEF cr, USE op1, USE op2);
15538 
15539   ins_cost(INSN_COST);
15540   format %{ "cmp  $op1, $op2" %}
15541 
15542   ins_encode(aarch64_enc_cmp(op1, op2));
15543 
15544   ins_pipe(icmp_reg_reg);
15545 %}
15546 
15547 instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
15548 %{
15549   match(Set cr (CmpUL op1 zero));
15550 
15551   effect(DEF cr, USE op1);
15552 
15553   ins_cost(INSN_COST);
15554   format %{ "tst  $op1" %}
15555 
15556   ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));
15557 
15558   ins_pipe(icmp_reg_imm);
15559 %}
15560 
15561 instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
15562 %{
15563   match(Set cr (CmpUL op1 op2));
15564 
15565   effect(DEF cr, USE op1);
15566 
15567   ins_cost(INSN_COST);
15568   format %{ "cmp  $op1, $op2" %}
15569 
15570   ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));
15571 
15572   ins_pipe(icmp_reg_imm);
15573 %}
15574 
15575 instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
15576 %{
15577   match(Set cr (CmpUL op1 op2));
15578 
15579   effect(DEF cr, USE op1);
15580 
15581   ins_cost(INSN_COST * 2);
15582   format %{ "cmp  $op1, $op2" %}
15583 
15584   ins_encode(aarch64_enc_cmp_imm(op1, op2));
15585 
15586   ins_pipe(icmp_reg_imm);
15587 %}
15588 
15589 instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
15590 %{
15591   match(Set cr (CmpP op1 op2));
15592 
15593   effect(DEF cr, USE op1, USE op2);
15594 
15595   ins_cost(INSN_COST);
15596   format %{ "cmp  $op1, $op2\t // ptr" %}
15597 
15598   ins_encode(aarch64_enc_cmpp(op1, op2));
15599 
15600   ins_pipe(icmp_reg_reg);
15601 %}
15602 
15603 instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
15604 %{
15605   match(Set cr (CmpN op1 op2));
15606 
15607   effect(DEF cr, USE op1, USE op2);
15608 
15609   ins_cost(INSN_COST);
15610   format %{ "cmp  $op1, $op2\t // compressed ptr" %}
15611 
15612   ins_encode(aarch64_enc_cmpn(op1, op2));
15613 
15614   ins_pipe(icmp_reg_reg);
15615 %}
15616 
15617 instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
15618 %{
15619   match(Set cr (CmpP op1 zero));
15620 
15621   effect(DEF cr, USE op1, USE zero);
15622 
15623   ins_cost(INSN_COST);
15624   format %{ "cmp  $op1, 0\t // ptr" %}
15625 
15626   ins_encode(aarch64_enc_testp(op1));
15627 
15628   ins_pipe(icmp_reg_imm);
15629 %}
15630 
15631 instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
15632 %{
15633   match(Set cr (CmpN op1 zero));
15634 
15635   effect(DEF cr, USE op1, USE zero);
15636 
15637   ins_cost(INSN_COST);
15638   format %{ "cmp  $op1, 0\t // compressed ptr" %}
15639 
15640   ins_encode(aarch64_enc_testn(op1));
15641 
15642   ins_pipe(icmp_reg_imm);
15643 %}
15644 
// FP comparisons
//
// n.b. CmpF/CmpD set a normal flags reg which then gets compared
// using normal cmpOp. See declaration of rFlagsReg for details.

instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Compare against the +0.0 immediate form of fcmp (no register needed).
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}

// Double-precision versions of the comparisons above.

instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
15706 
// Three-way FP compares (CmpF3/CmpD3): produce -1 / 0 / +1 in an int
// register, with unordered treated as "less" (result -1).  The csinv/csneg
// pair turns the flags into the three-valued result:
//   csinv d, zr, zr, EQ  -> 0 if equal, else -1
//   csneg d, d, d, LT    -> keep -1 if less/unordered, else negate to +1
//
// Cleanup: the format strings were malformed (function-call style with an
// unbalanced parenthesis); they now mirror the emitted assembly.  An unused
// `Label done` that was bound but never branched to has been removed.

instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}

instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}

instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}

instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
15814 
// CmpLTMask: dst = (p < q) ? -1 (all ones) : 0, signed compare.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    // dst = 1 if p < q, else 0 ...
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    // ... then negate: 0 - 1 = -1 (mask of all ones), 0 - 0 = 0.
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
15835 
// CmpLTMask against zero: arithmetic shift right by 31 replicates the
// sign bit, giving -1 when src < 0 and 0 otherwise — a single instruction.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
15851 
15852 // ============================================================================
15853 // Max and Min
15854 
15855 // Like compI_reg_reg or compI_reg_immI0 but without match rule and second zero parameter.
15856 
// Flag-setting helper with no match rule: compares src against zero and
// defines cr.  Used only from expand rules (e.g. min/max below).
instruct compI_reg_imm0(rFlagsReg cr, iRegI src)
%{
  effect(DEF cr, USE src);
  ins_cost(INSN_COST);
  format %{ "cmpw $src, 0" %}

  ins_encode %{
    __ cmpw($src$$Register, 0);
  %}
  ins_pipe(icmp_reg_imm);
%}
15868 
// MinI: expands to a compare followed by a conditional move on LT
// (compI_reg_reg and cmovI_reg_reg_lt are defined elsewhere in this file).
instruct minI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2)
%{
  match(Set dst (MinI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_lt(dst, src1, src2, cr);
  %}
%}
15880 
// MaxI: expands to a compare followed by a conditional move on GT
// (compI_reg_reg and cmovI_reg_reg_gt are defined elsewhere in this file).
instruct maxI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2)
%{
  match(Set dst (MaxI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_gt(dst, src1, src2, cr);
  %}
%}
15892 
15893 
15894 // ============================================================================
15895 // Branch Instructions
15896 
15897 // Direct Branch.
// Unconditional direct branch (Goto ideal node).
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
15911 
15912 // Conditional Near Branch
// Conditional branch on signed condition codes in cr.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
15932 
15933 // Conditional Near Branch Unsigned
// Conditional branch on unsigned condition codes in cr.
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
15953 
15954 // Make use of CBZ and CBNZ.  These instructions, as well as being
15955 // shorter than (cmp; branch), have the additional benefit of not
15956 // killing the flags.
15957 
// Fuse (CmpI reg, 0) + branch-eq/ne into a single cbzw/cbnzw.
// Shorter than cmp+branch and leaves the flags untouched.
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15974 
// Fuse (CmpL reg, 0) + branch-eq/ne into a single cbz/cbnz (64-bit form).
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15991 
// Pointer null-check + branch fused into cbz/cbnz (64-bit form).
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16008 
// Narrow-oop null-check + branch fused into cbzw/cbnzw (32-bit form,
// since compressed oops occupy the low 32 bits).
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16025 
// Null-check of a decoded narrow oop: a DecodeN result is null iff the
// encoded 32-bit value is zero, so test the narrow register directly
// with cbzw/cbnzw and skip the decode on this path.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16042 
// Unsigned int compare against zero + branch.  For unsigned x:
// x <= 0 (LS) iff x == 0, and x > 0 (HI) iff x != 0, so all four
// conditions collapse to cbzw/cbnzw.
instruct cmpUI_imm0_branch(cmpOpUEqNeLeGt cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS) {
      __ cbzw($op1$$Register, *L);
    } else {
      assert(cond == Assembler::NE || cond == Assembler::HI, "unexpected condition");
      __ cbnzw($op1$$Register, *L);
    }
  %}
  ins_pipe(pipe_cmp_branch);
%}
16061 
// Unsigned long compare against zero + branch; same collapse as the
// int variant above (EQ/LS -> cbz, NE/HI -> cbnz), 64-bit form.
instruct cmpUL_imm0_branch(cmpOpUEqNeLeGt cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS) {
      __ cbz($op1$$Register, *L);
    } else {
      assert(cond == Assembler::NE || cond == Assembler::HI, "unexpected condition");
      __ cbnz($op1$$Register, *L);
    }
  %}
  ins_pipe(pipe_cmp_branch);
%}
16080 
16081 // Test bit and Branch
16082 
16083 // Patterns for short (< 32KiB) variants
// Signed long compared with 0 reduces to a test of the sign bit (63):
// LT iff bit set (NE -> tbnz), GE iff bit clear (EQ -> tbz).
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    // tbr selects tbz/tbnz from cond — short form, +/-32KiB reach.
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16099 
// Signed int compared with 0 reduces to a test of the sign bit (31):
// LT iff bit set (NE -> tbnz), GE iff bit clear (EQ -> tbz).
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16115 
// (op1 & (1 << k)) == 0 / != 0 branch: when the AND mask is a power of
// two, test just that bit with tbz/tbnz.  Short (+/-32KiB) variant.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // Mask is a power of two (predicate), so this is the bit index.
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16132 
// Int variant of the single-bit test-and-branch above.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16149 
16150 // And far variants
// Far variant of the long sign-bit branch; /*far*/true lets tbr reach
// targets beyond the +/-32KiB tbz/tbnz range.
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16165 
// Far variant of the int sign-bit branch.
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16180 
// Far variant of the long single-bit test-and-branch.
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16196 
// Far variant of the int single-bit test-and-branch.
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16212 
16213 // Test bits
16214 
// Set flags from (op1 & op2) with a 64-bit logical-immediate tst;
// predicate ensures op2 is encodable as a logical immediate.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
16227 
// Set flags from (op1 & op2) with a 32-bit logical-immediate tstw;
// predicate ensures op2 is encodable as a logical immediate.
// (Format fixed to "tstw" to match the emitted instruction and the
// register-register variant below.)
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
16240 
// Set flags from (op1 & op2), register-register 64-bit form.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
16251 
// Set flags from (op1 & op2), register-register 32-bit form.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
16262 
16263 
16264 // Conditional Far Branch
16265 // Conditional Far Branch Unsigned
16266 // TODO: fixme
16267 
16268 // counted loop end branch near
// Counted-loop back branch; same encoding as branchCon.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
16284 
16285 // counted loop end branch far
16286 // TODO: fixme
16287 
16288 // ============================================================================
16289 // inlined locking and unlocking
16290 
// Inline (lightweight) monitor enter; result is communicated via the
// flags in cr, and tmp/tmp2/tmp3 are clobbered as scratch.
instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2, TEMP tmp3);

  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2,$tmp3" %}

  ins_encode %{
    __ fast_lock_lightweight($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_serial);
%}
16305 
// Inline (lightweight) monitor exit; counterpart of fastlock above.
instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2, TEMP tmp3);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2, $tmp3" %}

  ins_encode %{
    __ fast_unlock_lightweight($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_serial);
%}
16320 
16321 // ============================================================================
16322 // Safepoint Instructions
16323 
16324 // TODO
16325 // provide a near and far version of this code
16326 
// Safepoint poll: a load from the polling page; the VM arms the page to
// trap threads at a safepoint.  The relocation marks this as a poll site.
instruct safePoint(rFlagsReg cr, iRegP poll)
%{
  match(SafePoint poll);
  effect(KILL cr);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
16340 
16341 
16342 // ============================================================================
16343 // Procedure Call/Return Instructions
16344 
16345 // Call Java Static Instruction
16346 
// Direct call to a statically-bound Java method.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode(aarch64_enc_java_static_call(meth),
             aarch64_enc_call_epilog);

  ins_pipe(pipe_class_call);
%}
16362 
16363 // TO HERE
16364 
16365 // Call Java Dynamic Instruction
// Call to a dynamically-dispatched Java method (inline-cache call).
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode(aarch64_enc_java_dynamic_call(meth),
             aarch64_enc_call_epilog);

  ins_pipe(pipe_class_call);
%}
16381 
16382 // Call Runtime Instruction
16383 
// Call from compiled Java code into the VM runtime.
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16398 
16399 // Call Runtime Instruction
16400 
// Call to a runtime leaf routine (no safepoint, no Java frame walk).
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16415 
16416 // Call Runtime Instruction without safepoint and with vector arguments
// Call to a runtime leaf routine that takes/returns vector arguments.
instruct CallLeafDirectVector(method meth)
%{
  match(CallLeafVector);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf vector $meth" %}

  ins_encode(aarch64_enc_java_to_runtime(meth));

  ins_pipe(pipe_class_call);
%}
16431 
16432 // Call Runtime Instruction
16433 
// Call to a runtime leaf routine that does not touch FP registers.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16448 
16449 // Tail Call; Jump from runtime stub to Java code.
16450 // Also known as an 'interprocedural jump'.
16451 // Target of jump will eventually return to caller.
16452 // TailJump below removes the return address.
16453 // Don't use rfp for 'jump_target' because a MachEpilogNode has already been
16454 // emitted just above the TailCall which has reset rfp to the caller state.
// Indirect tail call: jump (not call) through jump_target with the
// method pointer in the inline-cache register for the callee.
instruct TailCalljmpInd(iRegPNoSpNoRfp jump_target, inline_cache_RegP method_ptr)
%{
  match(TailCall jump_target method_ptr);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_ptr holds method" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
16467 
// Indirect tail jump used for exception dispatch: the exception oop is
// passed in r0.
instruct TailjmpInd(iRegPNoSpNoRfp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
16480 
16481 // Forward exception.
// Jump to the shared forward-exception stub (far_jump handles any
// code-cache distance).
instruct ForwardExceptionjmp()
%{
  match(ForwardException);
  ins_cost(CALL_COST);

  format %{ "b forward_exception_stub" %}
  ins_encode %{
    __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
  %}
  ins_pipe(pipe_class_call);
%}
16493 
16494 // Create exception oop: created by stack-crawling runtime code.
16495 // Created exception is now available to this handler, and is setup
16496 // just prior to jumping to this handler. No code emitted.
16497 // TODO check
16498 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Pseudo-instruction: tells the allocator the exception oop arrives in
// r0; size(0) — no machine code is emitted.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
16511 
16512 // Rethrow exception: The exception oop will come in the first
16513 // argument position. Then JUMP (not call) to the rethrow stub code.
// Jump (not call) to the rethrow stub; the exception oop is already in
// the first argument register.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
16524 
16525 
16526 // Return Instruction
16527 // epilog node loads ret address into lr as part of frame pop
// Method return; the epilog has already restored lr.
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
16538 
16539 // Die now.
// Halt node: emit a stop() with the halt reason, but only when the code
// is reachable (unreachable Halts emit nothing).
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    if (is_reachable()) {
      const char* str = __ code_string(_halt_reason);
      __ stop(str);
    }
  %}

  ins_pipe(pipe_class_default);
%}
16555 
16556 // ============================================================================
16557 // Partial Subtype Check
16558 //
// Scan the sub-klass's secondary-supers (superklass) array for an
// instance of the superklass.  Set a hidden internal cache on a hit
// (cache is checked with exposed code in gen_subtype_check()).  Return
// NZ for a miss or zero for a hit.  The encoding ALSO sets flags.
16563 
// Legacy linear-scan subtype check; used only when the hashed
// secondary-supers table is disabled.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  predicate(!UseSecondarySupersTable);
  effect(KILL cr, KILL temp);

  ins_cost(20 * INSN_COST);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
16579 
16580 // Two versions of partialSubtypeCheck, both used when we need to
16581 // search for a super class in the secondary supers array. The first
16582 // is used when we don't know _a priori_ the class being searched
16583 // for. The second, far more common, is used when we do know: this is
16584 // used for instanceof, checkcast, and any case where C2 can determine
16585 // it by constant propagation.
16586 
// Hashed secondary-supers lookup when the super class is NOT a compile-
// time constant; result register carries hit/miss, no success label.
instruct partialSubtypeCheckVarSuper(iRegP_R4 sub, iRegP_R0 super, vRegD_V0 vtemp, iRegP_R5 result,
                                     iRegP_R1 tempR1, iRegP_R2 tempR2, iRegP_R3 tempR3,
                                     rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  predicate(UseSecondarySupersTable);
  effect(KILL cr, TEMP tempR1, TEMP tempR2, TEMP tempR3, TEMP vtemp);

  ins_cost(10 * INSN_COST);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode %{
    __ lookup_secondary_supers_table_var($sub$$Register, $super$$Register,
                                         $tempR1$$Register, $tempR2$$Register, $tempR3$$Register,
                                         $vtemp$$FloatRegister,
                                         $result$$Register, /*L_success*/nullptr);
  %}

  ins_pipe(pipe_class_memory);
%}
16607 
// Hashed secondary-supers lookup when the super class IS a compile-time
// constant: its hash slot is computed now, and either the lookup is
// inlined or a per-slot stub is trampoline-called.  Emission can fail
// (trampoline/stub space); that failure is recorded as CodeCache full.
instruct partialSubtypeCheckConstSuper(iRegP_R4 sub, iRegP_R0 super_reg, immP super_con, vRegD_V0 vtemp, iRegP_R5 result,
                                       iRegP_R1 tempR1, iRegP_R2 tempR2, iRegP_R3 tempR3,
                                       rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub (Binary super_reg super_con)));
  predicate(UseSecondarySupersTable);
  effect(KILL cr, TEMP tempR1, TEMP tempR2, TEMP tempR3, TEMP vtemp);

  ins_cost(5 * INSN_COST);  // smaller than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super_reg, $super_con" %}

  ins_encode %{
    bool success = false;
    // The constant super klass determines which hash slot (and stub) to use.
    u1 super_klass_slot = ((Klass*)$super_con$$constant)->hash_slot();
    if (InlineSecondarySupersTest) {
      success =
        __ lookup_secondary_supers_table_const($sub$$Register, $super_reg$$Register,
                                               $tempR1$$Register, $tempR2$$Register, $tempR3$$Register,
                                               $vtemp$$FloatRegister,
                                               $result$$Register,
                                               super_klass_slot);
    } else {
      // Out-of-line: call the pre-generated stub for this hash slot.
      address call = __ trampoline_call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_stub(super_klass_slot)));
      success = (call != nullptr);
    }
    if (!success) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
16641 
// Intrinsics for String.compareTo()
16643 
// String.compareTo intrinsic, both strings UTF-16 (UU), NEON-only path.
// (Format's KILL list fixed to include $tmp2, matching the effect clause.)
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, pnoreg, pnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16661 
// String.compareTo intrinsic, both strings Latin-1 (LL), NEON-only path.
// (Format's KILL list fixed to include $tmp2, matching the effect clause.)
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, pnoreg, pnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16678 
// String.compareTo intrinsic, mixed encodings UTF-16 vs Latin-1 (UL);
// needs three vector temps for the widening comparison.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, pnoreg, pnoreg, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16698 
// String.compareTo intrinsic, mixed encodings Latin-1 vs UTF-16 (LU);
// mirror of the UL variant above.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, pnoreg, pnoreg, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
16718 
16719 // Note that Z registers alias the corresponding NEON registers, we declare the vector operands of
16720 // these string_compare variants as NEON register type for convenience so that the prototype of
16721 // string_compare can be shared with all variants.
16722 
// SVE variant of the Latin-1/Latin-1 compare; uses two governing
// predicate temps, and TEMP (not KILL) effects since they are scratch.
instruct string_compareLL_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16745 
// String compare, SVE path, LU encoding: first string Latin-1, second UTF-16.
instruct string_compareLU_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
16768 
// String compare, SVE path, UL encoding: first string UTF-16, second Latin-1.
instruct string_compareUL_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16791 
// String compare, SVE path, UU encoding: both strings UTF-16.
instruct string_compareUU_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16814 
// String.indexOf intrinsic, UU encoding: UTF-16 source and UTF-16 pattern,
// pattern length known only at runtime (cnt2 in a register).
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                          iRegINoSp tmp3, iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
         TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    // icnt2 == -1: pattern length is not a compile-time constant.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16838 
// String.indexOf intrinsic, LL encoding: Latin-1 source and Latin-1 pattern,
// pattern length known only at runtime (cnt2 in a register).
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
         TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    // icnt2 == -1: pattern length is not a compile-time constant.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16862 
// String.indexOf intrinsic, UL encoding: UTF-16 source and Latin-1 pattern,
// pattern length known only at runtime (cnt2 in a register).
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5,
         TEMP tmp6, TEMP vtmp0, TEMP vtmp1, KILL cr);
  // "$cnt1" was previously the literal text "cnt1"; fixed to match the
  // UU/LL variants so PrintOptoAssembly prints the operand, not raw text.
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    // icnt2 == -1: pattern length is not a compile-time constant.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16886 
// String.indexOf intrinsic, UU encoding, with a small constant pattern
// length (immI_le_4). No vector temps are needed on this path.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    // Pattern length is the immediate; zr stands in for cnt2 and the
    // extra temps the variable-length stub would otherwise use.
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16908 
// String.indexOf intrinsic, LL encoding, with a small constant pattern
// length (immI_le_4). No vector temps are needed on this path.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    // Pattern length is the immediate; zr stands in for cnt2 and the
    // extra temps the variable-length stub would otherwise use.
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16930 
// String.indexOf intrinsic, UL encoding, with a constant pattern length.
// Note the operand is immI_1 (length exactly 1), unlike the UU/LL constant
// variants which accept lengths up to 4 (immI_le_4).
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    // Pattern length is the immediate; zr stands in for cnt2 and the
    // extra temps the variable-length stub would otherwise use.
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16952 
// indexOf(char) on a UTF-16 string, NEON path (UseSVE == 0).
instruct string_indexof_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                             iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                             iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  predicate((UseSVE == 0) && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "StringUTF16 IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16971 
// indexOf(char) on a Latin-1 string, NEON path (UseSVE == 0).
instruct stringL_indexof_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  predicate((UseSVE == 0) && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "StringLatin1 IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ stringL_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                            $result$$Register, $tmp1$$Register, $tmp2$$Register,
                            $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16990 
// indexOf(char) on a Latin-1 string, SVE path. Shares one stub with the
// UTF-16 variant below; the trailing bool selects Latin-1 (isL == true).
// NOTE(review): unlike the NEON variants, the inputs are not USE_KILLed
// here — presumably the SVE stub preserves them; verify against
// MacroAssembler::string_indexof_char_sve.
instruct stringL_indexof_char_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                                  iRegI_R0 result, vecA ztmp1, vecA ztmp2,
                                  pRegGov pgtmp, pReg ptmp, rFlagsReg cr) %{
  predicate(UseSVE > 0 && ((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L);
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(TEMP ztmp1, TEMP ztmp2, TEMP pgtmp, TEMP ptmp, KILL cr);
  format %{ "StringLatin1 IndexOf char[] $str1,$cnt1,$ch -> $result # use sve" %}
  ins_encode %{
    __ string_indexof_char_sve($str1$$Register, $cnt1$$Register, $ch$$Register,
                               $result$$Register, $ztmp1$$FloatRegister,
                               $ztmp2$$FloatRegister, $pgtmp$$PRegister,
                               $ptmp$$PRegister, true /* isL */);
  %}
  ins_pipe(pipe_class_memory);
%}
17006 
// indexOf(char) on a UTF-16 string, SVE path (isL == false selects the
// 16-bit-element flavor of the shared stub).
instruct stringU_indexof_char_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                                  iRegI_R0 result, vecA ztmp1, vecA ztmp2,
                                  pRegGov pgtmp, pReg ptmp, rFlagsReg cr) %{
  predicate(UseSVE > 0 && ((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U);
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(TEMP ztmp1, TEMP ztmp2, TEMP pgtmp, TEMP ptmp, KILL cr);
  format %{ "StringUTF16 IndexOf char[] $str1,$cnt1,$ch -> $result # use sve" %}
  ins_encode %{
    __ string_indexof_char_sve($str1$$Register, $cnt1$$Register, $ch$$Register,
                               $result$$Register, $ztmp1$$FloatRegister,
                               $ztmp2$$FloatRegister, $pgtmp$$PRegister,
                               $ptmp$$PRegister, false /* isL */);
  %}
  ins_pipe(pipe_class_memory);
%}
17022 
// String.equals for two Latin-1 strings (LL encoding); cnt is the shared
// byte count. All inputs and the flags are clobbered.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
17038 
// Arrays.equals for byte[] (LL encoding). The trailing argument 1 selects
// the 1-byte-element flavor of the shared arrays_equals stub (the char
// variant below passes 2).
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                       vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3,
         TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         TEMP vtmp6, TEMP vtmp7, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result # KILL $ary1 $ary2 $tmp $tmp1 $tmp2 $tmp3 V0-V7 cr" %}
  ins_encode %{
    address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register,
                                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                                   $result$$Register, $tmp$$Register, 1);
    // A null stub address means code emission failed; abandon compilation.
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
17063 
// Arrays.equals for char[] (UU encoding). The trailing argument 2 selects
// the 2-byte-element flavor of the shared arrays_equals stub.
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                       vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3,
         TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         TEMP vtmp6, TEMP vtmp7, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result # KILL $ary1 $ary2 $tmp $tmp1 $tmp2 $tmp3 V0-V7 cr" %}
  ins_encode %{
    address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register,
                                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                                   $result$$Register, $tmp$$Register, 2);
    // A null stub address means code emission failed; abandon compilation.
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
17088 
// Vectorized Arrays.hashCode; basic_type is an immediate BasicType that
// selects the element width inside the stub. result is both an input
// (initial hash) and the output.
instruct arrays_hashcode(iRegP_R1 ary, iRegI_R2 cnt, iRegI_R0 result, immI basic_type,
                         vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                         vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                         vRegD_V12 vtmp8, vRegD_V13 vtmp9, rFlagsReg cr)
%{
  match(Set result (VectorizedHashCode (Binary ary cnt) (Binary result basic_type)));
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5, TEMP vtmp6,
         TEMP vtmp7, TEMP vtmp8, TEMP vtmp9, USE_KILL ary, USE_KILL cnt, USE basic_type, KILL cr);

  format %{ "Array HashCode array[] $ary,$cnt,$result,$basic_type -> $result   // KILL all" %}
  ins_encode %{
    // NOTE(review): vtmp3..vtmp0 are deliberately passed in reverse
    // declaration order — this matches the register roles expected by
    // MacroAssembler::arrays_hashcode; confirm against that definition
    // before reordering.
    address tpc = __ arrays_hashcode($ary$$Register, $cnt$$Register, $result$$Register,
                                     $vtmp3$$FloatRegister, $vtmp2$$FloatRegister,
                                     $vtmp1$$FloatRegister, $vtmp0$$FloatRegister,
                                     $vtmp4$$FloatRegister, $vtmp5$$FloatRegister,
                                     $vtmp6$$FloatRegister, $vtmp7$$FloatRegister,
                                     $vtmp8$$FloatRegister, $vtmp9$$FloatRegister,
                                     (BasicType)$basic_type$$constant);
    // A null stub address means code emission failed; abandon compilation.
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
17114 
// CountPositives intrinsic over a byte[]: counts leading non-negative
// bytes (used by StringCoding).
instruct count_positives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (CountPositives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "count positives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    address tpc = __ count_positives($ary1$$Register, $len$$Register, $result$$Register);
    // A null stub address means code emission failed; abandon compilation.
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe( pipe_slow );
%}
17129 
17130 // fast char[] to byte[] compression
// fast char[] to byte[] compression
// StrCompressedCopy: compress a UTF-16 char[] into a Latin-1 byte[];
// result reports success/length per the char_array_compress contract.
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                         vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         USE_KILL src, USE_KILL dst, USE len, KILL cr);

  format %{ "String Compress $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $result$$Register, $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                           $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                           $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
17149 
17150 // fast byte[] to char[] inflation
// StrInflatedCopy: inflate a Latin-1 byte[] into a UTF-16 char[].
// NOTE(review): vtmp3-vtmp6 are declared as TEMPs but not passed to the
// call — presumably clobbered internally by byte_array_inflate; confirm
// against that definition before trimming the effect list.
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len, iRegP_R3 tmp,
                        vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                        vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3,
         TEMP vtmp4, TEMP vtmp5, TEMP vtmp6, TEMP tmp,
         USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst # KILL $tmp $src $dst $len V0-V6 cr" %}
  ins_encode %{
    address tpc = __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                                        $vtmp2$$FloatRegister, $tmp$$Register);
    // A null stub address means code emission failed; abandon compilation.
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
17172 
17173 // encode char[] to byte[] in ISO_8859_1
// encode char[] to byte[] in ISO_8859_1
// EncodeISOArray, ISO-8859-1 flavor (predicate excludes the ASCII-only
// node); the 'false' flag passed to the stub selects ISO semantics.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                          vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                          iRegI_R0 result, rFlagsReg cr)
%{
  predicate(!((EncodeISOArrayNode*)n)->is_ascii());
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE len, KILL vtmp0, KILL vtmp1,
         KILL vtmp2, KILL vtmp3, KILL vtmp4, KILL vtmp5, KILL cr);

  format %{ "Encode ISO array $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
                        $result$$Register, false,
                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                        $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                        $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_class_memory);
%}
17194 
// EncodeISOArray, ASCII-only flavor; shares the encode_iso_array stub with
// the ISO variant above, distinguished by the 'true' (ascii) flag.
instruct encode_ascii_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                            vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                            vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                            iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((EncodeISOArrayNode*)n)->is_ascii());
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE len, KILL vtmp0, KILL vtmp1,
         KILL vtmp2, KILL vtmp3, KILL vtmp4, KILL vtmp5, KILL cr);

  format %{ "Encode ASCII array $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
                        $result$$Register, true,
                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                        $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                        $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_class_memory);
%}
17215 
17216 //----------------------------- CompressBits/ExpandBits ------------------------
17217 
// CompressBits (Integer.compress) on a 32-bit value in registers: move
// src/mask into FP/SIMD lanes, run the SVE BEXT on a single S lane, move
// the result back to a GPR.
instruct compressBitsI_reg(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ S, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ S, 0, $mask$$Register);
    __ sve_bext($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17235 
// CompressBits on a 32-bit value loaded from memory with an immediate
// mask; the mask is materialized from the constant pool (the format's
// "ldrs $tmask, $mask" is shorthand for that constant-pool load).
instruct compressBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits (LoadI mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrs   $tsrc, $mem\n\t"
            "ldrs   $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    // Load the 32-bit source straight into an FP/SIMD register.
    loadStore(masm, &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ ldrs($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bext($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17254 
// CompressBits (Long.compress) on a 64-bit value in registers: same scheme
// as the int form but on a single D lane.
instruct compressBitsL_reg(iRegLNoSp dst, iRegL src, iRegL mask,
                           vRegD tdst, vRegD tsrc, vRegD tmask) %{
  match(Set dst (CompressBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ D, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ D, 0, $mask$$Register);
    __ sve_bext($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17272 
// CompressBits on a 64-bit value loaded from memory with an immediate
// mask (materialized from the constant pool).
instruct compressBitsL_memcon(iRegLNoSp dst, memory8 mem, immL mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits (LoadL mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrd   $tsrc, $mem\n\t"
            "ldrd   $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    // Load the 64-bit source straight into an FP/SIMD register.
    loadStore(masm, &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ ldrd($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bext($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17291 
// ExpandBits (Integer.expand) on a 32-bit value in registers, via the SVE
// BDEP (bit-deposit) instruction on a single S lane.
instruct expandBitsI_reg(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask,
                         vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ S, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ S, 0, $mask$$Register);
    __ sve_bdep($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17309 
// ExpandBits on a 32-bit value loaded from memory with an immediate mask
// (materialized from the constant pool).
instruct expandBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask,
                         vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits (LoadI mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrs   $tsrc, $mem\n\t"
            "ldrs   $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    // Load the 32-bit source straight into an FP/SIMD register.
    loadStore(masm, &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ ldrs($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bdep($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17328 
// Expand (bit-deposit) a long value under a long mask, register-register form.
// Same scheme as expandBitsI_reg but with D-sized (64-bit) lanes and long
// register classes.
instruct expandBitsL_reg(iRegLNoSp dst, iRegL src, iRegL mask,
                         vRegD tdst, vRegD tsrc, vRegD tmask) %{
  match(Set dst (ExpandBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    // Move the 64-bit source and mask into D-sized lane 0 of the FP temps.
    __ mov($tsrc$$FloatRegister, __ D, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ D, 0, $mask$$Register);
    // Deposit the low-order bits of tsrc at the set-bit positions of tmask.
    __ sve_bdep($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    // Move the 64-bit result back to the destination GPR.
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17346 
17347 
// Expand (bit-deposit) a long value loaded from memory under an immediate
// long mask. The source is loaded straight into an FP temp, the mask is
// materialized from the constant pool, the SVE2 "bdep" performs the deposit,
// and the 64-bit result is moved back to a GPR.
// Fix: dst must be the long register class (iRegLNoSp, as in expandBitsL_reg),
// not iRegINoSp — the match rule produces a long (ExpandBits of a LoadL).
instruct expandBitsL_memcon(iRegLNoSp dst, memory8 mem, immL mask,
                         vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits (LoadL mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrd   $tsrc, $mem\n\t"
            "ldrd   $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    // Load the 64-bit source operand directly into the FP temp (folds the LoadL).
    loadStore(masm, &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    // Load the immediate mask from the constant pool.
    __ ldrd($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bdep($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    // Move the 64-bit result back to the destination GPR.
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17366 
17367 //----------------------------- Reinterpret ----------------------------------
17368 // Reinterpret a half-precision float value in a floating point register to a general purpose register
instruct reinterpretHF2S(iRegINoSp dst, vRegF src) %{
  match(Set dst (ReinterpretHF2S src));
  format %{ "reinterpretHF2S $dst, $src" %}
  ins_encode %{
    // Signed move of the 16-bit (H) lane 0 from the FP register to the GPR;
    // the raw half-precision bit pattern is sign-extended into dst.
    __ smov($dst$$Register, $src$$FloatRegister, __ H, 0);
  %}
  ins_pipe(pipe_slow);
%}
17377 
17378 // Reinterpret a half-precision float value in a general purpose register to a floating point register
instruct reinterpretS2HF(vRegF dst, iRegINoSp src) %{
  match(Set dst (ReinterpretS2HF src));
  format %{ "reinterpretS2HF $dst, $src" %}
  ins_encode %{
    // Move the low 16 bits of the GPR into the H-sized lane 0 of the FP
    // register, preserving the raw half-precision bit pattern.
    __ mov($dst$$FloatRegister, __ H, 0, $src$$Register);
  %}
  ins_pipe(pipe_slow);
%}
17387 
17388 // Without this optimization, ReinterpretS2HF (ConvF2HF src) would result in the following
17389 // instructions (the first two are for ConvF2HF and the last instruction is for ReinterpretS2HF) -
17390 // fcvt $tmp1_fpr, $src_fpr    // Convert float to half-precision float
17391 // mov  $tmp2_gpr, $tmp1_fpr   // Move half-precision float in FPR to a GPR
17392 // mov  $dst_fpr,  $tmp2_gpr   // Move the result from a GPR to an FPR
17393 // The move from FPR to GPR in ConvF2HF and the move from GPR to FPR in ReinterpretS2HF
17394 // can be omitted in this pattern, resulting in -
17395 // fcvt $dst, $src  // Convert float to half-precision float
instruct convF2HFAndS2HF(vRegF dst, vRegF src)
%{
  match(Set dst (ReinterpretS2HF (ConvF2HF src)));
  format %{ "convF2HFAndS2HF $dst, $src" %}
  ins_encode %{
    // Single-precision to half-precision convert; both value and result stay
    // in FP registers, eliminating the FPR->GPR->FPR round trip described in
    // the comment above.
    __ fcvtsh($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
17405 
17406 // Without this optimization, ConvHF2F (ReinterpretHF2S src) would result in the following
17407 // instructions (the first one is for ReinterpretHF2S and the last two are for ConvHF2F) -
17408 // mov  $tmp1_gpr, $src_fpr  // Move the half-precision float from an FPR to a GPR
17409 // mov  $tmp2_fpr, $tmp1_gpr // Move the same value from GPR to an FPR
17410 // fcvt $dst_fpr,  $tmp2_fpr // Convert the half-precision float to 32-bit float
17411 // The move from FPR to GPR in ReinterpretHF2S and the move from GPR to FPR in ConvHF2F
17412 // can be omitted as the input (src) is already in an FPR required for the fcvths instruction
17413 // resulting in -
17414 // fcvt $dst, $src  // Convert half-precision float to a 32-bit float
instruct convHF2SAndHF2F(vRegF dst, vRegF src)
%{
  match(Set dst (ConvHF2F (ReinterpretHF2S src)));
  format %{ "convHF2SAndHF2F $dst, $src" %}
  ins_encode %{
    // Half-precision to single-precision convert; both value and result stay
    // in FP registers, eliminating the FPR->GPR->FPR round trip described in
    // the comment above.
    __ fcvths($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
17424 
17425 // ============================================================================
17426 // This name is KNOWN by the ADLC and cannot be changed.
17427 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
17428 // for this guy.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  // Zero cost: no code is emitted (see size(0) / empty ins_encode below);
  // the current thread already lives in the dedicated thread register that
  // thread_RegP names.
  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
17443 
17444 //----------PEEPHOLE RULES-----------------------------------------------------
17445 // These must follow all instruction definitions as they use the names
17446 // defined in the instructions definitions.
17447 //
17448 // peepmatch ( root_instr_name [preceding_instruction]* );
17449 //
17450 // peepconstraint %{
17451 // (instruction_number.operand_name relational_op instruction_number.operand_name
17452 //  [, ...] );
17453 // // instruction numbers are zero-based using left to right order in peepmatch
17454 //
17455 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
17456 // // provide an instruction_number.operand_name for each operand that appears
17457 // // in the replacement instruction's match rule
17458 //
17459 // ---------VM FLAGS---------------------------------------------------------
17460 //
17461 // All peephole optimizations can be turned off using -XX:-OptoPeephole
17462 //
17463 // Each peephole rule is given an identifying number starting with zero and
17464 // increasing by one in the order seen by the parser.  An individual peephole
17465 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
17466 // on the command-line.
17467 //
17468 // ---------CURRENT LIMITATIONS----------------------------------------------
17469 //
17470 // Only match adjacent instructions in same basic block
17471 // Only equality constraints
17472 // Only constraints between operands, not (0.dest_reg == RAX_enc)
17473 // Only one replacement instruction
17474 //
17475 // ---------EXAMPLE----------------------------------------------------------
17476 //
17477 // // pertinent parts of existing instructions in architecture description
17478 // instruct movI(iRegINoSp dst, iRegI src)
17479 // %{
17480 //   match(Set dst (CopyI src));
17481 // %}
17482 //
17483 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
17484 // %{
17485 //   match(Set dst (AddI dst src));
17486 //   effect(KILL cr);
17487 // %}
17488 //
17489 // // Change (inc mov) to lea
17490 // peephole %{
17491 //   // increment preceded by register-register move
17492 //   peepmatch ( incI_iReg movI );
17493 //   // require that the destination register of the increment
17494 //   // match the destination register of the move
17495 //   peepconstraint ( 0.dst == 1.dst );
17496 //   // construct a replacement instruction that sets
17497 //   // the destination to ( move's source register + one )
17498 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
17499 // %}
17500 //
17501 
17502 // Implementation no longer uses movX instructions since
17503 // machine-independent system no longer uses CopyX nodes.
17504 //
17505 // peephole
17506 // %{
17507 //   peepmatch (incI_iReg movI);
17508 //   peepconstraint (0.dst == 1.dst);
17509 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17510 // %}
17511 
17512 // peephole
17513 // %{
17514 //   peepmatch (decI_iReg movI);
17515 //   peepconstraint (0.dst == 1.dst);
17516 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17517 // %}
17518 
17519 // peephole
17520 // %{
17521 //   peepmatch (addI_iReg_imm movI);
17522 //   peepconstraint (0.dst == 1.dst);
17523 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17524 // %}
17525 
17526 // peephole
17527 // %{
17528 //   peepmatch (incL_iReg movL);
17529 //   peepconstraint (0.dst == 1.dst);
17530 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17531 // %}
17532 
17533 // peephole
17534 // %{
17535 //   peepmatch (decL_iReg movL);
17536 //   peepconstraint (0.dst == 1.dst);
17537 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17538 // %}
17539 
17540 // peephole
17541 // %{
17542 //   peepmatch (addL_iReg_imm movL);
17543 //   peepconstraint (0.dst == 1.dst);
17544 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17545 // %}
17546 
17547 // peephole
17548 // %{
17549 //   peepmatch (addP_iReg_imm movP);
17550 //   peepconstraint (0.dst == 1.dst);
17551 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
17552 // %}
17553 
17554 // // Change load of spilled value to only a spill
17555 // instruct storeI(memory mem, iRegI src)
17556 // %{
17557 //   match(Set mem (StoreI mem src));
17558 // %}
17559 //
17560 // instruct loadI(iRegINoSp dst, memory mem)
17561 // %{
17562 //   match(Set dst (LoadI mem));
17563 // %}
17564 //
17565 
17566 //----------SMARTSPILL RULES---------------------------------------------------
17567 // These must follow all instruction definitions as they use the names
17568 // defined in the instructions definitions.
17569 
17570 // Local Variables:
17571 // mode: c++
17572 // End: