1 //
    2 // Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
    3 // Copyright (c) 2014, 2024, Red Hat, Inc. All rights reserved.
    4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    5 //
    6 // This code is free software; you can redistribute it and/or modify it
    7 // under the terms of the GNU General Public License version 2 only, as
    8 // published by the Free Software Foundation.
    9 //
   10 // This code is distributed in the hope that it will be useful, but WITHOUT
   11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   13 // version 2 for more details (a copy is included in the LICENSE file that
   14 // accompanied this code).
   15 //
   16 // You should have received a copy of the GNU General Public License version
   17 // 2 along with this work; if not, write to the Free Software Foundation,
   18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   19 //
   20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   21 // or visit www.oracle.com if you need additional information or have any
   22 // questions.
   23 //
   24 //
   25 
   26 // AArch64 Architecture Description File
   27 
   28 //----------REGISTER DEFINITION BLOCK------------------------------------------
   29 // This information is used by the matcher and the register allocator to
   30 // describe individual registers and classes of registers within the target
   31 // architecture.
   32 
   33 register %{
   34 //----------Architecture Description Register Definitions----------------------
   35 // General Registers
   36 // "reg_def"  name ( register save type, C convention save type,
   37 //                   ideal register type, encoding );
   38 // Register Save Types:
   39 //
   40 // NS  = No-Save:       The register allocator assumes that these registers
   41 //                      can be used without saving upon entry to the method, &
   42 //                      that they do not need to be saved at call sites.
   43 //
   44 // SOC = Save-On-Call:  The register allocator assumes that these registers
   45 //                      can be used without saving upon entry to the method,
   46 //                      but that they must be saved at call sites.
   47 //
   48 // SOE = Save-On-Entry: The register allocator assumes that these registers
   49 //                      must be saved before using them upon entry to the
   50 //                      method, but they do not need to be saved at call
   51 //                      sites.
   52 //
   53 // AS  = Always-Save:   The register allocator assumes that these registers
   54 //                      must be saved before using them upon entry to the
   55 //                      method, & that they must be saved at call sites.
   56 //
   57 // Ideal Register Type is used to determine how to save & restore a
   58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
   59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
   60 //
   61 // The encoding number is the actual bit-pattern placed into the opcodes.
   62 
   63 // We must define the 64 bit int registers in two 32 bit halves, the
   64 // real lower register and a virtual upper half register. upper halves
   65 // are used by the register allocator but are not actually supplied as
   66 // operands to memory ops.
   67 //
   68 // follow the C1 compiler in making registers
   69 //
   70 //   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
   72 //   r8-r9 non-allocatable (so we can use them as scratch regs)
   73 //
   74 // as regards Java usage. we don't use any callee save registers
   75 // because this makes it difficult to de-optimise a frame (see comment
   76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
   77 //
   78 
   79 // General Registers
   80 
   81 reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
   82 reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
   83 reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
   84 reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
   85 reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
   86 reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
   87 reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
   88 reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
   89 reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
   90 reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
   91 reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
   92 reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
   93 reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
   94 reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
   95 reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
   96 reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
   97 reg_def R8      ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()         ); // rscratch1, non-allocatable
   98 reg_def R8_H    ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()->next() );
   99 reg_def R9      ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()         ); // rscratch2, non-allocatable
  100 reg_def R9_H    ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()->next() );
  101 reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
  102 reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
  103 reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
  104 reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
  105 reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
  106 reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
  107 reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
  108 reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
  109 reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
  110 reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
  111 reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
  112 reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
  113 reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
  114 reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
  115 reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
  116 reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
  117 reg_def R18     ( SOC, SOC, Op_RegI, 18, r18_tls->as_VMReg()        );
  118 reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18_tls->as_VMReg()->next());
  119 reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
  120 reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
  121 reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
  122 reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
  123 reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
  124 reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
  125 reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
  126 reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
  127 reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
  128 reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
  129 reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
  130 reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
  131 reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
  132 reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
  133 reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
  134 reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
  135 reg_def R27     ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
  136 reg_def R27_H   ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()->next());
  137 reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
  138 reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
  139 reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
  140 reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
  141 reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
  142 reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
  143 reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
  144 reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
  145 
  146 // ----------------------------
  147 // Float/Double/Vector Registers
  148 // ----------------------------
  149 
  150 // Double Registers
  151 
  152 // The rules of ADL require that double registers be defined in pairs.
  153 // Each pair must be two 32-bit values, but not necessarily a pair of
  154 // single float registers. In each pair, ADLC-assigned register numbers
  155 // must be adjacent, with the lower number even. Finally, when the
  156 // CPU stores such a register pair to memory, the word associated with
  157 // the lower ADLC-assigned number must be stored to the lower address.
  158 
  159 // AArch64 has 32 floating-point registers. Each can store a vector of
  160 // single or double precision floating-point values up to 8 * 32
  161 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
  162 // use the first float or double element of the vector.
  163 
// For Java use, float registers v0-v15 are always save-on-call (whereas
// the platform ABI treats v8-v15 as callee save). Float registers
// v16-v31 are SOC as per the platform spec.
  167 
  168 // For SVE vector registers, we simply extend vector register size to 8
  169 // 'logical' slots. This is nominally 256 bits but it actually covers
  170 // all possible 'physical' SVE vector register lengths from 128 ~ 2048
  171 // bits. The 'physical' SVE vector register length is detected during
  172 // startup, so the register allocator is able to identify the correct
  173 // number of bytes needed for an SVE spill/unspill.
  174 // Note that a vector register with 4 slots denotes a 128-bit NEON
  175 // register allowing it to be distinguished from the corresponding SVE
  176 // vector register when the SVE vector length is 128 bits.
  177 
  178   reg_def V0   ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()          );
  179   reg_def V0_H ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next()  );
  180   reg_def V0_J ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(2) );
  181   reg_def V0_K ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(3) );
  182 
  183   reg_def V1   ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()          );
  184   reg_def V1_H ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next()  );
  185   reg_def V1_J ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(2) );
  186   reg_def V1_K ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(3) );
  187 
  188   reg_def V2   ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()          );
  189   reg_def V2_H ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next()  );
  190   reg_def V2_J ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(2) );
  191   reg_def V2_K ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(3) );
  192 
  193   reg_def V3   ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()          );
  194   reg_def V3_H ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next()  );
  195   reg_def V3_J ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(2) );
  196   reg_def V3_K ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(3) );
  197 
  198   reg_def V4   ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()          );
  199   reg_def V4_H ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next()  );
  200   reg_def V4_J ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(2) );
  201   reg_def V4_K ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(3) );
  202 
  203   reg_def V5   ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()          );
  204   reg_def V5_H ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next()  );
  205   reg_def V5_J ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(2) );
  206   reg_def V5_K ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(3) );
  207 
  208   reg_def V6   ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()          );
  209   reg_def V6_H ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next()  );
  210   reg_def V6_J ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(2) );
  211   reg_def V6_K ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(3) );
  212 
  213   reg_def V7   ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()          );
  214   reg_def V7_H ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next()  );
  215   reg_def V7_J ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(2) );
  216   reg_def V7_K ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(3) );
  217 
  218   reg_def V8   ( SOC, SOE, Op_RegF, 8, v8->as_VMReg()          );
  219   reg_def V8_H ( SOC, SOE, Op_RegF, 8, v8->as_VMReg()->next()  );
  220   reg_def V8_J ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(2) );
  221   reg_def V8_K ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(3) );
  222 
  223   reg_def V9   ( SOC, SOE, Op_RegF, 9, v9->as_VMReg()          );
  224   reg_def V9_H ( SOC, SOE, Op_RegF, 9, v9->as_VMReg()->next()  );
  225   reg_def V9_J ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(2) );
  226   reg_def V9_K ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(3) );
  227 
  228   reg_def V10   ( SOC, SOE, Op_RegF, 10, v10->as_VMReg()          );
  229   reg_def V10_H ( SOC, SOE, Op_RegF, 10, v10->as_VMReg()->next()  );
  230   reg_def V10_J ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2) );
  231   reg_def V10_K ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3) );
  232 
  233   reg_def V11   ( SOC, SOE, Op_RegF, 11, v11->as_VMReg()          );
  234   reg_def V11_H ( SOC, SOE, Op_RegF, 11, v11->as_VMReg()->next()  );
  235   reg_def V11_J ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2) );
  236   reg_def V11_K ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3) );
  237 
  238   reg_def V12   ( SOC, SOE, Op_RegF, 12, v12->as_VMReg()          );
  239   reg_def V12_H ( SOC, SOE, Op_RegF, 12, v12->as_VMReg()->next()  );
  240   reg_def V12_J ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2) );
  241   reg_def V12_K ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3) );
  242 
  243   reg_def V13   ( SOC, SOE, Op_RegF, 13, v13->as_VMReg()          );
  244   reg_def V13_H ( SOC, SOE, Op_RegF, 13, v13->as_VMReg()->next()  );
  245   reg_def V13_J ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2) );
  246   reg_def V13_K ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3) );
  247 
  248   reg_def V14   ( SOC, SOE, Op_RegF, 14, v14->as_VMReg()          );
  249   reg_def V14_H ( SOC, SOE, Op_RegF, 14, v14->as_VMReg()->next()  );
  250   reg_def V14_J ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2) );
  251   reg_def V14_K ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3) );
  252 
  253   reg_def V15   ( SOC, SOE, Op_RegF, 15, v15->as_VMReg()          );
  254   reg_def V15_H ( SOC, SOE, Op_RegF, 15, v15->as_VMReg()->next()  );
  255   reg_def V15_J ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2) );
  256   reg_def V15_K ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3) );
  257 
  258   reg_def V16   ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()          );
  259   reg_def V16_H ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next()  );
  260   reg_def V16_J ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2) );
  261   reg_def V16_K ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3) );
  262 
  263   reg_def V17   ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()          );
  264   reg_def V17_H ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next()  );
  265   reg_def V17_J ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2) );
  266   reg_def V17_K ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3) );
  267 
  268   reg_def V18   ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()          );
  269   reg_def V18_H ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next()  );
  270   reg_def V18_J ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2) );
  271   reg_def V18_K ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3) );
  272 
  273   reg_def V19   ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()          );
  274   reg_def V19_H ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next()  );
  275   reg_def V19_J ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2) );
  276   reg_def V19_K ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3) );
  277 
  278   reg_def V20   ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()          );
  279   reg_def V20_H ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next()  );
  280   reg_def V20_J ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2) );
  281   reg_def V20_K ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3) );
  282 
  283   reg_def V21   ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()          );
  284   reg_def V21_H ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next()  );
  285   reg_def V21_J ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2) );
  286   reg_def V21_K ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3) );
  287 
  288   reg_def V22   ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()          );
  289   reg_def V22_H ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next()  );
  290   reg_def V22_J ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2) );
  291   reg_def V22_K ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3) );
  292 
  293   reg_def V23   ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()          );
  294   reg_def V23_H ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next()  );
  295   reg_def V23_J ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2) );
  296   reg_def V23_K ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3) );
  297 
  298   reg_def V24   ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()          );
  299   reg_def V24_H ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next()  );
  300   reg_def V24_J ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2) );
  301   reg_def V24_K ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3) );
  302 
  303   reg_def V25   ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()          );
  304   reg_def V25_H ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next()  );
  305   reg_def V25_J ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2) );
  306   reg_def V25_K ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3) );
  307 
  308   reg_def V26   ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()          );
  309   reg_def V26_H ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next()  );
  310   reg_def V26_J ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2) );
  311   reg_def V26_K ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3) );
  312 
  313   reg_def V27   ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()          );
  314   reg_def V27_H ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next()  );
  315   reg_def V27_J ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2) );
  316   reg_def V27_K ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3) );
  317 
  318   reg_def V28   ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()          );
  319   reg_def V28_H ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next()  );
  320   reg_def V28_J ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2) );
  321   reg_def V28_K ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3) );
  322 
  323   reg_def V29   ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()          );
  324   reg_def V29_H ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next()  );
  325   reg_def V29_J ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2) );
  326   reg_def V29_K ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3) );
  327 
  328   reg_def V30   ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()          );
  329   reg_def V30_H ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next()  );
  330   reg_def V30_J ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2) );
  331   reg_def V30_K ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3) );
  332 
  333   reg_def V31   ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()          );
  334   reg_def V31_H ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next()  );
  335   reg_def V31_J ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2) );
  336   reg_def V31_K ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3) );
  337 
  338 // ----------------------------
  339 // SVE Predicate Registers
  340 // ----------------------------
  341   reg_def P0 (SOC, SOC, Op_RegVectMask, 0, p0->as_VMReg());
  342   reg_def P1 (SOC, SOC, Op_RegVectMask, 1, p1->as_VMReg());
  343   reg_def P2 (SOC, SOC, Op_RegVectMask, 2, p2->as_VMReg());
  344   reg_def P3 (SOC, SOC, Op_RegVectMask, 3, p3->as_VMReg());
  345   reg_def P4 (SOC, SOC, Op_RegVectMask, 4, p4->as_VMReg());
  346   reg_def P5 (SOC, SOC, Op_RegVectMask, 5, p5->as_VMReg());
  347   reg_def P6 (SOC, SOC, Op_RegVectMask, 6, p6->as_VMReg());
  348   reg_def P7 (SOC, SOC, Op_RegVectMask, 7, p7->as_VMReg());
  349   reg_def P8 (SOC, SOC, Op_RegVectMask, 8, p8->as_VMReg());
  350   reg_def P9 (SOC, SOC, Op_RegVectMask, 9, p9->as_VMReg());
  351   reg_def P10 (SOC, SOC, Op_RegVectMask, 10, p10->as_VMReg());
  352   reg_def P11 (SOC, SOC, Op_RegVectMask, 11, p11->as_VMReg());
  353   reg_def P12 (SOC, SOC, Op_RegVectMask, 12, p12->as_VMReg());
  354   reg_def P13 (SOC, SOC, Op_RegVectMask, 13, p13->as_VMReg());
  355   reg_def P14 (SOC, SOC, Op_RegVectMask, 14, p14->as_VMReg());
  356   reg_def P15 (SOC, SOC, Op_RegVectMask, 15, p15->as_VMReg());
  357 
  358 // ----------------------------
  359 // Special Registers
  360 // ----------------------------
  361 
// the AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. the FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
  367 
  368 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
  369 
  370 // Specify priority of register selection within phases of register
  371 // allocation.  Highest priority is first.  A useful heuristic is to
  372 // give registers a low priority when they are required by machine
  373 // instructions, like EAX and EDX on I486, and choose no-save registers
  374 // before save-on-call, & save-on-call before save-on-entry.  Registers
  375 // which participate in fixed calling sequences should come last.
  376 // Registers which are used as pairs must fall on an even boundary.
  377 
  378 alloc_class chunk0(
  379     // volatiles
  380     R10, R10_H,
  381     R11, R11_H,
  382     R12, R12_H,
  383     R13, R13_H,
  384     R14, R14_H,
  385     R15, R15_H,
  386     R16, R16_H,
  387     R17, R17_H,
  388     R18, R18_H,
  389 
  390     // arg registers
  391     R0, R0_H,
  392     R1, R1_H,
  393     R2, R2_H,
  394     R3, R3_H,
  395     R4, R4_H,
  396     R5, R5_H,
  397     R6, R6_H,
  398     R7, R7_H,
  399 
  400     // non-volatiles
  401     R19, R19_H,
  402     R20, R20_H,
  403     R21, R21_H,
  404     R22, R22_H,
  405     R23, R23_H,
  406     R24, R24_H,
  407     R25, R25_H,
  408     R26, R26_H,
  409 
  410     // non-allocatable registers
  411 
  412     R27, R27_H, // heapbase
  413     R28, R28_H, // thread
  414     R29, R29_H, // fp
  415     R30, R30_H, // lr
  416     R31, R31_H, // sp
  417     R8, R8_H,   // rscratch1
  418     R9, R9_H,   // rscratch2
  419 );
  420 
  421 alloc_class chunk1(
  422 
  423     // no save
  424     V16, V16_H, V16_J, V16_K,
  425     V17, V17_H, V17_J, V17_K,
  426     V18, V18_H, V18_J, V18_K,
  427     V19, V19_H, V19_J, V19_K,
  428     V20, V20_H, V20_J, V20_K,
  429     V21, V21_H, V21_J, V21_K,
  430     V22, V22_H, V22_J, V22_K,
  431     V23, V23_H, V23_J, V23_K,
  432     V24, V24_H, V24_J, V24_K,
  433     V25, V25_H, V25_J, V25_K,
  434     V26, V26_H, V26_J, V26_K,
  435     V27, V27_H, V27_J, V27_K,
  436     V28, V28_H, V28_J, V28_K,
  437     V29, V29_H, V29_J, V29_K,
  438     V30, V30_H, V30_J, V30_K,
  439     V31, V31_H, V31_J, V31_K,
  440 
  441     // arg registers
  442     V0, V0_H, V0_J, V0_K,
  443     V1, V1_H, V1_J, V1_K,
  444     V2, V2_H, V2_J, V2_K,
  445     V3, V3_H, V3_J, V3_K,
  446     V4, V4_H, V4_J, V4_K,
  447     V5, V5_H, V5_J, V5_K,
  448     V6, V6_H, V6_J, V6_K,
  449     V7, V7_H, V7_J, V7_K,
  450 
  451     // non-volatiles
  452     V8, V8_H, V8_J, V8_K,
  453     V9, V9_H, V9_J, V9_K,
  454     V10, V10_H, V10_J, V10_K,
  455     V11, V11_H, V11_J, V11_K,
  456     V12, V12_H, V12_J, V12_K,
  457     V13, V13_H, V13_J, V13_K,
  458     V14, V14_H, V14_J, V14_K,
  459     V15, V15_H, V15_J, V15_K,
  460 );
  461 
  462 alloc_class chunk2 (
  463     // Governing predicates for load/store and arithmetic
  464     P0,
  465     P1,
  466     P2,
  467     P3,
  468     P4,
  469     P5,
  470     P6,
  471 
  472     // Extra predicates
  473     P8,
  474     P9,
  475     P10,
  476     P11,
  477     P12,
  478     P13,
  479     P14,
  480     P15,
  481 
  482     // Preserved for all-true predicate
  483     P7,
  484 );
  485 
  486 alloc_class chunk3(RFLAGS);
  487 
  488 //----------Architecture Description Register Classes--------------------------
  489 // Several register classes are automatically defined based upon information in
  490 // this architecture description.
  491 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
  492 // 2) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
  493 //
  494 
  495 // Class for all 32 bit general purpose registers
  496 reg_class all_reg32(
  497     R0,
  498     R1,
  499     R2,
  500     R3,
  501     R4,
  502     R5,
  503     R6,
  504     R7,
  505     R10,
  506     R11,
  507     R12,
  508     R13,
  509     R14,
  510     R15,
  511     R16,
  512     R17,
  513     R18,
  514     R19,
  515     R20,
  516     R21,
  517     R22,
  518     R23,
  519     R24,
  520     R25,
  521     R26,
  522     R27,
  523     R28,
  524     R29,
  525     R30,
  526     R31
  527 );
  528 
  529 
  530 // Class for all 32 bit integer registers (excluding SP which
  531 // will never be used as an integer register)
  532 reg_class any_reg32 %{
  533   return _ANY_REG32_mask;
  534 %}
  535 
  536 // Singleton class for R0 int register
  537 reg_class int_r0_reg(R0);
  538 
  539 // Singleton class for R2 int register
  540 reg_class int_r2_reg(R2);
  541 
  542 // Singleton class for R3 int register
  543 reg_class int_r3_reg(R3);
  544 
  545 // Singleton class for R4 int register
  546 reg_class int_r4_reg(R4);
  547 
  548 // Singleton class for R31 int register
  549 reg_class int_r31_reg(R31);
  550 
  551 // Class for all 64 bit general purpose registers
  552 reg_class all_reg(
  553     R0, R0_H,
  554     R1, R1_H,
  555     R2, R2_H,
  556     R3, R3_H,
  557     R4, R4_H,
  558     R5, R5_H,
  559     R6, R6_H,
  560     R7, R7_H,
  561     R10, R10_H,
  562     R11, R11_H,
  563     R12, R12_H,
  564     R13, R13_H,
  565     R14, R14_H,
  566     R15, R15_H,
  567     R16, R16_H,
  568     R17, R17_H,
  569     R18, R18_H,
  570     R19, R19_H,
  571     R20, R20_H,
  572     R21, R21_H,
  573     R22, R22_H,
  574     R23, R23_H,
  575     R24, R24_H,
  576     R25, R25_H,
  577     R26, R26_H,
  578     R27, R27_H,
  579     R28, R28_H,
  580     R29, R29_H,
  581     R30, R30_H,
  582     R31, R31_H
  583 );
  584 
  585 // Class for all long integer registers (including SP)
  586 reg_class any_reg %{
  587   return _ANY_REG_mask;
  588 %}
  589 
  590 // Class for non-allocatable 32 bit registers
  591 reg_class non_allocatable_reg32(
  592 #ifdef R18_RESERVED
  593     // See comment in register_aarch64.hpp
  594     R18,                        // tls on Windows
  595 #endif
  596     R28,                        // thread
  597     R30,                        // lr
  598     R31                         // sp
  599 );
  600 
  601 // Class for non-allocatable 64 bit registers
  602 reg_class non_allocatable_reg(
  603 #ifdef R18_RESERVED
  604     // See comment in register_aarch64.hpp
  605     R18, R18_H,                 // tls on Windows, platform register on macOS
  606 #endif
  607     R28, R28_H,                 // thread
  608     R30, R30_H,                 // lr
  609     R31, R31_H                  // sp
  610 );
  611 
  612 // Class for all non-special integer registers
  613 reg_class no_special_reg32 %{
  614   return _NO_SPECIAL_REG32_mask;
  615 %}
  616 
  617 // Class for all non-special long integer registers
  618 reg_class no_special_reg %{
  619   return _NO_SPECIAL_REG_mask;
  620 %}
  621 
  622 // Class for 64 bit register r0
  623 reg_class r0_reg(
  624     R0, R0_H
  625 );
  626 
  627 // Class for 64 bit register r1
  628 reg_class r1_reg(
  629     R1, R1_H
  630 );
  631 
  632 // Class for 64 bit register r2
  633 reg_class r2_reg(
  634     R2, R2_H
  635 );
  636 
  637 // Class for 64 bit register r3
  638 reg_class r3_reg(
  639     R3, R3_H
  640 );
  641 
  642 // Class for 64 bit register r4
  643 reg_class r4_reg(
  644     R4, R4_H
  645 );
  646 
  647 // Class for 64 bit register r5
  648 reg_class r5_reg(
  649     R5, R5_H
  650 );
  651 
  652 // Class for 64 bit register r10
  653 reg_class r10_reg(
  654     R10, R10_H
  655 );
  656 
  657 // Class for 64 bit register r11
  658 reg_class r11_reg(
  659     R11, R11_H
  660 );
  661 
  662 // Class for method register
  663 reg_class method_reg(
  664     R12, R12_H
  665 );
  666 
  667 // Class for thread register
  668 reg_class thread_reg(
  669     R28, R28_H
  670 );
  671 
  672 // Class for frame pointer register
  673 reg_class fp_reg(
  674     R29, R29_H
  675 );
  676 
  677 // Class for link register
  678 reg_class lr_reg(
  679     R30, R30_H
  680 );
  681 
  682 // Class for long sp register
  683 reg_class sp_reg(
  684   R31, R31_H
  685 );
  686 
  687 // Class for all pointer registers
  688 reg_class ptr_reg %{
  689   return _PTR_REG_mask;
  690 %}
  691 
  692 // Class for all non_special pointer registers
  693 reg_class no_special_ptr_reg %{
  694   return _NO_SPECIAL_PTR_REG_mask;
  695 %}
  696 
  697 // Class for all non_special pointer registers (excluding rfp)
  698 reg_class no_special_no_rfp_ptr_reg %{
  699   return _NO_SPECIAL_NO_RFP_PTR_REG_mask;
  700 %}
  701 
  702 // Class for all float registers
  703 reg_class float_reg(
  704     V0,
  705     V1,
  706     V2,
  707     V3,
  708     V4,
  709     V5,
  710     V6,
  711     V7,
  712     V8,
  713     V9,
  714     V10,
  715     V11,
  716     V12,
  717     V13,
  718     V14,
  719     V15,
  720     V16,
  721     V17,
  722     V18,
  723     V19,
  724     V20,
  725     V21,
  726     V22,
  727     V23,
  728     V24,
  729     V25,
  730     V26,
  731     V27,
  732     V28,
  733     V29,
  734     V30,
  735     V31
  736 );
  737 
  738 // Double precision float registers have virtual `high halves' that
  739 // are needed by the allocator.
  740 // Class for all double registers
  741 reg_class double_reg(
  742     V0, V0_H,
  743     V1, V1_H,
  744     V2, V2_H,
  745     V3, V3_H,
  746     V4, V4_H,
  747     V5, V5_H,
  748     V6, V6_H,
  749     V7, V7_H,
  750     V8, V8_H,
  751     V9, V9_H,
  752     V10, V10_H,
  753     V11, V11_H,
  754     V12, V12_H,
  755     V13, V13_H,
  756     V14, V14_H,
  757     V15, V15_H,
  758     V16, V16_H,
  759     V17, V17_H,
  760     V18, V18_H,
  761     V19, V19_H,
  762     V20, V20_H,
  763     V21, V21_H,
  764     V22, V22_H,
  765     V23, V23_H,
  766     V24, V24_H,
  767     V25, V25_H,
  768     V26, V26_H,
  769     V27, V27_H,
  770     V28, V28_H,
  771     V29, V29_H,
  772     V30, V30_H,
  773     V31, V31_H
  774 );
  775 
  776 // Class for all SVE vector registers.
  777 reg_class vectora_reg (
  778     V0, V0_H, V0_J, V0_K,
  779     V1, V1_H, V1_J, V1_K,
  780     V2, V2_H, V2_J, V2_K,
  781     V3, V3_H, V3_J, V3_K,
  782     V4, V4_H, V4_J, V4_K,
  783     V5, V5_H, V5_J, V5_K,
  784     V6, V6_H, V6_J, V6_K,
  785     V7, V7_H, V7_J, V7_K,
  786     V8, V8_H, V8_J, V8_K,
  787     V9, V9_H, V9_J, V9_K,
  788     V10, V10_H, V10_J, V10_K,
  789     V11, V11_H, V11_J, V11_K,
  790     V12, V12_H, V12_J, V12_K,
  791     V13, V13_H, V13_J, V13_K,
  792     V14, V14_H, V14_J, V14_K,
  793     V15, V15_H, V15_J, V15_K,
  794     V16, V16_H, V16_J, V16_K,
  795     V17, V17_H, V17_J, V17_K,
  796     V18, V18_H, V18_J, V18_K,
  797     V19, V19_H, V19_J, V19_K,
  798     V20, V20_H, V20_J, V20_K,
  799     V21, V21_H, V21_J, V21_K,
  800     V22, V22_H, V22_J, V22_K,
  801     V23, V23_H, V23_J, V23_K,
  802     V24, V24_H, V24_J, V24_K,
  803     V25, V25_H, V25_J, V25_K,
  804     V26, V26_H, V26_J, V26_K,
  805     V27, V27_H, V27_J, V27_K,
  806     V28, V28_H, V28_J, V28_K,
  807     V29, V29_H, V29_J, V29_K,
  808     V30, V30_H, V30_J, V30_K,
  809     V31, V31_H, V31_J, V31_K,
  810 );
  811 
// Class for all 64bit vector registers (two 32-bit slots per register).
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);

// Class for all 128bit vector registers (four 32-bit slots per register).
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
  883 
// The reg_class definitions below are singleton classes: each pins an
// operand to one specific vector register.  They are used by rules that
// must fix a value in a particular V register (presumably to match stub
// or intrinsic calling conventions -- NOTE(review): confirm against the
// instructs that reference them).

// Class for vector register V10
reg_class v10_veca_reg(
    V10, V10_H, V10_J, V10_K
);

// Class for vector register V11
reg_class v11_veca_reg(
    V11, V11_H, V11_J, V11_K
);

// Class for vector register V12
reg_class v12_veca_reg(
    V12, V12_H, V12_J, V12_K
);

// Class for vector register V13
reg_class v13_veca_reg(
    V13, V13_H, V13_J, V13_K
);

// Class for vector register V17
reg_class v17_veca_reg(
    V17, V17_H, V17_J, V17_K
);

// Class for vector register V18
reg_class v18_veca_reg(
    V18, V18_H, V18_J, V18_K
);

// Class for vector register V23
reg_class v23_veca_reg(
    V23, V23_H, V23_J, V23_K
);

// Class for vector register V24
reg_class v24_veca_reg(
    V24, V24_H, V24_J, V24_K
);

// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Class for 128 bit register v4
reg_class v4_reg(
    V4, V4_H
);

// Class for 128 bit register v5
reg_class v5_reg(
    V5, V5_H
);

// Class for 128 bit register v6
reg_class v6_reg(
    V6, V6_H
);

// Class for 128 bit register v7
reg_class v7_reg(
    V7, V7_H
);

// Class for 128 bit register v8
reg_class v8_reg(
    V8, V8_H
);

// Class for 128 bit register v9
reg_class v9_reg(
    V9, V9_H
);

// Class for 128 bit register v10
reg_class v10_reg(
    V10, V10_H
);

// Class for 128 bit register v11
reg_class v11_reg(
    V11, V11_H
);

// Class for 128 bit register v12
reg_class v12_reg(
    V12, V12_H
);

// Class for 128 bit register v13
reg_class v13_reg(
    V13, V13_H
);

// Class for 128 bit register v14
reg_class v14_reg(
    V14, V14_H
);

// Class for 128 bit register v15
reg_class v15_reg(
    V15, V15_H
);

// Class for 128 bit register v16
reg_class v16_reg(
    V16, V16_H
);

// Class for 128 bit register v17
reg_class v17_reg(
    V17, V17_H
);

// Class for 128 bit register v18
reg_class v18_reg(
    V18, V18_H
);

// Class for 128 bit register v19
reg_class v19_reg(
    V19, V19_H
);

// Class for 128 bit register v20
reg_class v20_reg(
    V20, V20_H
);

// Class for 128 bit register v21
reg_class v21_reg(
    V21, V21_H
);

// Class for 128 bit register v22
reg_class v22_reg(
    V22, V22_H
);

// Class for 128 bit register v23
reg_class v23_reg(
    V23, V23_H
);

// Class for 128 bit register v24
reg_class v24_reg(
    V24, V24_H
);

// Class for 128 bit register v25
reg_class v25_reg(
    V25, V25_H
);

// Class for 128 bit register v26
reg_class v26_reg(
    V26, V26_H
);

// Class for 128 bit register v27
reg_class v27_reg(
    V27, V27_H
);

// Class for 128 bit register v28
reg_class v28_reg(
    V28, V28_H
);

// Class for 128 bit register v29
reg_class v29_reg(
    V29, V29_H
);

// Class for 128 bit register v30
reg_class v30_reg(
    V30, V30_H
);

// Class for 128 bit register v31
reg_class v31_reg(
    V31, V31_H
);
 1083 
// Class for all SVE predicate registers.
// P7 is excluded: it is kept as an all-true predicate by the runtime.
reg_class pr_reg (
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    // P7, non-allocatable, preserved with all elements preset to TRUE.
    P8,
    P9,
    P10,
    P11,
    P12,
    P13,
    P14,
    P15
);

// Class for SVE governing predicate registers, which are used
// to determine the active elements of a predicated instruction.
// Restricted to P0-P6, presumably because many SVE instructions can
// only encode a governing predicate from p0-p7 and P7 is reserved --
// NOTE(review): confirm.
reg_class gov_pr (
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    // P7, non-allocatable, preserved with all elements preset to TRUE.
);

// Singleton classes pinning a value to a specific predicate register.
reg_class p0_reg(P0);
reg_class p1_reg(P1);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);
 1122 
 1123 %}
 1124 
 1125 //----------DEFINITION BLOCK---------------------------------------------------
 1126 // Define name --> value mappings to inform the ADLC of an integer valued name
 1127 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 1128 // Format:
 1129 //        int_def  <name>         ( <int_value>, <expression>);
 1130 // Generated Code in ad_<arch>.hpp
 1131 //        #define  <name>   (<expression>)
 1132 //        // value == <int_value>
 1133 // Generated code in ad_<arch>.cpp adlc_verification()
 1134 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 1135 //
 1136 
 1137 // we follow the ppc-aix port in using a simple cost model which ranks
 1138 // register operations as cheap, memory ops as more expensive and
 1139 // branches as most expensive. the first two have a low as well as a
 1140 // normal cost. huge cost appears to be a way of saying don't do
 1141 // something
 1142 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls are ranked twice as expensive as a plain
  // instruction; volatile references an order of magnitude more.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 1150 
 1151 
 1152 //----------SOURCE BLOCK-------------------------------------------------------
 1153 // This is a block of C++ code which provides values, functions, and
 1154 // definitions necessary in the rest of the architecture description
 1155 
 1156 source_hpp %{
 1157 
 1158 #include "asm/macroAssembler.hpp"
 1159 #include "gc/shared/barrierSetAssembler.hpp"
 1160 #include "gc/shared/cardTable.hpp"
 1161 #include "gc/shared/cardTableBarrierSet.hpp"
 1162 #include "gc/shared/collectedHeap.hpp"
 1163 #include "opto/addnode.hpp"
 1164 #include "opto/convertnode.hpp"
 1165 #include "runtime/objectMonitor.hpp"
 1166 
 1167 extern RegMask _ANY_REG32_mask;
 1168 extern RegMask _ANY_REG_mask;
 1169 extern RegMask _PTR_REG_mask;
 1170 extern RegMask _NO_SPECIAL_REG32_mask;
 1171 extern RegMask _NO_SPECIAL_REG_mask;
 1172 extern RegMask _NO_SPECIAL_PTR_REG_mask;
 1173 extern RegMask _NO_SPECIAL_NO_RFP_PTR_REG_mask;
 1174 
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Worst-case size of a call trampoline stub, used when a call target
  // may be out of direct-branch range.
  // NOTE(review): a stale "no call trampolines on this platform" remark
  // was removed here -- it contradicted the call below.
  static uint size_call_trampoline() {
    return MacroAssembler::max_trampoline_stub_size();
  }

  // Number of relocations needed by a call trampoline stub.
  static uint reloc_call_trampoline() {
    return 0; // none needed beyond those of the call itself -- TODO confirm
  }
};
 1192 
class HandlerImpl {

 public:

  // Emit the exception/deopt handler code; returns the handler's offset.
  // Implementations live in the source block of this ad file.
  static int emit_exception_handler(C2_MacroAssembler *masm);
  static int emit_deopt_handler(C2_MacroAssembler* masm);

  // Worst-case size of the exception handler: one far code-stub branch.
  static uint size_exception_handler() {
    return MacroAssembler::far_codestub_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    return NativeInstruction::instruction_size + MacroAssembler::far_codestub_branch_size();
  }
};
 1209 
// Platform-dependent node flags. AArch64 defines no extra flags, so the
// enum simply re-exports the last generic flag value.
class Node::PD {
public:
  enum NodeFlags {
    _last_flag = Node::_last_flag
  };
};
 1216 
  // returns true when opcode denotes a CAS-style load/store operation;
  // see the definition in the source block for the exact semantics of
  // maybe_volatile
  bool is_CAS(int opcode, bool maybe_volatile);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);

  // Convert BoolTest condition to Assembler condition.
  // Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
  Assembler::Condition to_assembler_cond(BoolTest::mask cond);
 1239 %}
 1240 
 1241 source %{
 1242 
  // Derived RegMask with conditionally allocatable registers

  // No platform-specific mach node analysis is needed on AArch64.
  void PhaseOutput::pd_perform_mach_node_analysis() {
  }

  // Mach nodes have no special alignment requirement (1 byte = none).
  int MachNode::pd_alignment_required() const {
    return 1;
  }

  // No padding is ever inserted before a mach node on AArch64.
  int MachNode::compute_padding(int current_offset) const {
    return 0;
  }
 1255 
  // Masks backing the dynamic reg_class definitions in the register
  // block above; filled in once at startup by reg_mask_init().
  RegMask _ANY_REG32_mask;
  RegMask _ANY_REG_mask;
  RegMask _PTR_REG_mask;
  RegMask _NO_SPECIAL_REG32_mask;
  RegMask _NO_SPECIAL_REG_mask;
  RegMask _NO_SPECIAL_PTR_REG_mask;
  RegMask _NO_SPECIAL_NO_RFP_PTR_REG_mask;

  void reg_mask_init() {
    // We derive below RegMask(s) from the ones which are auto-generated from
    // adlc register classes to make AArch64 rheapbase (r27) and rfp (r29)
    // registers conditionally reserved.

    // any 32-bit register except the stack pointer
    _ANY_REG32_mask = _ALL_REG32_mask;
    _ANY_REG32_mask.Remove(OptoReg::as_OptoReg(r31_sp->as_VMReg()));

    _ANY_REG_mask = _ALL_REG_mask;

    _PTR_REG_mask = _ALL_REG_mask;

    // the NO_SPECIAL masks start from all registers minus the
    // statically non-allocatable ones ...
    _NO_SPECIAL_REG32_mask = _ALL_REG32_mask;
    _NO_SPECIAL_REG32_mask.SUBTRACT(_NON_ALLOCATABLE_REG32_mask);

    _NO_SPECIAL_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    _NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    // ... then conditionally drop the heapbase and frame registers.
    // r27 is not allocatable when compressed oops is on and heapbase is not
    // zero, compressed klass pointers doesn't use r27 after JDK-8234794
    if (UseCompressedOops && (CompressedOops::base() != nullptr)) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
      _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
      _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
    }

    // r29 is not allocatable when PreserveFramePointer is on
    if (PreserveFramePointer) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
      _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
      _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
    }

    // rfp is never allocatable in the NO_RFP pointer class
    _NO_SPECIAL_NO_RFP_PTR_REG_mask = _NO_SPECIAL_PTR_REG_mask;
    _NO_SPECIAL_NO_RFP_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
  }
 1303 
  // Optimization of volatile gets and puts
 1305   // -------------------------------------
 1306   //
 1307   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
 1308   // use to implement volatile reads and writes. For a volatile read
 1309   // we simply need
 1310   //
 1311   //   ldar<x>
 1312   //
 1313   // and for a volatile write we need
 1314   //
 1315   //   stlr<x>
 1316   //
 1317   // Alternatively, we can implement them by pairing a normal
 1318   // load/store with a memory barrier. For a volatile read we need
 1319   //
 1320   //   ldr<x>
 1321   //   dmb ishld
 1322   //
 1323   // for a volatile write
 1324   //
 1325   //   dmb ish
 1326   //   str<x>
 1327   //   dmb ish
 1328   //
 1329   // We can also use ldaxr and stlxr to implement compare and swap CAS
 1330   // sequences. These are normally translated to an instruction
 1331   // sequence like the following
 1332   //
 1333   //   dmb      ish
 1334   // retry:
 1335   //   ldxr<x>   rval raddr
 1336   //   cmp       rval rold
 1337   //   b.ne done
 1338   //   stlxr<x>  rval, rnew, rold
 1339   //   cbnz      rval retry
 1340   // done:
 1341   //   cset      r0, eq
 1342   //   dmb ishld
 1343   //
 1344   // Note that the exclusive store is already using an stlxr
 1345   // instruction. That is required to ensure visibility to other
 1346   // threads of the exclusive write (assuming it succeeds) before that
 1347   // of any subsequent writes.
 1348   //
 1349   // The following instruction sequence is an improvement on the above
 1350   //
 1351   // retry:
 1352   //   ldaxr<x>  rval raddr
 1353   //   cmp       rval rold
 1354   //   b.ne done
 1355   //   stlxr<x>  rval, rnew, rold
 1356   //   cbnz      rval retry
 1357   // done:
 1358   //   cset      r0, eq
 1359   //
 1360   // We don't need the leading dmb ish since the stlxr guarantees
 1361   // visibility of prior writes in the case that the swap is
 1362   // successful. Crucially we don't have to worry about the case where
 1363   // the swap is not successful since no valid program should be
 1364   // relying on visibility of prior changes by the attempting thread
 1365   // in the case where the CAS fails.
 1366   //
 1367   // Similarly, we don't need the trailing dmb ishld if we substitute
 1368   // an ldaxr instruction since that will provide all the guarantees we
 1369   // require regarding observation of changes made by other threads
 1370   // before any change to the CAS address observed by the load.
 1371   //
 1372   // In order to generate the desired instruction sequence we need to
 1373   // be able to identify specific 'signature' ideal graph node
  // sequences which i) occur as a translation of volatile reads or
 1375   // writes or CAS operations and ii) do not occur through any other
 1376   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
 1378   // sequences to the desired machine code sequences. Selection of the
 1379   // alternative rules can be implemented by predicates which identify
 1380   // the relevant node sequences.
 1381   //
 1382   // The ideal graph generator translates a volatile read to the node
 1383   // sequence
 1384   //
 1385   //   LoadX[mo_acquire]
 1386   //   MemBarAcquire
 1387   //
 1388   // As a special case when using the compressed oops optimization we
 1389   // may also see this variant
 1390   //
 1391   //   LoadN[mo_acquire]
 1392   //   DecodeN
 1393   //   MemBarAcquire
 1394   //
 1395   // A volatile write is translated to the node sequence
 1396   //
 1397   //   MemBarRelease
 1398   //   StoreX[mo_release] {CardMark}-optional
 1399   //   MemBarVolatile
 1400   //
 1401   // n.b. the above node patterns are generated with a strict
 1402   // 'signature' configuration of input and output dependencies (see
 1403   // the predicates below for exact details). The card mark may be as
 1404   // simple as a few extra nodes or, in a few GC configurations, may
 1405   // include more complex control flow between the leading and
 1406   // trailing memory barriers. However, whatever the card mark
 1407   // configuration these signatures are unique to translated volatile
 1408   // reads/stores -- they will not appear as a result of any other
 1409   // bytecode translation or inlining nor as a consequence of
 1410   // optimizing transforms.
 1411   //
 1412   // We also want to catch inlined unsafe volatile gets and puts and
 1413   // be able to implement them using either ldar<x>/stlr<x> or some
 1414   // combination of ldr<x>/stlr<x> and dmb instructions.
 1415   //
 1416   // Inlined unsafe volatiles puts manifest as a minor variant of the
 1417   // normal volatile put node sequence containing an extra cpuorder
 1418   // membar
 1419   //
 1420   //   MemBarRelease
 1421   //   MemBarCPUOrder
 1422   //   StoreX[mo_release] {CardMark}-optional
 1423   //   MemBarCPUOrder
 1424   //   MemBarVolatile
 1425   //
 1426   // n.b. as an aside, a cpuorder membar is not itself subject to
 1427   // matching and translation by adlc rules.  However, the rule
 1428   // predicates need to detect its presence in order to correctly
 1429   // select the desired adlc rules.
 1430   //
 1431   // Inlined unsafe volatile gets manifest as a slightly different
 1432   // node sequence to a normal volatile get because of the
 1433   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
 1435   // MemBarAcquire, possibly through an optional DecodeN, is still
 1436   // present
 1437   //
 1438   //   MemBarCPUOrder
 1439   //        ||       \\
 1440   //   MemBarCPUOrder LoadX[mo_acquire]
 1441   //        ||            |
 1442   //        ||       {DecodeN} optional
 1443   //        ||       /
 1444   //     MemBarAcquire
 1445   //
 1446   // In this case the acquire membar does not directly depend on the
 1447   // load. However, we can be sure that the load is generated from an
 1448   // inlined unsafe volatile get if we see it dependent on this unique
 1449   // sequence of membar nodes. Similarly, given an acquire membar we
 1450   // can know that it was added because of an inlined unsafe volatile
 1451   // get if it is fed and feeds a cpuorder membar and if its feed
 1452   // membar also feeds an acquiring load.
 1453   //
 1454   // Finally an inlined (Unsafe) CAS operation is translated to the
 1455   // following ideal graph
 1456   //
 1457   //   MemBarRelease
 1458   //   MemBarCPUOrder
 1459   //   CompareAndSwapX {CardMark}-optional
 1460   //   MemBarCPUOrder
 1461   //   MemBarAcquire
 1462   //
 1463   // So, where we can identify these volatile read and write
 1464   // signatures we can choose to plant either of the above two code
 1465   // sequences. For a volatile read we can simply plant a normal
 1466   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
 1467   // also choose to inhibit translation of the MemBarAcquire and
 1468   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
 1469   //
 1470   // When we recognise a volatile store signature we can choose to
  // plant a dmb ish as a translation for the MemBarRelease, a
 1472   // normal str<x> and then a dmb ish for the MemBarVolatile.
 1473   // Alternatively, we can inhibit translation of the MemBarRelease
 1474   // and MemBarVolatile and instead plant a simple stlr<x>
 1475   // instruction.
 1476   //
 1477   // when we recognise a CAS signature we can choose to plant a dmb
 1478   // ish as a translation for the MemBarRelease, the conventional
 1479   // macro-instruction sequence for the CompareAndSwap node (which
 1480   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
 1481   // Alternatively, we can elide generation of the dmb instructions
 1482   // and plant the alternative CompareAndSwap macro-instruction
 1483   // sequence (which uses ldaxr<x>).
 1484   //
 1485   // Of course, the above only applies when we see these signature
 1486   // configurations. We still want to plant dmb instructions in any
 1487   // other cases where we may see a MemBarAcquire, MemBarRelease or
 1488   // MemBarVolatile. For example, at the end of a constructor which
 1489   // writes final/volatile fields we will see a MemBarRelease
 1490   // instruction and this needs a 'dmb ish' lest we risk the
 1491   // constructed object being visible without making the
 1492   // final/volatile field writes visible.
 1493   //
 1494   // n.b. the translation rules below which rely on detection of the
 1495   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
 1496   // If we see anything other than the signature configurations we
 1497   // always just translate the loads and stores to ldr<x> and str<x>
 1498   // and translate acquire, release and volatile membars to the
 1499   // relevant dmb instructions.
 1500   //
 1501 
  // is_CAS(int opcode, bool maybe_volatile)
  //
  // return true if opcode is one of the possible CompareAndSwapX
  // values otherwise false.
  //
  // The strong CAS / get-and-set / get-and-add forms always answer
  // true.  The compare-and-exchange and weak CAS forms answer
  // maybe_volatile, i.e. they only count as CAS-like when the caller
  // passes true (NOTE(review): semantics inferred from the call sites
  // below -- confirm).

  bool is_CAS(int opcode, bool maybe_volatile)
  {
    switch(opcode) {
      // We handle these
    case Op_CompareAndSwapI:
    case Op_CompareAndSwapL:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_CompareAndSwapB:
    case Op_CompareAndSwapS:
    case Op_GetAndSetI:
    case Op_GetAndSetL:
    case Op_GetAndSetP:
    case Op_GetAndSetN:
    case Op_GetAndAddI:
    case Op_GetAndAddL:
      return true;
    case Op_CompareAndExchangeI:
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeB:
    case Op_CompareAndExchangeS:
    case Op_CompareAndExchangeL:
    case Op_CompareAndExchangeP:
    case Op_WeakCompareAndSwapB:
    case Op_WeakCompareAndSwapS:
    case Op_WeakCompareAndSwapI:
    case Op_WeakCompareAndSwapL:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      return maybe_volatile;
    default:
      return false;
    }
  }
 1547 
 1548   // helper to determine the maximum number of Phi nodes we may need to
 1549   // traverse when searching from a card mark membar for the merge mem
 1550   // feeding a trailing membar or vice versa
 1551 
 1552 // predicates controlling emit of ldr<x>/ldar<x>
 1553 
 1554 bool unnecessary_acquire(const Node *barrier)
 1555 {
 1556   assert(barrier->is_MemBar(), "expecting a membar");
 1557 
 1558   MemBarNode* mb = barrier->as_MemBar();
 1559 
 1560   if (mb->trailing_load()) {
 1561     return true;
 1562   }
 1563 
 1564   if (mb->trailing_load_store()) {
 1565     Node* load_store = mb->in(MemBarNode::Precedent);
 1566     assert(load_store->is_LoadStore(), "unexpected graph shape");
 1567     return is_CAS(load_store->Opcode(), true);
 1568   }
 1569 
 1570   return false;
 1571 }
 1572 
 1573 bool needs_acquiring_load(const Node *n)
 1574 {
 1575   assert(n->is_Load(), "expecting a load");
 1576   LoadNode *ld = n->as_Load();
 1577   return ld->is_acquire();
 1578 }
 1579 
 1580 bool unnecessary_release(const Node *n)
 1581 {
 1582   assert((n->is_MemBar() &&
 1583           n->Opcode() == Op_MemBarRelease),
 1584          "expecting a release membar");
 1585 
 1586   MemBarNode *barrier = n->as_MemBar();
 1587   if (!barrier->leading()) {
 1588     return false;
 1589   } else {
 1590     Node* trailing = barrier->trailing_membar();
 1591     MemBarNode* trailing_mb = trailing->as_MemBar();
 1592     assert(trailing_mb->trailing(), "Not a trailing membar?");
 1593     assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
 1594 
 1595     Node* mem = trailing_mb->in(MemBarNode::Precedent);
 1596     if (mem->is_Store()) {
 1597       assert(mem->as_Store()->is_release(), "");
 1598       assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
 1599       return true;
 1600     } else {
 1601       assert(mem->is_LoadStore(), "");
 1602       assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
 1603       return is_CAS(mem->Opcode(), true);
 1604     }
 1605   }
 1606   return false;
 1607 }
 1608 
bool unnecessary_volatile(const Node *n)
{
  // assert n->is_MemBar();
  MemBarNode *mbvol = n->as_MemBar();

  // A volatile membar is redundant only when it trails a release store:
  // the stlr<x> matched for that store already orders the write.
  bool release = mbvol->trailing_store();
  assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
#ifdef ASSERT
  if (release) {
    // sanity-check the leading/trailing membar pairing
    Node* leading = mbvol->leading_membar();
    assert(leading->Opcode() == Op_MemBarRelease, "");
    assert(leading->as_MemBar()->leading_store(), "");
    assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
  }
#endif

  return release;
}
 1627 
 1628 // predicates controlling emit of str<x>/stlr<x>
 1629 
 1630 bool needs_releasing_store(const Node *n)
 1631 {
 1632   // assert n->is_Store();
 1633   StoreNode *st = n->as_Store();
 1634   return st->trailing_membar() != nullptr;
 1635 }
 1636 
 1637 // predicate controlling translation of CAS
 1638 //
 1639 // returns true if CAS needs to use an acquiring load otherwise false
 1640 
 1641 bool needs_acquiring_load_exclusive(const Node *n)
 1642 {
 1643   assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
 1644   LoadStoreNode* ldst = n->as_LoadStore();
 1645   if (is_CAS(n->Opcode(), false)) {
 1646     assert(ldst->trailing_membar() != nullptr, "expected trailing membar");
 1647   } else {
 1648     return ldst->trailing_membar() != nullptr;
 1649   }
 1650 
 1651   // so we can just return true here
 1652   return true;
 1653 }
 1654 
 1655 #define __ masm->
 1656 
 1657 // advance declarations for helper functions to convert register
 1658 // indices to register objects
 1659 
 1660 // the ad file has to provide implementations of certain methods
 1661 // expected by the generic code
 1662 //
 1663 // REQUIRED FUNCTIONALITY
 1664 
 1665 //=============================================================================
 1666 
 1667 // !!!!! Special hack to get all types of calls to specify the byte offset
 1668 //       from the start of the call to the point where the return address
 1669 //       will point.
 1670 
 1671 int MachCallStaticJavaNode::ret_addr_offset()
 1672 {
 1673   // call should be a simple bl
 1674   int off = 4;
 1675   return off;
 1676 }
 1677 
int MachCallDynamicJavaNode::ret_addr_offset()
{
  // four 4-byte instructions precede the return address
  return 16; // movz, movk, movk, bl
}
 1682 
 1683 int MachCallRuntimeNode::ret_addr_offset() {
 1684   // for generated stubs the call will be
 1685   //   bl(addr)
 1686   // or with far branches
 1687   //   bl(trampoline_stub)
 1688   // for real runtime callouts it will be six instructions
 1689   // see aarch64_enc_java_to_runtime
 1690   //   adr(rscratch2, retaddr)
 1691   //   str(rscratch2, Address(rthread, JavaThread::last_Java_pc_offset()));
 1692   //   lea(rscratch1, RuntimeAddress(addr)
 1693   //   blr(rscratch1)
 1694   CodeBlob *cb = CodeCache::find_blob(_entry_point);
 1695   if (cb) {
 1696     return 1 * NativeInstruction::instruction_size;
 1697   } else {
 1698     return 6 * NativeInstruction::instruction_size;
 1699   }
 1700 }
 1701 
 1702 //=============================================================================
 1703 
#ifndef PRODUCT
// Disassembly listing for a breakpoint node.
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif

// A breakpoint is a single brk #0 instruction.
void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  __ brk(0);
}

uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 1717 
 1718 //=============================================================================
 1719 
 1720 #ifndef PRODUCT
 1721   void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
 1722     st->print("nop \t# %d bytes pad for loops and calls", _count);
 1723   }
 1724 #endif
 1725 
 1726   void MachNopNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc*) const {
 1727     for (int i = 0; i < _count; i++) {
 1728       __ nop();
 1729     }
 1730   }
 1731 
  // Each nop is one fixed-width AArch64 instruction (4 bytes).
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
 1735 
 1736 //=============================================================================
// The constant-table base node defines no register on AArch64 (the table
// is reached via absolute addressing, see below).
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;

int ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}
 1742 
// No post-register-allocation expansion is needed for the constant base
// node on AArch64, so postalloc_expand must never be reached.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
 1747 
// Nothing to emit: the constant table is addressed absolutely.
void MachConstantBaseNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {
  // Empty encoding
}
 1751 
// Matches the empty encoding above: zero bytes of code.
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}
 1755 
 1756 #ifndef PRODUCT
// Debug printout for the (empty) constant base node.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
 1760 #endif
 1761 
 1762 #ifndef PRODUCT
// Pretty-print the method prolog; mirrors the code emitted by
// MachPrologNode::emit below (stack bang, optional ROP protection,
// frame build, and the nmethod entry barrier for non-stub compiles).
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  if (C->output()->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  if (VM_Version::use_rop_protection()) {
    // ROP protection: probe the return address, then sign lr.
    st->print("ldr  zr, [lr]\n\t");
    st->print("paciaz\n\t");
  }
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    // Small frame: drop sp, then save rfp/lr at an immediate offset.
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    // Large frame: push lr/rfp first, then drop sp via a scratch register.
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
  if (C->stub_function() == nullptr) {
    // nmethod entry barrier sequence (normal compilations only).
    st->print("\n\t");
    st->print("ldr  rscratch1, [guard]\n\t");
    st->print("dmb ishld\n\t");
    st->print("ldr  rscratch2, [rthread, #thread_disarmed_guard_value_offset]\n\t");
    st->print("cmp  rscratch1, rscratch2\n\t");
    st->print("b.eq skip");
    st->print("\n\t");
    st->print("blr #nmethod_entry_barrier_stub\n\t");
    st->print("b skip\n\t");
    st->print("guard: int\n\t");
    st->print("\n\t");
    st->print("skip:\n\t");
  }
}
 1800 #endif
 1801 
// Emit the method prolog: optional class-init barrier, SVE ptrue
// re-initialization, stack bang, frame build, and (for non-stub
// compilations) the nmethod entry barrier.
void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;

  // n.b. frame size includes space for return pc and rfp
  const int framesize = C->output()->frame_size_in_bytes();

  if (C->clinit_barrier_on_entry()) {
    // Bail out to the wrong-method stub if the holder class is still
    // being initialized by another thread.
    assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");

    Label L_skip_barrier;

    __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
    __ bind(L_skip_barrier);
  }

  if (C->max_vector_size() > 0) {
    // Generated vector code assumes a valid ptrue predicate; restore it.
    __ reinitialize_ptrue();
  }

  int bangsize = C->output()->bang_size_in_bytes();
  if (C->output()->need_stack_bang(bangsize))
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (C->stub_function() == nullptr) {
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
    // Dummy labels for just measuring the code size
    Label dummy_slow_path;
    Label dummy_continuation;
    Label dummy_guard;
    Label* slow_path = &dummy_slow_path;
    Label* continuation = &dummy_continuation;
    Label* guard = &dummy_guard;
    if (!Compile::current()->output()->in_scratch_emit_size()) {
      // Use real labels from actual stub when not emitting code for the purpose of measuring its size
      C2EntryBarrierStub* stub = new (Compile::current()->comp_arena()) C2EntryBarrierStub();
      Compile::current()->output()->add_stub(stub);
      slow_path = &stub->entry();
      continuation = &stub->continuation();
      guard = &stub->guard();
    }
    // In the C2 code, we move the non-hot part of nmethod entry barriers out-of-line to a stub.
    bs->nmethod_entry_barrier(masm, slow_path, continuation, guard);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->output()->set_frame_complete(__ offset());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    ConstantTable& constant_table = C->output()->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
 1863 
// Prolog size depends on frame size, barriers, and flags; measure it.
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}
 1869 
// The prolog contains no relocatable constants.
int MachPrologNode::reloc() const
{
  return 0;
}
 1874 
 1875 //=============================================================================
 1876 
 1877 #ifndef PRODUCT
// Pretty-print the method epilog; mirrors MachEpilogNode::emit below
// (frame teardown, optional ROP authentication, return safepoint poll).
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    // Degenerate frame: just pop lr/rfp.
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    // Small frame: reload lr/rfp at an immediate offset, then free the frame.
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    // Large frame: free the bulk via a scratch register, then pop lr/rfp.
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }
  if (VM_Version::use_rop_protection()) {
    // ROP protection: authenticate lr, then probe the return address.
    st->print("autiaz\n\t");
    st->print("ldr  zr, [lr]\n\t");
  }

  if (do_polling() && C->is_method_compilation()) {
    // Return-path safepoint poll.
    st->print("# test polling word\n\t");
    st->print("ldr  rscratch1, [rthread],#%d\n\t", in_bytes(JavaThread::polling_word_offset()));
    st->print("cmp  sp, rscratch1\n\t");
    st->print("bhi #slow_path");
  }
}
 1906 #endif
 1907 
// Emit the method epilog: remove the frame, check reserved stack pages
// if used, and perform the return safepoint poll.
void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    // Use a dummy label while measuring code size; otherwise register a
    // real out-of-line safepoint-poll stub and branch to its entry.
    Label dummy_label;
    Label* code_stub = &dummy_label;
    if (!C->output()->in_scratch_emit_size()) {
      C2SafepointPollStub* stub = new (C->comp_arena()) C2SafepointPollStub(__ offset());
      C->output()->add_stub(stub);
      code_stub = &stub->entry();
    }
    __ relocate(relocInfo::poll_return_type);
    __ safepoint_poll(*code_stub, true /* at_return */, true /* in_nmethod */);
  }
}
 1930 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}
 1935 
int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}
 1940 
// Use the generic pipeline description for the epilog.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
 1944 
 1945 //=============================================================================
 1946 
// Map an OptoReg name to its register class (int, float/vector, SVE
// predicate, or stack slot).  Register numbers are laid out as int
// slots, then float slots, then predicate slots, then stack, so the
// comparisons below must stay in this cumulative order.
static enum RC rc_class(OptoReg::Name reg) {

  if (reg == OptoReg::Bad) {
    return rc_bad;
  }

  // we have 32 int registers * 2 halves
  int slots_of_int_registers = Register::number_of_registers * Register::max_slots_per_register;

  if (reg < slots_of_int_registers) {
    return rc_int;
  }

  // we have 32 float register * 8 halves
  int slots_of_float_registers = FloatRegister::number_of_registers * FloatRegister::max_slots_per_register;
  if (reg < slots_of_int_registers + slots_of_float_registers) {
    return rc_float;
  }

  int slots_of_predicate_registers = PRegister::number_of_registers * PRegister::max_slots_per_register;
  if (reg < slots_of_int_registers + slots_of_float_registers + slots_of_predicate_registers) {
    return rc_predicate;
  }

  // Between predicate regs & stack is the flags.
  assert(OptoReg::is_stack(reg), "blow up if spilling flags");

  return rc_stack;
}
 1976 
// Shared worker for MachSpillCopyNode::emit/format: moves a value between
// any combination of int registers, float/vector registers, SVE predicate
// registers, and stack slots.  When 'masm' is non-null it emits code; when
// 'st' is non-null it prints a human-readable description.  Returns 0
// (the caller measures size separately).
uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
  Compile* C = ra_->C;

  // Get registers to move.
  OptoReg::Name src_hi = ra_->get_reg_second(in(1));
  OptoReg::Name src_lo = ra_->get_reg_first(in(1));
  OptoReg::Name dst_hi = ra_->get_reg_second(this);
  OptoReg::Name dst_lo = ra_->get_reg_first(this);

  enum RC src_hi_rc = rc_class(src_hi);
  enum RC src_lo_rc = rc_class(src_lo);
  enum RC dst_hi_rc = rc_class(dst_hi);
  enum RC dst_lo_rc = rc_class(dst_lo);

  assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");

  if (src_hi != OptoReg::Bad && !bottom_type()->isa_vectmask()) {
    assert((src_lo&1)==0 && src_lo+1==src_hi &&
           (dst_lo&1)==0 && dst_lo+1==dst_hi,
           "expected aligned-adjacent pairs");
  }

  if (src_lo == dst_lo && src_hi == dst_hi) {
    return 0;            // Self copy, no move.
  }

  // A 64-bit move uses both halves of an aligned-adjacent slot pair.
  bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
              (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
  int src_offset = ra_->reg2offset(src_lo);
  int dst_offset = ra_->reg2offset(dst_lo);

  if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
    // Vector spill/copy (NEON VecD/VecX or scalable SVE VecA).
    uint ireg = ideal_reg();
    if (ireg == Op_VecA && masm) {
      int sve_vector_reg_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
      if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
        // stack->stack
        __ spill_copy_sve_vector_stack_to_stack(src_offset, dst_offset,
                                                sve_vector_reg_size_in_bytes);
      } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
        __ spill_sve_vector(as_FloatRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo),
                            sve_vector_reg_size_in_bytes);
      } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
        __ unspill_sve_vector(as_FloatRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo),
                              sve_vector_reg_size_in_bytes);
      } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
        // reg->reg: sve_orr with identical sources acts as a register move.
        __ sve_orr(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                   as_FloatRegister(Matcher::_regEncode[src_lo]),
                   as_FloatRegister(Matcher::_regEncode[src_lo]));
      } else {
        ShouldNotReachHere();
      }
    } else if (masm) {
      assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
      assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
      if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
        // stack->stack
        assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
        if (ireg == Op_VecD) {
          __ unspill(rscratch1, true, src_offset);
          __ spill(rscratch1, true, dst_offset);
        } else {
          __ spill_copy128(src_offset, dst_offset);
        }
      } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
        __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
               ireg == Op_VecD ? __ T8B : __ T16B,
               as_FloatRegister(Matcher::_regEncode[src_lo]));
      } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
        __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
                 ireg == Op_VecD ? __ D : __ Q,
                 ra_->reg2offset(dst_lo));
      } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
        __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                   ireg == Op_VecD ? __ D : __ Q,
                   ra_->reg2offset(src_lo));
      } else {
        ShouldNotReachHere();
      }
    }
  } else if (masm) {
    // Scalar (or predicate) move, dispatched on the source register class.
    switch (src_lo_rc) {
    case rc_int:
      if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
        if (is64) {
            __ mov(as_Register(Matcher::_regEncode[dst_lo]),
                   as_Register(Matcher::_regEncode[src_lo]));
        } else {
            __ movw(as_Register(Matcher::_regEncode[dst_lo]),
                    as_Register(Matcher::_regEncode[src_lo]));
        }
      } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
        if (is64) {
            __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_Register(Matcher::_regEncode[src_lo]));
        } else {
            __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_Register(Matcher::_regEncode[src_lo]));
        }
      } else {                    // gpr --> stack spill
        assert(dst_lo_rc == rc_stack, "spill to bad register class");
        __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
      }
      break;
    case rc_float:
      if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
        if (is64) {
            __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        } else {
            __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        }
      } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
        if (is64) {
            __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        } else {
            __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        }
      } else {                    // fpr --> stack spill
        assert(dst_lo_rc == rc_stack, "spill to bad register class");
        __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
                 is64 ? __ D : __ S, dst_offset);
      }
      break;
    case rc_stack:
      if (dst_lo_rc == rc_int) {  // stack --> gpr load
        __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
      } else if (dst_lo_rc == rc_float) { // stack --> fpr load
        __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                   is64 ? __ D : __ S, src_offset);
      } else if (dst_lo_rc == rc_predicate) {
        __ unspill_sve_predicate(as_PRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo),
                                 Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
      } else {                    // stack --> stack copy
        assert(dst_lo_rc == rc_stack, "spill to bad register class");
        if (ideal_reg() == Op_RegVectMask) {
          __ spill_copy_sve_predicate_stack_to_stack(src_offset, dst_offset,
                                                     Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
        } else {
          __ unspill(rscratch1, is64, src_offset);
          __ spill(rscratch1, is64, dst_offset);
        }
      }
      break;
    case rc_predicate:
      if (dst_lo_rc == rc_predicate) {
        __ sve_mov(as_PRegister(Matcher::_regEncode[dst_lo]), as_PRegister(Matcher::_regEncode[src_lo]));
      } else if (dst_lo_rc == rc_stack) {
        __ spill_sve_predicate(as_PRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo),
                               Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
      } else {
        assert(false, "bad src and dst rc_class combination.");
        ShouldNotReachHere();
      }
      break;
    default:
      assert(false, "bad rc_class for spill");
      ShouldNotReachHere();
    }
  }

  // Debug-print path (used by format()): describe the move just handled.
  if (st) {
    st->print("spill ");
    if (src_lo_rc == rc_stack) {
      st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
    } else {
      st->print("%s -> ", Matcher::regName[src_lo]);
    }
    if (dst_lo_rc == rc_stack) {
      st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
    } else {
      st->print("%s", Matcher::regName[dst_lo]);
    }
    if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
      int vsize = 0;
      switch (ideal_reg()) {
      case Op_VecD:
        vsize = 64;
        break;
      case Op_VecX:
        vsize = 128;
        break;
      case Op_VecA:
        vsize = Matcher::scalable_vector_reg_size(T_BYTE) * 8;
        break;
      default:
        assert(false, "bad register type for spill");
        ShouldNotReachHere();
      }
      st->print("\t# vector spill size = %d", vsize);
    } else if (ideal_reg() == Op_RegVectMask) {
      assert(Matcher::supports_scalable_vector(), "bad register type for spill");
      int vsize = Matcher::scalable_predicate_reg_slots() * 32;
      st->print("\t# predicate spill size = %d", vsize);
    } else {
      st->print("\t# spill size = %d", is64 ? 64 : 32);
    }
  }

  return 0;

}
 2182 
 2183 #ifndef PRODUCT
// Debug printout: reuse implementation() in print-only mode (null masm).
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    implementation(nullptr, ra_, false, st);
}
 2190 #endif
 2191 
// Emit the spill/copy: reuse implementation() in emit-only mode (null st).
void MachSpillCopyNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  implementation(masm, ra_, false, nullptr);
}
 2195 
// Spill/copy size varies with register classes involved; measure it.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 2199 
 2200 //=============================================================================
 2201 
 2202 #ifndef PRODUCT
 2203 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 2204   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 2205   int reg = ra_->get_reg_first(this);
 2206   st->print("add %s, rsp, #%d]\t# box lock",
 2207             Matcher::regName[reg], offset);
 2208 }
 2209 #endif
 2210 
// Materialize the address of the box-lock stack slot into its register.
void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  // This add will handle any 24-bit signed offset. 24 bits allows an
  // 8 megabyte stack frame.
  __ add(as_Register(reg), sp, offset);
}
 2219 
 2220 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
 2221   // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
 2222   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 2223 
 2224   if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
 2225     return NativeInstruction::instruction_size;
 2226   } else {
 2227     return 2 * NativeInstruction::instruction_size;
 2228   }
 2229 }
 2230 
 2231 //=============================================================================
 2232 
 2233 #ifndef PRODUCT
 2234 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 2235 {
 2236   st->print_cr("# MachUEPNode");
 2237   if (UseCompressedClassPointers) {
 2238     st->print_cr("\tldrw rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 2239     st->print_cr("\tldrw r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
 2240     st->print_cr("\tcmpw rscratch1, r10");
 2241   } else {
 2242     st->print_cr("\tldr rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 2243     st->print_cr("\tldr r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
 2244     st->print_cr("\tcmp rscratch1, r10");
 2245   }
 2246   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
 2247 }
 2248 #endif
 2249 
// Emit the inline-cache check at the unverified entry point.
void MachUEPNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
{
  __ ic_check(InteriorEntryAlignment);
}
 2254 
// ic_check size varies with alignment and compressed-pointer mode; measure it.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
 2259 
 2260 // REQUIRED EMIT CODE
 2261 
 2262 //=============================================================================
 2263 
 2264 // Emit exception handler code.
// Emit exception handler code.
// Returns the handler's offset within the stub section, or 0 on
// code-cache exhaustion (a bailout is recorded in that case).
int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  address base = __ start_a_stub(size_exception_handler());
  if (base == nullptr) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
 2282 
 2283 // Emit deopt handler code.
// Emit deopt handler code.
// Returns the handler's offset within the stub section, or 0 on
// code-cache exhaustion (a bailout is recorded in that case).
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  address base = __ start_a_stub(size_deopt_handler());
  if (base == nullptr) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // Deopt expects lr to hold the handler's own address; capture pc into
  // lr before jumping to the unpack blob.  The size assert below is
  // exact, so this sequence must stay in lock-step with
  // size_deopt_handler().
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset == (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
 2302 
 2303 // REQUIRED MATCHER CODE
 2304 
 2305 //=============================================================================
 2306 
// Identify extra cases that we might want to provide match rules for e.g.
// CPU-feature-dependent ideal opcodes.  Returns false when the opcode has
// no AD match rule or the required hardware feature/flag is missing.
bool Matcher::match_rule_supported(int opcode) {
  if (!has_match_rule(opcode))
    return false;

  switch (opcode) {
    case Op_OnSpinWait:
      return VM_Version::supports_on_spin_wait();
    case Op_CacheWB:
    case Op_CacheWBPreSync:
    case Op_CacheWBPostSync:
      // Cache maintenance ops need DC CVAP (data cache line flush) support.
      if (!VM_Version::supports_data_cache_line_flush()) {
        return false;
      }
      break;
    case Op_ExpandBits:
    case Op_CompressBits:
      // BDEP/BEXT are provided by the SVE2 bit-permute extension.
      if (!VM_Version::supports_svebitperm()) {
        return false;
      }
      break;
    case Op_FmaF:
    case Op_FmaD:
    case Op_FmaVF:
    case Op_FmaVD:
      if (!UseFMA) {
        return false;
      }
      break;
    case Op_FmaHF:
      // UseFMA flag also needs to be checked along with FEAT_FP16
      if (!UseFMA || !is_feat_fp16_supported()) {
        return false;
      }
      break;
    case Op_AddHF:
    case Op_SubHF:
    case Op_MulHF:
    case Op_DivHF:
    case Op_MinHF:
    case Op_MaxHF:
    case Op_SqrtHF:
      // Half-precision floating point scalar operations require FEAT_FP16
      // to be available. FEAT_FP16 is enabled if both "fphp" and "asimdhp"
      // features are supported.
      if (!is_feat_fp16_supported()) {
        return false;
      }
      break;
  }

  return true; // Per default match rules are supported.
}
 2359 
// Register mask for allocatable SVE predicate registers.
const RegMask* Matcher::predicate_reg_mask(void) {
  return &_PR_REG_mask;
}
 2363 
// Vector calling convention is available whenever Vector API support is on.
bool Matcher::supports_vector_calling_convention(void) {
  return EnableVectorSupport;
}
 2367 
 2368 OptoRegPair Matcher::vector_return_value(uint ideal_reg) {
 2369   assert(EnableVectorSupport, "sanity");
 2370   int lo = V0_num;
 2371   int hi = V0_H_num;
 2372   if (ideal_reg == Op_VecX || ideal_reg == Op_VecA) {
 2373     hi = V0_K_num;
 2374   }
 2375   return OptoRegPair(hi, lo);
 2376 }
 2377 
 2378 // Is this branch offset short enough that a short branch can be used?
 2379 //
 2380 // NOTE: If the platform does not provide any short branch variants, then
 2381 //       this method should return false for offset 0.
 2382 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
 2383   // The passed offset is relative to address of the branch.
 2384 
 2385   return (-32768 <= offset && offset < 32768);
 2386 }
 2387 
 2388 // Vector width in bytes.
// Usable vector width in bytes for element type bt, capped by MaxVectorSize
// and the hardware maximum (SVE VL or NEON 16 bytes); 0 means "no vectors".
int Matcher::vector_width_in_bytes(BasicType bt) {
  // The MaxVectorSize should have been set by detecting SVE max vector register size.
  int size = MIN2((UseSVE > 0) ? (int)FloatRegister::sve_vl_max : (int)FloatRegister::neon_vl, (int)MaxVectorSize);
  // Minimum 2 values in vector
  if (size < 2*type2aelembytes(bt)) size = 0;
  // But never < 4
  if (size < 4) size = 0;
  return size;
}
 2398 
 2399 // Limits on vector size (number of elements) loaded into vector.
 2400 int Matcher::max_vector_size(const BasicType bt) {
 2401   return vector_width_in_bytes(bt)/type2aelembytes(bt);
 2402 }
 2403 
// Minimum vector size (number of elements) for element type bt, never
// exceeding the corresponding max_vector_size.
int Matcher::min_vector_size(const BasicType bt) {
  // Usually, the shortest vector length supported by AArch64 ISA and
  // Vector API species is 64 bits. However, we allow 32-bit or 16-bit
  // vectors in a few special cases.
  int size;
  switch(bt) {
    case T_BOOLEAN:
      // Load/store a vector mask with only 2 elements for vector types
      // such as "2I/2F/2L/2D".
      size = 2;
      break;
    case T_BYTE:
      // Generate a "4B" vector, to support vector cast between "8B/16B"
      // and "4S/4I/4L/4F/4D".
      size = 4;
      break;
    case T_SHORT:
      // Generate a "2S" vector, to support vector cast between "4S/8S"
      // and "2I/2L/2F/2D".
      size = 2;
      break;
    default:
      // Limit the min vector length to 64-bit.
      size = 8 / type2aelembytes(bt);
      // The number of elements in a vector should be at least 2.
      size = MAX2(size, 2);
  }

  // Clamp so min never exceeds max for this element type.
  int max_size = max_vector_size(bt);
  return MIN2(size, max_size);
}
 2435 
// Auto-vectorization uses the same vector-size limit as the Vector API.
int Matcher::max_vector_size_auto_vectorization(const BasicType bt) {
  return Matcher::max_vector_size(bt);
}
 2439 
 2440 // Actual max scalable vector register length.
// Actual max scalable vector register length.
int Matcher::scalable_vector_reg_size(const BasicType bt) {
  return Matcher::max_vector_size(bt);
}
 2444 
 2445 // Vector ideal reg.
// Vector ideal reg: map a vector length in bytes to the ideal register
// kind (scalable VecA, 64-bit VecD, or 128-bit VecX).
uint Matcher::vector_ideal_reg(int len) {
  // SVE-sized vectors (wider than NEON's 16 bytes) use the scalable kind.
  if (UseSVE > 0 && FloatRegister::neon_vl < len && len <= FloatRegister::sve_vl_max) {
    return Op_VecA;
  }
  switch(len) {
    // For 16-bit/32-bit mask vector, reuse VecD.
    case  2:
    case  4:
    case  8: return Op_VecD;
    case 16: return Op_VecX;
  }
  ShouldNotReachHere();
  return 0;
}
 2460 
// Replace a generic vector operand with the concrete operand class that
// matches the given ideal register kind.
MachOper* Matcher::pd_specialize_generic_vector_operand(MachOper* generic_opnd, uint ideal_reg, bool is_temp) {
  assert(Matcher::is_generic_vector(generic_opnd), "not generic");
  switch (ideal_reg) {
    case Op_VecA: return new vecAOper();
    case Op_VecD: return new vecDOper();
    case Op_VecX: return new vecXOper();
  }
  ShouldNotReachHere();
  return nullptr;
}
 2471 
// AArch64 does not mark any mach nodes as pure register-to-register moves.
bool Matcher::is_reg2reg_move(MachNode* m) {
  return false;
}
 2475 
// A generic vector operand is identified by the VREG operand opcode.
bool Matcher::is_generic_vector(MachOper* opnd)  {
  return opnd->opcode() == VREG;
}
 2479 
 2480 // Return whether or not this register is ever used as an argument.
 2481 // This function is used on startup to build the trampoline stubs in
 2482 // generateOptoStub.  Registers not mentioned will be killed by the VM
 2483 // call in the trampoline, and arguments in those registers not be
 2484 // available to the callee.
bool Matcher::can_be_java_arg(int reg)
{
  // r0-r7 and v0-v7 (both halves of each) are the Java argument registers.
  return
    reg ==  R0_num || reg == R0_H_num ||
    reg ==  R1_num || reg == R1_H_num ||
    reg ==  R2_num || reg == R2_H_num ||
    reg ==  R3_num || reg == R3_H_num ||
    reg ==  R4_num || reg == R4_H_num ||
    reg ==  R5_num || reg == R5_H_num ||
    reg ==  R6_num || reg == R6_H_num ||
    reg ==  R7_num || reg == R7_H_num ||
    reg ==  V0_num || reg == V0_H_num ||
    reg ==  V1_num || reg == V1_H_num ||
    reg ==  V2_num || reg == V2_H_num ||
    reg ==  V3_num || reg == V3_H_num ||
    reg ==  V4_num || reg == V4_H_num ||
    reg ==  V5_num || reg == V5_H_num ||
    reg ==  V6_num || reg == V6_H_num ||
    reg ==  V7_num || reg == V7_H_num;
}
 2505 
// Any Java argument register may be spilled.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
 2510 
// Integer register pressure threshold used by the register allocator;
// overridable via the INTPRESSURE flag (-1 means "use the default below").
uint Matcher::int_pressure_limit()
{
  // JDK-8183543: When taking the number of available registers as int
  // register pressure threshold, the jtreg test:
  // test/hotspot/jtreg/compiler/regalloc/TestC2IntPressure.java
  // failed due to C2 compilation failure with
  // "COMPILE SKIPPED: failed spill-split-recycle sanity check".
  //
  // A derived pointer is live at CallNode and then is flagged by RA
  // as a spilled LRG. Spilling heuristics(Spill-USE) explicitly skip
  // derived pointers and lastly fail to spill after reaching maximum
  // number of iterations. Lowering the default pressure threshold to
  // (_NO_SPECIAL_REG32_mask.Size() minus 1) forces CallNode to become
  // a high register pressure area of the code so that split_DEF can
  // generate DefinitionSpillCopy for the derived pointer.
  uint default_int_pressure_threshold = _NO_SPECIAL_REG32_mask.Size() - 1;
  if (!PreserveFramePointer) {
    // When PreserveFramePointer is off, frame pointer is allocatable,
    // but different from other SOC registers, it is excluded from
    // fatproj's mask because its save type is No-Save. Decrease 1 to
    // ensure high pressure at fatproj when PreserveFramePointer is off.
    // See check_pressure_at_fatproj().
    default_int_pressure_threshold--;
  }
  return (INTPRESSURE == -1) ? default_int_pressure_threshold : INTPRESSURE;
}
 2537 
// Float register pressure threshold used by the register allocator.
// Defaults to the size of the float register class; can be overridden
// with -XX:FLOATPRESSURE.
uint Matcher::float_pressure_limit()
{
  // _FLOAT_REG_mask is generated by adlc from the float_reg register class.
  return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.Size() : FLOATPRESSURE;
}
 2543 
// AArch64 never falls back to assembler stubs for long division by a
// constant; the ideal-graph transformation is always used.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
 2547 
// Register for DIVI projection of divmodI.
// Unreachable on AArch64: no fused divmodI node is matched here.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
 2552 
// Register for MODI projection of divmodI.
// Unreachable on AArch64: no fused divmodI node is matched here.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
 2558 
// Register for DIVL projection of divmodL.
// Unreachable on AArch64: no fused divmodL node is matched here.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
 2564 
// Register for MODL projection of divmodL.
// Unreachable on AArch64: no fused divmodL node is matched here.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
 2570 
// Mask of the register used to preserve SP across a method handle
// invoke: the frame pointer register.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
 2574 
 2575 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
 2576   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
 2577     Node* u = addp->fast_out(i);
 2578     if (u->is_LoadStore()) {
 2579       // On AArch64, LoadStoreNodes (i.e. compare and swap
 2580       // instructions) only take register indirect as an operand, so
 2581       // any attempt to use an AddPNode as an input to a LoadStoreNode
 2582       // must fail.
 2583       return false;
 2584     }
 2585     if (u->is_Mem()) {
 2586       int opsize = u->as_Mem()->memory_size();
 2587       assert(opsize > 0, "unexpected memory operand size");
 2588       if (u->as_Mem()->memory_size() != (1<<shift)) {
 2589         return false;
 2590       }
 2591     }
 2592   }
 2593   return true;
 2594 }
 2595 
// Convert BoolTest condition to Assembler condition.
// Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
Assembler::Condition to_assembler_cond(BoolTest::mask cond) {
  Assembler::Condition result;
  switch(cond) {
    case BoolTest::eq:
      result = Assembler::EQ; break;
    case BoolTest::ne:
      result = Assembler::NE; break;
    case BoolTest::le:
      result = Assembler::LE; break;
    case BoolTest::ge:
      result = Assembler::GE; break;
    case BoolTest::lt:
      result = Assembler::LT; break;
    case BoolTest::gt:
      result = Assembler::GT; break;
    // Unsigned comparisons map to the unsigned condition codes
    // (LS/HS/LO/HI).
    case BoolTest::ule:
      result = Assembler::LS; break;
    case BoolTest::uge:
      result = Assembler::HS; break;
    case BoolTest::ult:
      result = Assembler::LO; break;
    case BoolTest::ugt:
      result = Assembler::HI; break;
    case BoolTest::overflow:
      result = Assembler::VS; break;
    case BoolTest::no_overflow:
      result = Assembler::VC; break;
    default:
      ShouldNotReachHere();
      return Assembler::Condition(-1);
  }

  // Check conversion against the adlc-generated operand classes; the
  // unsigned bit is stripped before consulting cmpOpUOper.
  if (cond & BoolTest::unsigned_compare) {
    assert(cmpOpUOper((BoolTest::mask)((int)cond & ~(BoolTest::unsigned_compare))).ccode() == result, "Invalid conversion");
  } else {
    assert(cmpOpOper(cond).ccode() == result, "Invalid conversion");
  }

  return result;
}
 2639 
// Binary src (Replicate con)
// Return true if 'n' is an SVE vector arithmetic/logical operation whose
// second input 'm' is a Replicate of an int/long constant that can be
// encoded as an SVE immediate operand, so the matcher may clone 'm'.
static bool is_valid_sve_arith_imm_pattern(Node* n, Node* m) {
  if (n == nullptr || m == nullptr) {
    return false;
  }

  // Only applies when SVE is enabled and 'm' replicates a scalar.
  if (UseSVE == 0 || m->Opcode() != Op_Replicate) {
    return false;
  }

  Node* imm_node = m->in(1);
  if (!imm_node->is_Con()) {
    return false;
  }

  // Only integral constants can be SVE immediates.
  const Type* t = imm_node->bottom_type();
  if (!(t->isa_int() || t->isa_long())) {
    return false;
  }

  switch (n->Opcode()) {
  case Op_AndV:
  case Op_OrV:
  case Op_XorV: {
    // Logical ops use the SVE bitmask-immediate encoding, checked at
    // the vector's element width.
    Assembler::SIMD_RegVariant T = Assembler::elemType_to_regVariant(Matcher::vector_element_basic_type(n));
    uint64_t value = t->isa_long() ? (uint64_t)imm_node->get_long() : (uint64_t)imm_node->get_int();
    return Assembler::operand_valid_for_sve_logical_immediate(Assembler::regVariant_to_elemBits(T), value);
  }
  case Op_AddVB:
    // Byte add: immediate must fit in [-255, 255].
    return (imm_node->get_int() <= 255 && imm_node->get_int() >= -255);
  case Op_AddVS:
  case Op_AddVI:
    return Assembler::operand_valid_for_sve_add_sub_immediate((int64_t)imm_node->get_int());
  case Op_AddVL:
    return Assembler::operand_valid_for_sve_add_sub_immediate(imm_node->get_long());
  default:
    return false;
  }
}
 2679 
 2680 // (XorV src (Replicate m1))
 2681 // (XorVMask src (MaskAll m1))
 2682 static bool is_vector_bitwise_not_pattern(Node* n, Node* m) {
 2683   if (n != nullptr && m != nullptr) {
 2684     return (n->Opcode() == Op_XorV || n->Opcode() == Op_XorVMask) &&
 2685            VectorNode::is_all_ones_vector(m);
 2686   }
 2687   return false;
 2688 }
 2689 
// Should the matcher clone input 'm' of node 'n'?
// Returns true (and pushes 'm' for visiting) when 'm' participates in
// one of the listed patterns, so that each use can fold it into a
// single matched instruction.
bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
  if (is_vshift_con_pattern(n, m) ||
      is_vector_bitwise_not_pattern(n, m) ||
      is_valid_sve_arith_imm_pattern(n, m) ||
      is_encode_and_store_pattern(n, m)) {
    mstack.push(m, Visit);
    return true;
  }
  return false;
}
 2701 
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {

  // Loads and stores with indirect memory input (e.g., volatile loads and
  // stores) do not subsume the input into complex addressing expressions. If
  // the addressing expression is input to at least one such load or store, do
  // not clone the addressing expression. Query needs_acquiring_load and
  // needs_releasing_store as a proxy for indirect memory input, as it is not
  // possible to directly query for indirect memory input at this stage.
  for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
    Node* n = m->fast_out(i);
    if (n->is_Load() && needs_acquiring_load(n)) {
      return false;
    }
    if (n->is_Store() && needs_releasing_store(n)) {
      return false;
    }
  }

  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  // (AddP base addr (LShiftL (ConvI2L idx) scale)): clone the shifted,
  // possibly sign-extending, index so it can be folded into the
  // scaled-register addressing mode of each use.
  Node *off = m->in(AddPNode::Offset);
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    // (AddP base addr (ConvI2L idx)): clone the sign-extension so each
    // use can select a sign-extended register-offset addressing mode.
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
 2759 
// Emit a volatile (acquire/release) access INSN of REG at [BASE].
// Volatile accesses permit only plain register-indirect addressing, so
// any index, displacement, or scale is rejected with a guarantee.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
 2767 
 2768 
// Build an Address from the components of a matched memory operand.
// The ADL opcode tells us whether the index register must be
// sign-extended (sxtw) or zero-extended/shifted (lsl); index == -1
// means a base-plus-displacement address.
static Address mem2address(int opcode, Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
    case INDINDEXI2L:
    case INDINDEXI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      return Address(base, disp);
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      return Address(base, as_Register(index), scale);
    }
  }
 2794 
 2795 
// Member-function-pointer types for the MacroAssembler load/store
// emitters passed to the loadStore() helpers below.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_insn2)(Register Rt, Register adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
 2801 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  // size_in_memory is the access width in bytes; it bounds the offset
  // legitimization below.
  static void loadStore(C2_MacroAssembler* masm, mem_insn insn,
                        Register reg, int opcode,
                        Register base, int index, int scale, int disp,
                        int size_in_memory)
  {
    Address addr = mem2address(opcode, base, index, scale, disp);
    if (addr.getMode() == Address::base_plus_offset) {
      /* Fix up any out-of-range offsets, materializing them via
         rscratch1, which therefore must not alias base or reg. */
      assert_different_registers(rscratch1, base);
      assert_different_registers(rscratch1, reg);
      addr = __ legitimize_address(addr, size_in_memory, rscratch1);
    }
    (masm->*insn)(reg, addr);
  }
 2819 
  // Float/double variant of loadStore.  Mirrors mem2address's opcode
  // switch to pick sign-extended vs. shifted index addressing, and
  // legitimizes out-of-range base-plus-offset displacements.
  static void loadStore(C2_MacroAssembler* masm, mem_float_insn insn,
                        FloatRegister reg, int opcode,
                        Register base, int index, int size, int disp,
                        int size_in_memory)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      // Fix up any out-of-range offsets.
      assert_different_registers(rscratch1, base);
      Address addr = Address(base, disp);
      addr = __ legitimize_address(addr, size_in_memory, rscratch1);
      (masm->*insn)(reg, addr);
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm->*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
 2847 
  // Vector variant of loadStore.  Note: unlike the scalar variants,
  // out-of-range displacements are not legitimized here.
  static void loadStore(C2_MacroAssembler* masm, mem_vector_insn insn,
                        FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                        int opcode, Register base, int index, int size, int disp)
  {
    if (index == -1) {
      (masm->*insn)(reg, T, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode");
      (masm->*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
    }
  }
 2859 
 2860 %}
 2861 
 2862 
 2863 
 2864 //----------ENCODING BLOCK-----------------------------------------------------
 2865 // This block specifies the encoding classes used by the compiler to
 2866 // output byte streams.  Encoding classes are parameterized macros
 2867 // used by Machine Instruction Nodes in order to generate the bit
 2868 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  Four interfaces are
// currently supported: REG_INTER, CONST_INTER, MEMORY_INTER, and
// COND_INTER.  REG_INTER causes an operand to generate a function
 2872 // which returns its register number when queried.  CONST_INTER causes
 2873 // an operand to generate a function which returns the value of the
 2874 // constant when queried.  MEMORY_INTER causes an operand to generate
 2875 // four functions which return the Base Register, the Index Register,
 2876 // the Scale Value, and the Offset Value of the operand when queried.
 2877 // COND_INTER causes an operand to generate six functions which return
 2878 // the encoding code (ie - encoding bits for the instruction)
 2879 // associated with each basic boolean condition for a conditional
 2880 // instruction.
 2881 //
 2882 // Instructions specify two basic values for encoding.  Again, a
 2883 // function is available to check if the constant displacement is an
 2884 // oop. They use the ins_encode keyword to specify their encoding
 2885 // classes (which must be a sequence of enc_class names, and their
 2886 // parameters, specified in the encoding block), and they use the
 2887 // opcode keyword to specify, in order, their primary, secondary, and
 2888 // tertiary opcode.  Only the opcode sections which a particular
 2889 // instruction needs for encoding need to be specified.
 2890 encode %{
  // Build emit functions for each basic byte or larger field in the
  // instruction encoding, and call them from C++ code in the
  // enc_class source block.  Emit functions will live in the main
  // source block for now.  In future, we can generalize this by
  // adding a syntax that specifies the sizes of fields in an order,
  // so that the adlc can build the emit functions automagically
 2898 
  // Catch-all for unimplemented encodings: emits an unimplemented()
  // trap with a diagnostic message.
  enc_class enc_unimplemented %{
    __ unimplemented("C2 catch all");
  %}
 2903 
  // BEGIN Non-volatile memory access
  //
  // Each enc_class below emits one plain (non-ordered) load or store
  // via loadStore(), passing the matched memory operand's components
  // and the access width in bytes so out-of-range offsets can be
  // legitimized through rscratch1.

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(masm, &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsb(iRegI dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(masm, &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrb(iRegI dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(masm, &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrb(iRegL dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(masm, &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrshw(iRegI dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(masm, &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsh(iRegI dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(masm, &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrh(iRegI dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(masm, &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrh(iRegL dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(masm, &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrw(iRegI dst, memory4 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(masm, &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrw(iRegL dst, memory4 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(masm, &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsw(iRegL dst, memory4 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(masm, &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldr(iRegL dst, memory8 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(masm, &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrs(vRegF dst, memory4 mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrd(vRegD dst, memory8 mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strb(iRegI src, memory1 mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(masm, &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strb0(memory1 mem) %{
    loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strh(iRegI src, memory2 mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(masm, &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strh0(memory2 mem) %{
    loadStore(masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strw(iRegI src, memory4 mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(masm, &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strw0(memory4 mem) %{
    loadStore(masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_str(iRegL src, memory8 mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(masm, &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_str0(memory8 mem) %{
    loadStore(masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strs(vRegF src, memory4 mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strd(vRegD src, memory8 mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strb0_ordered(memory4 mem) %{
      __ membar(Assembler::StoreStore);
      loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // END Non-volatile memory access
 3110 
  // Vector loads and stores.  The SIMD_RegVariant (H/S/D/Q) passed to
  // loadStore() selects the width of the vector access.
  enc_class aarch64_enc_ldrvH(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvS(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvH(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvS(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
 3159 
 3160   // volatile loads and stores
 3161 
 3162   enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
 3163     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3164                  rscratch1, stlrb);
 3165   %}
 3166 
 3167   enc_class aarch64_enc_stlrb0(memory mem) %{
 3168     MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3169                  rscratch1, stlrb);
 3170   %}
 3171 
 3172   enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
 3173     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3174                  rscratch1, stlrh);
 3175   %}
 3176 
 3177   enc_class aarch64_enc_stlrh0(memory mem) %{
 3178     MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3179                  rscratch1, stlrh);
 3180   %}
 3181 
 3182   enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
 3183     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3184                  rscratch1, stlrw);
 3185   %}
 3186 
 3187   enc_class aarch64_enc_stlrw0(memory mem) %{
 3188     MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3189                  rscratch1, stlrw);
 3190   %}
 3191 
  // Load-acquire (LDARB/LDARH/LDARW/LDAR) encodings for volatile loads.
  // LDAR* always zero-extends, so the signed variants follow the load with
  // an explicit sign-extension (sxtb/sxth and their 32-bit forms).

  // Load-acquire byte, sign-extended into a 32-bit register.
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  // Load-acquire byte, sign-extended into a 64-bit register.
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  // Load-acquire byte, zero-extended (32-bit destination).
  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // Load-acquire byte, zero-extended (64-bit destination).
  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // Load-acquire halfword, sign-extended into a 32-bit register.
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // Load-acquire halfword, sign-extended into a 64-bit register.
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  // Load-acquire halfword, zero-extended (32-bit destination).
  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // Load-acquire halfword, zero-extended (64-bit destination).
  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // Load-acquire word (32-bit destination).
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // NOTE(review): this enc_class reuses the name aarch64_enc_ldarw (declared
  // just above with an iRegI destination) for an iRegL destination. The
  // bodies are identical; presumably ADLC resolves the duplicate name
  // harmlessly — confirm against the ADLC encode-class handling.
  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // Load-acquire doubleword.
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}
 3254 
  // Volatile float load: there is no FP load-acquire, so load-acquire into
  // rscratch1 and move the bits to the FP register with fmov.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // Volatile double load, via a 64-bit load-acquire and fmov.
  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
 3266 
  // Store-release doubleword from a general register.
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    // (STLR cannot encode SP as its data register), so copy SP
    // through rscratch2 first.
    if (src_reg == r31_sp) {
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // Store-release doubleword of zero.
  enc_class aarch64_enc_stlr0(memory mem) %{
    MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // Volatile float store: no FP store-release exists, so move the bits to
  // rscratch2 and store-release from there.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // Volatile double store, via fmov to rscratch2 and a 64-bit store-release.
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
 3302 
 3303   // synchronized read/update encodings
 3304 
  // Load-acquire-exclusive doubleword. LDAXR takes only a base register, so
  // any displacement or index must first be folded into rscratch1 with lea.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory8 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // Fold disp and scaled index in two steps; Address cannot
        // express base + disp + (index << scale) in one operand.
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}

  // Store-release-exclusive doubleword. The address is materialized in
  // rscratch2 (rscratch1 receives the STLXR status result), and the final
  // cmpw sets the condition flags so a following branch can test success
  // (status 0 = store succeeded).
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory8 mem) %{
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    __ cmpw(rscratch1, zr);
  %}
 3361 
  // Compare-and-swap encodings. All delegate to MacroAssembler::cmpxchg,
  // which leaves the success/failure result in the condition flags (see
  // aarch64_enc_cset_eq below). The guarantee enforces that the matcher
  // only hands us a plain base-register address.

  // CAS doubleword, release-only ordering.
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // CAS word, release-only ordering.
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // CAS halfword, release-only ordering.
  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // CAS byte, release-only ordering.
  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}


  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // CAS word, acquire+release ordering.
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // CAS halfword, acquire+release ordering.
  enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // CAS byte, acquire+release ordering.
  enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3422 
  // auxiliary used for CompareAndSwapX to set result register
  // (reads the flags left by the cmpxchg encodings above: EQ => success,
  // so res becomes 1 on success, 0 on failure).
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    Register res_reg = as_Register($res$$reg);
    __ cset(res_reg, Assembler::EQ);
  %}
 3428 
 3429   // prefetch encodings
 3430 
 3431   enc_class aarch64_enc_prefetchw(memory mem) %{
 3432     Register base = as_Register($mem$$base);
 3433     int index = $mem$$index;
 3434     int scale = $mem$$scale;
 3435     int disp = $mem$$disp;
 3436     if (index == -1) {
 3437       // Fix up any out-of-range offsets.
 3438       assert_different_registers(rscratch1, base);
 3439       Address addr = Address(base, disp);
 3440       addr = __ legitimize_address(addr, 8, rscratch1);
 3441       __ prfm(addr, PSTL1KEEP);
 3442     } else {
 3443       Register index_reg = as_Register(index);
 3444       if (disp == 0) {
 3445         __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
 3446       } else {
 3447         __ lea(rscratch1, Address(base, disp));
 3448 	__ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
 3449       }
 3450     }
 3451   %}
 3452 
 3453   // mov encodings
 3454 
  // Load a 32-bit immediate; a zero constant is materialized from zr.
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    uint32_t con = (uint32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // Load a 64-bit immediate; a zero constant is materialized from zr.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    Register dst_reg = as_Register($dst$$reg);
    uint64_t con = (uint64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}

  // Load a pointer constant, dispatching on its relocation type:
  // oops and metadata go through the relocation-aware movoop/mov_metadata;
  // other addresses use adrp+add when they are valid, reachable addresses,
  // otherwise a plain immediate mov. Null and 1 are handled by the
  // dedicated mov_p0/mov_p1 encodings below.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == nullptr || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none || rtype == relocInfo::external_word_type, "unexpected reloc type");
        if (! __ is_valid_AArch64_address(con) ||
            con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          uint64_t offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}

  // Load the null pointer constant.
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Load the pointer constant 1.
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (uint64_t)1);
  %}

  // Load a narrow (compressed) oop constant via its relocation.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == nullptr) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // Load the null narrow oop constant.
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Load a narrow (compressed) klass constant via its relocation.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == nullptr) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
 3538 
 3539   // arithmetic encodings
 3540 
  // 32-bit add/subtract with immediate. The same encoding serves both ops:
  // $primary distinguishes them, and a negative effective immediate is
  // flipped to the opposite instruction so the operand always encodes as a
  // positive add/sub immediate.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // 64-bit add/subtract with immediate; same primary/negation scheme as
  // the 32-bit variant above.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}

  // 32-bit signed division with Java semantics (corrected_idivl handles
  // the MIN_VALUE / -1 overflow case); final arg false selects quotient.
  enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 64-bit signed division with Java semantics; quotient result.
  enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 32-bit signed remainder; final arg true selects the remainder.
  enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}

  // 64-bit signed remainder.
  enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}
 3594 
 3595   // compare instruction encodings
 3596 
  // 32-bit register-register compare.
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32-bit compare against an add/sub-encodable immediate: a compare is a
  // subtract into zr, and a negative immediate flips to adds of -val.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // 32-bit compare against a general immediate; materialized in rscratch1.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    Register reg1 = as_Register($src1$$reg);
    uint32_t val = (uint32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}

  // 64-bit register-register compare.
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare against a 12-bit add/sub immediate; the val == -val
  // test catches Long.MIN_VALUE, whose negation cannot be represented.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      __ orr(rscratch1, zr, (uint64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // 64-bit compare against a general immediate; materialized in rscratch1.
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    Register reg1 = as_Register($src1$$reg);
    uint64_t val = (uint64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}

  // Pointer compare (64-bit).
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // Narrow-oop compare (32-bit).
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // Pointer null test.
  enc_class aarch64_enc_testp(iRegP src) %{
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // Narrow-oop null test.
  enc_class aarch64_enc_testn(iRegN src) %{
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
 3668 
  // Unconditional branch to a label.
  enc_class aarch64_enc_b(label lbl) %{
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // Conditional branch using a signed condition code.
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // Conditional branch using an unsigned condition code; the cmpOpU
  // operand supplies an already-mapped condition, so the emission is
  // identical to the signed case.
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}
 3683 
  // Slow-path subtype check: scans the secondary-supers list via
  // check_klass_subtype_slow_path. On the hit path, $primary selects
  // whether result is cleared to zero before falling through to the
  // miss label (which is bound immediately after, so "miss" just falls
  // out with the condition codes set).
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     nullptr, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
 3700 
  // Emit a Java static (or optimized-virtual) call. Three cases:
  //  1. no _method: a runtime-stub call via a trampoline;
  //  2. the ensureMaterializedForStackWalk intrinsic: elide the call,
  //     emitting a nop so code size is unchanged;
  //  3. a real Java call: trampoline call with the proper static/
  //     opt-virtual relocation, plus a to-interpreter stub (shared
  //     between call sites when the callee is statically bound and the
  //     code buffer supports shared stubs).
  // Any trampoline/stub emission can fail when the code cache is full;
  // each failure path records a bailout and returns early.
  enc_class aarch64_enc_java_static_call(method meth) %{
    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type));
      if (call == nullptr) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else if (_method->intrinsic_id() == vmIntrinsicID::_ensureMaterializedForStackWalk) {
      // The NOP here is purely to ensure that eliding a call to
      // JVM_EnsureMaterializedForStackWalk doesn't change the code size.
      __ nop();
      __ block_comment("call JVM_EnsureMaterializedForStackWalk (elided)");
    } else {
      int method_index = resolved_method_index(masm);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec));
      if (call == nullptr) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
      if (CodeBuffer::supports_shared_stubs() && _method->can_be_statically_bound()) {
        // Calls of the same statically bound method can share
        // a stub to the interpreter.
        __ code()->shared_stub_to_interp_for(_method, call - __ begin());
      } else {
        // Emit stub for static call
        address stub = CompiledDirectCall::emit_to_interp_stub(masm, call);
        if (stub == nullptr) {
          ciEnv::current()->record_failure("CodeCache is full");
          return;
        }
      }
    }

    __ post_call_nop();

    // Only non uncommon_trap calls need to reinitialize ptrue.
    if (Compile::current()->max_vector_size() > 0 && uncommon_trap_request() == 0) {
      __ reinitialize_ptrue();
    }
  %}
 3746 
  // Emit a Java dynamic (virtual/interface) call through an inline cache.
  // Bails out if the code cache is full; reinitializes the SVE ptrue
  // predicate after the call when vector code is in use.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    int method_index = resolved_method_index(masm);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
    __ post_call_nop();
    if (Compile::current()->max_vector_size() > 0) {
      __ reinitialize_ptrue();
    }
  %}
 3759 
  // Call epilog. The VerifyStackAtCalls check is not implemented on
  // AArch64 — enabling the flag deliberately hits call_Unimplemented().
  enc_class aarch64_enc_call_epilog() %{
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
 3766 
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blr
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target lives in the code cache: reachable via a trampoline call.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == nullptr) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
      __ post_call_nop();
    } else {
      Label retaddr;
      // Make the anchor frame walkable: record the return PC in the
      // thread's last_Java_pc before the indirect call.
      __ adr(rscratch2, retaddr);
      __ str(rscratch2, Address(rthread, JavaThread::last_Java_pc_offset()));
      __ lea(rscratch1, RuntimeAddress(entry));
      __ blr(rscratch1);
      __ bind(retaddr);
      __ post_call_nop();
    }
    // Restore the SVE all-true predicate after the call if vector code
    // is in use.
    if (Compile::current()->max_vector_size() > 0) {
      __ reinitialize_ptrue();
    }
  %}
 3795 
  // Jump to the shared rethrow stub (exception oop expected per the
  // rethrow calling convention).
  enc_class aarch64_enc_rethrow() %{
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}

  // Method return; in debug builds first verify the SVE ptrue predicate
  // is intact when vector code is in use.
  enc_class aarch64_enc_ret() %{
#ifdef ASSERT
    if (Compile::current()->max_vector_size() > 0) {
      __ verify_ptrue();
    }
#endif
    __ ret(lr);
  %}

  // Tail call: indirect jump to the target method entry.
  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}

  // Tail jump for exception forwarding.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
 3822 
 3823 %}
 3824 
 3825 //----------FRAME--------------------------------------------------------------
 3826 // Definition of frame structure and management information.
 3827 //
 3828 //  S T A C K   L A Y O U T    Allocators stack-slot number
 3829 //                             |   (to get allocators register number
 3830 //  G  Owned by    |        |  v    add OptoReg::stack0())
 3831 //  r   CALLER     |        |
 3832 //  o     |        +--------+      pad to even-align allocators stack-slot
 3833 //  w     V        |  pad0  |        numbers; owned by CALLER
 3834 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
 3835 //  h     ^        |   in   |  5
 3836 //        |        |  args  |  4   Holes in incoming args owned by SELF
 3837 //  |     |        |        |  3
 3838 //  |     |        +--------+
 3839 //  V     |        | old out|      Empty on Intel, window on Sparc
 3840 //        |    old |preserve|      Must be even aligned.
 3841 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
 3842 //        |        |   in   |  3   area for Intel ret address
 3843 //     Owned by    |preserve|      Empty on Sparc.
 3844 //       SELF      +--------+
 3845 //        |        |  pad2  |  2   pad to align old SP
 3846 //        |        +--------+  1
 3847 //        |        | locks  |  0
 3848 //        |        +--------+----> OptoReg::stack0(), even aligned
 3849 //        |        |  pad1  | 11   pad to align new SP
 3850 //        |        +--------+
 3851 //        |        |        | 10
 3852 //        |        | spills |  9   spills
 3853 //        V        |        |  8   (pad0 slot for callee)
 3854 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
 3855 //        ^        |  out   |  7
 3856 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
 3857 //     Owned by    +--------+
 3858 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
 3859 //        |    new |preserve|      Must be even-aligned.
 3860 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
 3861 //        |        |        |
 3862 //
 3863 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
 3864 //         known from SELF's arguments and the Java calling convention.
 3865 //         Region 6-7 is determined per call site.
 3866 // Note 2: If the calling convention leaves holes in the incoming argument
 3867 //         area, those holes are owned by SELF.  Holes in the outgoing area
 3868 //         are owned by the CALLEE.  Holes should not be necessary in the
 3869 //         incoming area, as the Java calling convention is completely under
 3870 //         the control of the AD file.  Doubles can be sorted and packed to
 3871 //         avoid holes.  Holes in the outgoing arguments may be necessary for
 3872 //         varargs C calling conventions.
 3873 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
 3874 //         even aligned with pad0 as needed.
 3875 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
 3876 //           (the latter is true on Intel but is it false on AArch64?)
 3877 //         region 6-11 is even aligned; it may be padded out more so that
 3878 //         the region from SP to FP meets the minimum stack alignment.
 3879 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
 3880 //         alignment.  Region 11, pad1, may be dynamically extended so that
 3881 //         SP meets the minimum alignment.
 3882 
frame %{
  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or Method for I2C.
  inline_cache_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low-half register for each ideal return type, indexed by ideal_reg.
    // Integer-like values return in R0; floats/doubles in V0.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // High-half register (OptoReg::Bad for 32-bit-wide types).
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                      // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
 3954 
 3955 //----------ATTRIBUTES---------------------------------------------------------
 3956 //----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
// Default attribute values applied to every instruct definition below
// unless overridden per-instruction.
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction

// Whether this node is expanded during code emission into a sequence of
// instructions and the first instruction can perform an implicit null check.
ins_attrib ins_is_late_expanded_null_check_candidate(false);
 3976 
 3977 //----------OPERANDS-----------------------------------------------------------
 3978 // Operand definitions must precede instruction definitions for correct parsing
 3979 // in the ADLC because operands constitute user defined types which are used in
 3980 // instruction definitions.
 3981 
 3982 //----------Simple Operands----------------------------------------------------
 3983 
// Integer operands 32 bit
// 32 bit immediate
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift values for add/sub extension shift
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer strictly greater than 1
operand immI_gt_1()
%{
  predicate(n->get_int() > 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer no greater than 4 (note: the predicate imposes no lower bound)
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Specific bit positions (16/24/32/48/56), typically shift amounts
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Low 8-bit mask (0xFF)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Low 16-bit mask (0xFFFF)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer strictly greater than zero
operand immI_positive()
%{
  predicate(n->get_int() > 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// BoolTest condition for signed compare
operand immI_cmp_cond()
%{
  predicate(!Matcher::is_unsigned_booltest_pred(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// BoolTest condition for unsigned compare
operand immI_cmpU_cond()
%{
  predicate(Matcher::is_unsigned_booltest_pred(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4160 
// Long immediates equal to the low 8/16/32-bit masks
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4190 
 4191 operand immL_bitmask()
 4192 %{
 4193   predicate((n->get_long() != 0)
 4194             && ((n->get_long() & 0xc000000000000000l) == 0)
 4195             && is_power_of_2(n->get_long() + 1));
 4196   match(ConL);
 4197 
 4198   op_cost(0);
 4199   format %{ %}
 4200   interface(CONST_INTER);
 4201 %}
 4202 
// 32 bit immediate that is a contiguous mask of low-order one bits (2^k - 1),
// non-zero, with the top two bits clear so that (value + 1) cannot wrap
operand immI_bitmask()
%{
  predicate((n->get_int() != 0)
            && ((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long immediate that is a non-zero contiguous low-order bit mask fitting
// in the positive 32-bit range (value < 0x80000000)
operand immL_positive_bitmaskI()
%{
  predicate((n->get_long() != 0)
            && ((julong)n->get_long() < 0x80000000ULL)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4226 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 5 bit signed integer
operand immI5()
%{
  predicate(Assembler::is_simm(n->get_int(), 5));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 7 bit unsigned integer
operand immIU7()
%{
  predicate(Assembler::is_uimm(n->get_int(), 7));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for scaled or unscaled immediate loads and stores.
// The second argument to offset_ok_for_immed is log2 of the access size
// in bytes, so immIOffsetN below validates the offset for an N-byte access.
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 1-byte access (shift 0 — same check as immIOffset)
operand immIOffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 2-byte access
operand immIOffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 4-byte access
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an 8-byte access
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 16-byte access
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4320 
// Long offset within the overall range covered by any immediate addressing
// mode: [-256, 65520] (unscaled simm9 low end up to scaled 8-byte uimm12 top)
operand immLOffset()
%{
  predicate(n->get_long() >= -256 && n->get_long() <= 65520);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offsets valid for an N-byte access; the second argument to
// offset_ok_for_immed is log2 of the access size in bytes.
operand immLoffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immLoffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 5 bit signed long integer
operand immL5()
%{
  predicate(Assembler::is_simm(n->get_long(), 5));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 7 bit unsigned long integer
operand immLU7()
%{
  predicate(Assembler::is_uimm(n->get_long(), 7));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4402 
// 8 bit signed value.
operand immI8()
%{
  predicate(n->get_int() <= 127 && n->get_int() >= -128);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 8 bit signed value (simm8), or #simm8 LSL 8.
// Valid immediate forms for the SVE DUP instruction.
operand immIDupV()
%{
  predicate(Assembler::operand_valid_for_sve_dup_immediate((int64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 8 bit signed value (simm8), or #simm8 LSL 8.
operand immLDupV()
%{
  predicate(Assembler::operand_valid_for_sve_dup_immediate(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 8 bit signed value (simm8), or #simm8 LSL 8.
// NOTE(review): n->geth() presumably yields the raw 16-bit half-float
// payload of the ConH node — confirm against the ConH node definition.
operand immHDupV()
%{
  predicate(Assembler::operand_valid_for_sve_dup_immediate((int64_t)n->geth()));
  match(ConH);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4446 
// 8 bit integer valid for vector add sub immediate
operand immBAddSubV()
%{
  predicate(n->get_int() <= 255 && n->get_int() >= -255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((int64_t)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for vector add sub immediate
operand immIAddSubV()
%{
  predicate(Assembler::operand_valid_for_sve_add_sub_immediate((int64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate

// Valid SVE logical immediate at byte element width
operand immBLog()
%{
  predicate(Assembler::operand_valid_for_sve_logical_immediate(BitsPerByte, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid SVE logical immediate at halfword element width
operand immSLog()
%{
  predicate(Assembler::operand_valid_for_sve_logical_immediate(BitsPerShort, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid 32-bit scalar logical immediate
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4510 
// Integer operands 64 bit
// 64 bit immediate
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for addv subv immediate
operand immLAddSubV()
%{
  predicate(Assembler::operand_valid_for_sve_add_sub_immediate(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (uint64_t)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4584 
// Pointer operands
// Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// nullptr Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// AOT Runtime Constants Address
operand immAOTRuntimeConstantsAddress()
%{
  // Check if the address is in the range of AOT Runtime Constants
  predicate(AOTRuntimeConstants::contains((address)(n->get_ptr())));
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4630 
// Float and Double operands
// Double Immediate
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double immediate representable as a packed FP immediate
// (see Assembler::operand_valid_for_float_immediate)
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Half Float (FP16) Immediate
operand immH()
%{
  match(ConH);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float immediate representable as a packed FP immediate
// (see Assembler::operand_valid_for_float_immediate)
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4700 
// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow nullptr Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4731 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
// NOTE(review): unlike iRegINoSp/iRegPNoSp this operand declares no
// op_cost, so it inherits the default op_cost(1) from the op_attrib
// declaration above — confirm this asymmetry is intentional.
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  format %{ %}
  interface(REG_INTER);
%}
 4775 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// This operand is not allowed to use rfp even if
// rfp is not used to hold the frame pointer.
operand iRegPNoSpNoRfp()
%{
  constraint(ALLOC_IN_RC(no_special_no_rfp_ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4832 
// Pointer register operands pinned to a single physical register.
// Used by rules that must produce/consume a value in a specific register
// (e.g. calling-convention or runtime-stub constraints).

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4904 
// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Integer 32 bit Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4971 
 4972 
// Narrow Pointer Register Operands
// Narrow Pointer Register (32 bit encoding of a compressed oop)
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4994 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Generic vector class. This will be used for
// all vector operands, including NEON and SVE.
operand vReg()
%{
  constraint(ALLOC_IN_RC(dynamic));
  match(VecA);
  match(VecD);
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector operands pinned to a specific V register (used by rules that
// require their vector value in a particular physical register).
operand vReg_V10()
%{
  constraint(ALLOC_IN_RC(v10_veca_reg));
  match(vReg);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V11()
%{
  constraint(ALLOC_IN_RC(v11_veca_reg));
  match(vReg);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V12()
%{
  constraint(ALLOC_IN_RC(v12_veca_reg));
  match(vReg);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V13()
%{
  constraint(ALLOC_IN_RC(v13_veca_reg));
  match(vReg);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V17()
%{
  constraint(ALLOC_IN_RC(v17_veca_reg));
  match(vReg);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V18()
%{
  constraint(ALLOC_IN_RC(v18_veca_reg));
  match(vReg);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V23()
%{
  constraint(ALLOC_IN_RC(v23_veca_reg));
  match(vReg);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V24()
%{
  constraint(ALLOC_IN_RC(v24_veca_reg));
  match(vReg);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Fixed-width vector operands: scalable (VecA), 64-bit (VecD), 128-bit (VecX)
operand vecA()
%{
  constraint(ALLOC_IN_RC(vectora_reg));
  match(VecA);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5142 
// Double register operands pinned to a specific V register.
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V4()
%{
  constraint(ALLOC_IN_RC(v4_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V5()
%{
  constraint(ALLOC_IN_RC(v5_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V6()
%{
  constraint(ALLOC_IN_RC(v6_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V7()
%{
  constraint(ALLOC_IN_RC(v7_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V12()
%{
  constraint(ALLOC_IN_RC(v12_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V13()
%{
  constraint(ALLOC_IN_RC(v13_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5232 
// SVE predicate register operands (RegVectMask).
// pReg allocates from the full predicate class; pRegGov from the subset
// usable as a governing predicate; pRegGov_P0/P1 pin to p0/p1.
operand pReg()
%{
  constraint(ALLOC_IN_RC(pr_reg));
  match(RegVectMask);
  match(pRegGov);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand pRegGov()
%{
  constraint(ALLOC_IN_RC(gov_pr));
  match(RegVectMask);
  match(pReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand pRegGov_P0()
%{
  constraint(ALLOC_IN_RC(p0_reg));
  match(RegVectMask);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand pRegGov_P1()
%{
  constraint(ALLOC_IN_RC(p1_reg));
  match(RegVectMask);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5270 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// for floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}

// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5333 
//----------Memory Operands----------------------------------------------------
// In the MEMORY_INTER interfaces below, an index value of 0xffffffff
// encodes "no index register".

// Register-indirect addressing: [$reg].
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// Base plus sign-extended int index shifted by a constant scale:
// $reg + (sxtw($ireg) << $scale). The predicate checks that the scale
// is legal for every memory access sharing this address expression.
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// Base plus long index shifted by a constant scale:
// $reg + ($lreg << $scale); same scale-legality predicate as above.
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// Base plus sign-extended (unscaled) int index: $reg + sxtw($ireg).
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Base plus (unscaled) long index: $reg + $lreg.
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
 5407 
// Base plus int immediate displacement operands: [$reg, #off]. The
// immIOffsetN immediate types (defined earlier in this file) constrain
// the displacement range; one operand variant exists per access width
// (1/2/4/8/16 bytes) so the matcher can pick an encodable offset.

operand indOffI1(iRegP reg, immIOffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI2(iRegP reg, immIOffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5477 
// Base plus long immediate displacement operands: [$reg, #off]. Same
// scheme as the indOffI* operands above, but the displacement comes
// from a long constant (immLoffsetN); one variant per access width.

operand indOffL1(iRegP reg, immLoffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL2(iRegP reg, immLoffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5547 
// Register-indirect through a long value cast to a pointer
// (CastX2P): [$reg].
operand indirectX2P(iRegL reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(CastX2P reg);
  op_cost(0);
  format %{ "[$reg]\t# long -> ptr" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// Long-cast-to-pointer base plus long immediate displacement:
// [$reg, #off].
operand indOffX2P(iRegL reg, immLOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (CastX2P reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# long -> ptr" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5575 
// Narrow-oop memory operands. Each matches a DecodeN feeding an address
// expression and is guarded by CompressedOops::shift() == 0, i.e. these
// forms only apply when compressed oops are unscaled so the narrow
// value can participate directly in the address computation.

// Narrow register-indirect: [$reg].
operand indirectN(iRegN reg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow base plus sign-extended int index shifted by a constant scale;
// the predicate also checks the scale suits all memory uses.
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// Narrow base plus long index shifted by a constant scale.
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// Narrow base plus sign-extended (unscaled) int index.
operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow base plus (unscaled) long index.
operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow base plus int immediate displacement.
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Narrow base plus long immediate displacement.
operand indOffLN(iRegN reg, immLOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5680 
 5681 
 5682 //----------Special Memory Operands--------------------------------------------
 5683 // Stack Slot Operand - This operand is used for loading and storing temporary
 5684 //                      values on the stack where a match requires a value to
 5685 //                      flow through memory.
// Stack-slot operands: SP-relative addresses with the slot's offset as
// displacement. NOTE(review): the "// RSP" comments on base(0x1e) look
// inherited from the x86 AD file; 0x1e here should denote the AArch64
// stack pointer -- confirm against the register encodings at the top of
// this file. Only stackSlotP carries op_cost(100); verify that the
// asymmetry with the other stack-slot operands is intentional.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
 5756 
 5757 // Operands for expressing Control Flow
 5758 // NOTE: Label is a predefined operand which should not be redefined in
 5759 //       the AD file. It is generically handled within the ADLC.
 5760 
 5761 //----------Conditional Branch Operands----------------------------------------
 5762 // Comparison Op  - This is the operation of the comparison, and is limited to
 5763 //                  the following set of codes:
 5764 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
 5765 //
 5766 // Other attributes of the comparison, such as unsignedness, are specified
 5767 // by the comparison instruction that sets a condition code flags register.
 5768 // That result is represented by a flags operand whose subtype is appropriate
 5769 // to the unsignedness (etc.) of the comparison.
 5770 //
 5771 // Later, the instruction which matches both the Comparison Op (a Bool) and
 5772 // the flags (produced by the Cmp) specifies the coding of the comparison op
 5773 // by matching a specific subtype of Bool operand below, such as cmpOpU.
 5774 
// used for signed integral comparisons and fp comparisons
// The hex values are the AArch64 condition-code encodings named by the
// accompanying mnemonics (eq, ne, lt, ge, le, gt, vs, vc).

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 5793 
// used for unsigned integral comparisons
// Same as cmpOp but the inequalities map to the unsigned condition
// codes (lo, hs, ls, hi).

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 5812 
// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions
// The predicate restricts matching to eq/ne Bool tests; the other
// COND_INTER entries are required by the interface but cannot be
// selected through this operand.

operand cmpOpEqNe()
%{
  match(Bool);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 5835 
// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions
// The predicate restricts matching to lt/ge Bool tests.

operand cmpOpLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 5859 
// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions
// The predicate restricts matching to eq/ne/le/gt Bool tests; the
// inequalities use the unsigned condition codes (lo, hs, ls, hi).

operand cmpOpUEqNeLeGt()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq ||
            n->as_Bool()->_test._test == BoolTest::ne ||
            n->as_Bool()->_test._test == BoolTest::le ||
            n->as_Bool()->_test._test == BoolTest::gt);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 5885 
// Special operand allowing long args to int ops to be truncated for free

// Matches (ConvL2I reg) so a 32-bit instruction can consume the long
// register directly, eliding the explicit truncation.
operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}

// Matches (CastX2P reg) so a pointer-typed use can consume the long
// register directly, eliding the cast.
operand iRegL2P(iRegL reg) %{

  op_cost(0);

  match(CastX2P reg);

  format %{ "l2p($reg)" %}

  interface(REG_INTER)
%}
 5909 
// Memory operand classes for vector loads/stores of 2, 4, 8 and 16
// bytes; each admits only the immediate-offset operands sized for that
// access width.
opclass vmem2(indirect, indIndex, indOffI2, indOffL2);
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
 5914 
 5915 //----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
 5917 // instruction definitions by not requiring the AD writer to specify
 5918 // separate instructions for every form of operand when the
 5919 // instruction accepts multiple operand types with the same basic
 5920 // encoding and format. The classic case of this is memory operands.
 5921 
// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address
// memoryN admits only the immediate-offset operands valid for an
// N-byte-wide access; memory4/memory8 additionally accept the narrow
// immediate-offset forms (indOffIN/indOffLN).

opclass memory1(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI1, indOffL1,
                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indirectX2P, indOffX2P);

opclass memory2(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI2, indOffL2,
                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indirectX2P, indOffX2P);

opclass memory4(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI4, indOffL4,
                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P);

opclass memory8(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI8, indOffL8,
                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P);

// All of the memory operands. For the pipeline description.
opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex,
               indOffI1, indOffL1, indOffI2, indOffL2, indOffI4, indOffL4, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P);
 5941 
 5942 
 5943 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
 5944 // operations. it allows the src to be either an iRegI or a (ConvL2I
 5945 // iRegL). in the latter case the l2i normally planted for a ConvL2I
 5946 // can be elided because the 32-bit instruction will just employ the
 5947 // lower 32 bits anyway.
 5948 //
 5949 // n.b. this does not elide all L2I conversions. if the truncated
 5950 // value is consumed by more than one operation then the ConvL2I
 5951 // cannot be bundled into the consuming nodes so an l2i gets planted
 5952 // (actually a movw $dst $src) and the downstream instructions consume
 5953 // the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but it's not too costly.
 5955 
// Source-operand classes accepting either a plain register or the
// cast-eliding variants defined above (see the commentary above).
opclass iRegIorL2I(iRegI, iRegL2I);
opclass iRegPorL2P(iRegP, iRegL2P);
 5958 
 5959 //----------PIPELINE-----------------------------------------------------------
 5960 // Rules which define the behavior of the target architectures pipeline.
 5961 
// For specific pipelines, eg A53, define the stages of that pipeline
//pipe_desc(ISS, EX1, EX2, WR);
// Map the A53-style stage names onto the first four stages of the
// generic 6-stage pipe_desc (S0..S5) declared further down.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
 5968 
 5969 // Integer ALU reg operation
 5970 pipeline %{
 5971 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed-size instructions
  max_instructions_per_bundle = 4;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes
%}
 5981 
 5982 // We don't use an actual pipeline model so don't care about resources
 5983 // or description. we do use pipeline classes to introduce fixed
 5984 // latencies
 5985 
//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine
// INS0/INS1 are the two issue slots (INS01 = either slot); ALU0/ALU1
// the two ALUs; MAC multiply-accumulate; DIV divide; plus branch,
// load/store and NEON/FP units.

resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);
 5996 
//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
// (S0..S3 double as ISS/EX1/EX2/WR via the #defines above)
pipe_desc(S0, S1, S2, S3, S4, S5);
 6002 
 6003 //----------PIPELINE CLASSES---------------------------------------------------
 6004 // Pipeline Classes describe the stages in which input and output are
 6005 // referenced by the hardware pipeline.
 6006 
// FP dyadic (two-source) operation, single precision: sources read in
// S1/S2, result written in S5.
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP dyadic operation, double precision.
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary operation, single precision.
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary operation, double precision.
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP double-to-float conversion.
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP float-to-double conversion.
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
 6062 
// FP <-> integer conversion/move classes: one source read in S1,
// result written in S5, occupying an issue slot and the NEON/FP unit.

// float -> int
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> long
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> float
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> float
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> int
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> long
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> double
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> double
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
 6134 
// FP divide, single precision. Uses INS0 rather than INS01, i.e. it
// can only issue in slot 0.
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP divide, double precision; slot 0 only, as above.
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}
 6154 
// FP conditional select, single precision: reads the flags and both
// sources in S1, writes the result in S3.
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP conditional select, double precision.
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move of an immediate, single precision (no source operands).
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move of an immediate, double precision.
pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP constant load, single precision: result available in S4.
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

// FP constant load, double precision.
pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
 6208 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);  // shifted operand read early, at issue
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1; // NOTE(review): ALU held in EX1 while dst is written in
                // EX2 -- confirm this matches the intended A53 model
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
 6306 
//------- Compare operation -------------------------------

// Compare reg-reg
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-imm
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
 6333 
//------- Conditional instructions ------------------------

// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// Eg.  CNEG    x0, x1, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
 6371 
//------- Multiply pipeline operations --------------------

// Multiply reg-reg (32 bit)
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (32 bit)
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply reg-reg (64 bit)
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (64 bit)
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
 6424 
//------- Divide pipeline operations --------------------

// Integer divide (32 bit)
// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// Long divide (64 bit)
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
 6450 
//------- Load pipeline operations ------------------------

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg (register-offset addressing)
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
 6484 
//------- Store pipeline operations -----------------------

// Store - zr, mem (zero-register store, no data operand)
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
// Note: 'dst' here is the address-forming register (read at ISS),
// 'src' is the data being stored (read at EX2).
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
 6518 
//------- Branch pipeline operations ----------------------
// (header fixed: previously duplicated the "Store pipeline operations" banner)

// Branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch (reads the flags register)
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
 6547 
//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Empty pipeline class
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}

// Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty;
%}
 6611 
 6612 %}
 6613 //----------INSTRUCTIONS-------------------------------------------------------
 6614 //
 6615 // match      -- States which machine-independent subtree may be replaced
 6616 //               by this instruction.
 6617 // ins_cost   -- The estimated cost of this instruction is used by instruction
 6618 //               selection to identify a minimum cost tree of machine
 6619 //               instructions that matches a tree of machine-independent
 6620 //               instructions.
 6621 // format     -- A string providing the disassembly for this instruction.
 6622 //               The value of an instruction's operand may be inserted
 6623 //               by referring to it with a '$' prefix.
 6624 // opcode     -- Three instruction opcodes may be provided.  These are referred
 6625 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
 6627 //               indicate the type of machine instruction, while secondary
 6628 //               and tertiary are often used for prefix options or addressing
 6629 //               modes.
 6630 // ins_encode -- A list of encode classes with parameters. The encode class
 6631 //               name must have been defined in an 'enc_class' specification
 6632 //               in the encode section of the architecture description.
 6633 
 6634 // ============================================================================
 6635 // Memory (Load/Store) Instructions
 6636 
// Load Instructions

// Load Byte (8 bit signed)
instruct loadB(iRegINoSp dst, memory1 mem)
%{
  match(Set dst (LoadB mem));
  // Plain loads only; acquiring (volatile) loads match the ldar forms below.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L(iRegLNoSp dst, memory1 mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  // n is the ConvI2L here, so the load is n->in(1).
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory1 mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L(iRegLNoSp dst, memory1 mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6694 
// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory2 mem)
%{
  match(Set dst (LoadS mem));
  // Plain loads only; acquiring (volatile) loads match the ldar forms below.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory2 mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory2 mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory2 mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6750 
// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory4 mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long
instruct loadI2L(iRegLNoSp dst, memory4 mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
// The zero-extension implied by the AndL with the 32-bit mask comes for
// free from ldrw, so the mask needs no extra instruction.
instruct loadUI2L(iRegLNoSp dst, memory4 mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  // The load is two levels down: AndL -> ConvI2L -> LoadI.
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Long (64 bit signed)
instruct loadL(iRegLNoSp dst, memory8 mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6806 
// Load Range (array length; never needs acquire semantics)
instruct loadRange(iRegINoSp dst, memory4 mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
instruct loadP(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadP mem));
  // barrier_data() == 0: only match when no GC barrier is attached;
  // barrier'd loads are presumably matched elsewhere (GC-specific rules).
  predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n) && n->as_Load()->barrier_data() == 0);

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer (non-compact headers; see the
// UseCompactObjectHeaders variant below for the shifted form)
instruct loadNKlass(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n) && !UseCompactObjectHeaders);

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6875 
// Load Narrow Klass Pointer when the klass bits live in the (shifted)
// mark word: load the word, then shift the klass field down.
instruct loadNKlassCompactHeaders(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n) && UseCompactObjectHeaders);

  ins_cost(4 * INSN_COST);
  format %{
    "ldrw  $dst, $mem\t# compressed class ptr, shifted\n\t"
    "lsrw  $dst, $dst, markWord::klass_shift_at_offset"
  %}
  ins_encode %{
    // inlined aarch64_enc_ldrw
    loadStore(masm, &MacroAssembler::ldrw, $dst$$Register, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    // Drop the low non-klass bits so dst holds just the narrow klass value.
    __ lsrw($dst$$Register, $dst$$Register, markWord::klass_shift_at_offset);
  %}
  ins_pipe(iload_reg_mem);
%}
 6894 
// Load Float
instruct loadF(vRegF dst, memory4 mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  // FP loads use the generic memory pipe class, not iload_reg_mem.
  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory8 mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
 6922 
 6923 
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant
// Costed higher than the immediate forms: an arbitrary pointer may need
// a multi-instruction materialization.
instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant

instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# nullptr ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
 6979 
 6980 // Load Pointer Constant One
 6981 
 6982 instruct loadConP1(iRegPNoSp dst, immP_1 con)
 6983 %{
 6984   match(Set dst con);
 6985 
 6986   ins_cost(INSN_COST);
 6987   format %{ "mov  $dst, $con\t# nullptr ptr" %}
 6988 
 6989   ins_encode(aarch64_enc_mov_p1(dst, con));
 6990 
 6991   ins_pipe(ialu_imm);
 6992 %}
 6993 
// Load the address of an AOT runtime-constants entry (pc-relative adr).
instruct loadAOTRCAddress(iRegPNoSp dst, immAOTRuntimeConstantsAddress con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# AOT Runtime Constants Address" %}

  ins_encode %{
    __ load_aotrc_address($dst$$Register, (address)$con$$constant);
  %}

  ins_pipe(ialu_imm);
%}
 7007 
// Load Narrow Pointer Constant

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant

instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed nullptr ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant

instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
 7049 
// Load Packed Float Constant
// (a float expressible as an fmov immediate — no constant-table access)

instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}

// Load Float Constant (general case: fetched from the constant table)

instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}
 7080 
// Load Packed Double Constant
// (a double expressible as an fmov immediate — no constant-table access)

instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
 7093 
 7094 // Load Double Constant
 7095 
 7096 instruct loadConD(vRegD dst, immD con) %{
 7097   match(Set dst con);
 7098 
 7099   ins_cost(INSN_COST * 5);
 7100   format %{
 7101     "ldrd $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
 7102   %}
 7103 
 7104   ins_encode %{
 7105     __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
 7106   %}
 7107 
 7108   ins_pipe(fp_load_constant_d);
 7109 %}
 7110 
// Load Half Float Constant
// Materialize the raw 16-bit pattern via a scratch GPR, then move it
// into the FP register.
instruct loadConH(vRegF dst, immH con) %{
  match(Set dst con);
  format %{ "mov    rscratch1, $con\n\t"
            "fmov   $dst, rscratch1"
         %}
  ins_encode %{
    __ movw(rscratch1, (uint32_t)$con$$constant);
    __ fmovs($dst$$FloatRegister, rscratch1);
  %}
  ins_pipe(pipe_class_default);
%}
 7123 
// Store Instructions

// Store Byte
instruct storeB(iRegIorL2I src, memory1 mem)
%{
  match(Set mem (StoreB mem src));
  // Plain stores only; releasing (volatile) stores match the stlr forms below.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7139 
 7140 
 7141 instruct storeimmB0(immI0 zero, memory1 mem)
 7142 %{
 7143   match(Set mem (StoreB mem zero));
 7144   predicate(!needs_releasing_store(n));
 7145 
 7146   ins_cost(INSN_COST);
 7147   format %{ "strb rscractch2, $mem\t# byte" %}
 7148 
 7149   ins_encode(aarch64_enc_strb0(mem));
 7150 
 7151   ins_pipe(istore_mem);
 7152 %}
 7153 
// Store Char/Short
instruct storeC(iRegIorL2I src, memory2 mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store zero Char/Short
instruct storeimmC0(immI0 zero, memory2 mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}

// Store Integer

instruct storeI(iRegIorL2I src, memory4 mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store zero Integer
instruct storeimmI0(immI0 zero, memory4 mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
 7208 
// Store Long (64 bit signed)
instruct storeL(iRegL src, memory8 mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store zero Long (64 bit signed)
instruct storeimmL0(immL0 zero, memory8 mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}

// Store Pointer
instruct storeP(iRegP src, memory8 mem)
%{
  match(Set mem (StoreP mem src));
  // barrier_data() == 0: only match when no GC barrier is attached.
  predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store null Pointer
instruct storeimmP0(immP0 zero, memory8 mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
 7264 
// Store Compressed Pointer
instruct storeN(iRegN src, memory4 mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store zero Compressed Pointer
instruct storeImmN0(immN0 zero, memory4 mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
 7291 
// Store Float
instruct storeF(vRegF src, memory4 mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  // FP stores use the generic memory pipe class, not istore_reg_mem.
  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD(vRegD src, memory8 mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory4 mem)
%{
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7336 
// TODO
// implement storeImmD0 and storeDImmPacked

// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

instruct prefetchalloc( memory8 mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
 7353 
//  ---------------- volatile loads and stores ----------------
//  These match the acquiring/releasing accesses excluded by the
//  needs_acquiring_load / needs_releasing_store predicates above and use
//  ldar*/stlr* with indirect (base-register only) addressing.

// Load Byte (8 bit signed)
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}
 7407 
// Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Char (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
 7445 
 7446 // Load Short/Char (16 bit signed) into long
 7447 instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7448 %{
 7449   match(Set dst (ConvI2L (LoadS mem)));
 7450 
 7451   ins_cost(VOLATILE_REF_COST);
 7452   format %{ "ldarh  $dst, $mem\t# short" %}
 7453 
 7454   ins_encode(aarch64_enc_ldarsh(dst, mem));
 7455 
 7456   ins_pipe(pipe_serial);
 7457 %}
 7458 
// Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
// ldarw zero-extends, so the AndL mask needs no extra instruction.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Long (64 bit signed)
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Pointer
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));
  // Only match when no GC barrier is attached.
  predicate(n->as_Load()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));
  predicate(n->as_Load()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
 7524 %}
 7525 
// Load Float
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
 7551 
// Store Byte (release semantics via stlr)
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store zero Byte
instruct storeimmB0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb0(mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store zero Char/Short
instruct storeimmC0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh0(mem));

  ins_pipe(pipe_class_memory);
%}
 7601 
 7602 // Store Integer
 7603 
 7604 instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
 7605 %{
 7606   match(Set mem(StoreI mem src));
 7607 
 7608   ins_cost(VOLATILE_REF_COST);
 7609   format %{ "stlrw  $src, $mem\t# int" %}
 7610 
 7611   ins_encode(aarch64_enc_stlrw(src, mem));
 7612 
 7613   ins_pipe(pipe_class_memory);
 7614 %}
 7615 
 7616 instruct storeimmI0_volatile(immI0 zero, /* sync_memory*/indirect mem)
 7617 %{
 7618   match(Set mem(StoreI mem zero));
 7619 
 7620   ins_cost(VOLATILE_REF_COST);
 7621   format %{ "stlrw  zr, $mem\t# int" %}
 7622 
 7623   ins_encode(aarch64_enc_stlrw0(mem));
 7624 
 7625   ins_pipe(pipe_class_memory);
 7626 %}
 7627 
 7628 // Store Long (64 bit signed)
 7629 instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
 7630 %{
 7631   match(Set mem (StoreL mem src));
 7632 
 7633   ins_cost(VOLATILE_REF_COST);
 7634   format %{ "stlr  $src, $mem\t# int" %}
 7635 
 7636   ins_encode(aarch64_enc_stlr(src, mem));
 7637 
 7638   ins_pipe(pipe_class_memory);
 7639 %}
 7640 
 7641 instruct storeimmL0_volatile(immL0 zero, /* sync_memory*/indirect mem)
 7642 %{
 7643   match(Set mem (StoreL mem zero));
 7644 
 7645   ins_cost(VOLATILE_REF_COST);
 7646   format %{ "stlr  zr, $mem\t# int" %}
 7647 
 7648   ins_encode(aarch64_enc_stlr0(mem));
 7649 
 7650   ins_pipe(pipe_class_memory);
 7651 %}
 7652 
// Store Pointer
// Volatile oop store: stlr gives release semantics. Only matches when no
// GC barrier is attached; barrier variants are generated elsewhere.
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Volatile null-pointer store via the zero register.
instruct storeimmP0_volatile(immP0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr0(mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
// Volatile narrow-oop store: 32-bit stlrw.
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Volatile narrow-oop null store via the zero register.
instruct storeimmN0_volatile(immN0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  zr, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw0(mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float
// Volatile float store: releasing 32-bit store from an FP register.
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
// Volatile double store: releasing 64-bit store from an FP register.
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 7735 
 7736 //  ---------------- end of volatile loads and stores ----------------
 7737 
// Write back (flush) the data cache line containing addr. Only available
// when the CPU advertises a data cache line flush capability.
instruct cacheWB(indirect addr)
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWB addr);

  ins_cost(100);
  format %{"cache wb $addr" %}
  ins_encode %{
    // The address must be a plain base register: no index, no displacement.
    assert($addr->index_position() < 0, "should be");
    assert($addr$$disp == 0, "should be");
    __ cache_wb(Address($addr$$base$$Register, 0));
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Ordering barrier emitted before a sequence of cache write-backs.
instruct cacheWBPreSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPreSync);

  ins_cost(100);
  format %{"cache wb presync" %}
  ins_encode %{
    __ cache_wbsync(true);
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Ordering barrier emitted after a sequence of cache write-backs.
instruct cacheWBPostSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPostSync);

  ins_cost(100);
  format %{"cache wb postsync" %}
  ins_encode %{
    __ cache_wbsync(false);
  %}
  ins_pipe(pipe_slow); // XXX
%}
 7778 
 7779 // ============================================================================
 7780 // BSWAP Instructions
 7781 
// Reverse the byte order of a 32-bit value (Integer.reverseBytes).
instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Reverse the byte order of a 64-bit value (Long.reverseBytes).
instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Byte-swap the low 16 bits, zero-extended result (Character.reverseBytes).
instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Byte-swap the low 16 bits, then sign-extend to 32 bits via sbfmw
// (Short.reverseBytes returns a signed short).
instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}
 7835 
 7836 // ============================================================================
 7837 // Zero Count Instructions
 7838 
// Count leading zeros of a 32-bit value: direct clzw instruction.
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountLeadingZerosI src));

  ins_cost(INSN_COST);
  format %{ "clzw  $dst, $src" %}
  ins_encode %{
    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count leading zeros of a 64-bit value: direct clz instruction.
instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountLeadingZerosL src));

  ins_cost(INSN_COST);
  format %{ "clz   $dst, $src" %}
  ins_encode %{
    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count trailing zeros (32-bit). AArch64 has no ctz instruction, so
// bit-reverse first (rbitw) and count leading zeros of the result.
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountTrailingZerosI src));

  ins_cost(INSN_COST * 2);
  format %{ "rbitw  $dst, $src\n\t"
            "clzw   $dst, $dst" %}
  ins_encode %{
    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count trailing zeros (64-bit): rbit followed by clz, as above.
instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountTrailingZerosL src));

  ins_cost(INSN_COST * 2);
  format %{ "rbit   $dst, $src\n\t"
            "clz    $dst, $dst" %}
  ins_encode %{
    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
 7890 
 7891 //---------- Population Count Instructions -------------------------------------
 7892 //
 7893 
// Population count of a 32-bit value. The cnt instruction only operates
// on SIMD registers, so the value is moved into an FP/SIMD temp, counted
// per byte lane (8B), summed across lanes (addv), and moved back.
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "fmovs  $tmp, $src\t# vector (1S)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ fmovs($tmp$$FloatRegister, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// PopCountI fused with a memory load: load the int directly into the
// SIMD temp, avoiding the GPR round-trip.
instruct popCountI_mem(iRegINoSp dst, memory4 mem, vRegF tmp) %{
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(masm, &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
// Same SIMD cnt/addv sequence as popCountI, on a full 64-bit lane.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// PopCountL fused with a memory load, as popCountI_mem but 8 bytes.
instruct popCountL_mem(iRegINoSp dst, memory8 mem, vRegD tmp) %{
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(masm, &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
 7974 
 7975 // ============================================================================
 7976 // VerifyVectorAlignment Instruction
 7977 
// Debug-only alignment check: test addr against the alignment mask and
// stop the VM if any masked bit is set (misaligned vector access).
instruct verify_vector_alignment(iRegP addr, immL_positive_bitmaskI mask, rFlagsReg cr) %{
  match(Set addr (VerifyVectorAlignment addr mask));
  effect(KILL cr);
  format %{ "verify_vector_alignment $addr $mask \t! verify alignment" %}
  ins_encode %{
    Label Lskip;
    // check if masked bits of addr are zero
    __ tst($addr$$Register, $mask$$constant);
    __ br(Assembler::EQ, Lskip);
    __ stop("verify_vector_alignment found a misaligned vector memory access");
    __ bind(Lskip);
  %}
  ins_pipe(pipe_slow);
%}
 7992 
 7993 // ============================================================================
 7994 // MemBar Instruction
 7995 
// LoadFence: orders prior loads against subsequent loads and stores.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}

// Acquire barrier elided when the preceding load already provides
// acquire semantics (see unnecessary_acquire()); emits only a comment.
instruct unnecessary_membar_acquire() %{
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}

// Full acquire barrier: dmb ishld (LoadLoad|LoadStore).
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire\n\t"
            "dmb ishld" %}

  ins_encode %{
    __ block_comment("membar_acquire");
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}


// Lock acquisition already orders memory on AArch64, so this barrier
// emits nothing beyond a comment.
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// StoreFence: orders prior loads and stores against subsequent stores.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// Release barrier elided when the following store already provides
// release semantics (see unnecessary_release()).
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}

// Full release barrier: StoreStore + LoadStore.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release\n\t"
            "dmb ishst\n\tdmb ishld" %}

  ins_encode %{
    __ block_comment("membar_release");
    // These will be merged if AlwaysMergeDMB is enabled.
    __ membar(Assembler::StoreStore);
    __ membar(Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}

// Store-store barrier (also used for StoreStoreFence).
instruct membar_storestore() %{
  match(MemBarStoreStore);
  match(StoreStoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// Lock release already orders memory on AArch64; comment only.
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Volatile barrier elided when surrounding accesses make it redundant
// (see unnecessary_volatile()).
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Full volatile barrier: StoreLoad, the most expensive ordering.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile\n\t"
             "dmb ish"%}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
 8146 
 8147 // ============================================================================
 8148 // Cast/Convert Instructions
 8149 
// Reinterpret a long as a pointer; a register move, skipped when the
// allocator assigned the same register.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Reinterpret a pointer as a long; register move, elided if same register.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Convert oop into int for vectors alignment masking
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    // movw keeps the low 32 bits, which is all ConvL2I requires.
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
 8192 
 8193 // Convert compressed oop into int for vectors alignment masking
 8194 // in case of 32bit oops (heap < 4Gb).
 8195 instruct convN2I(iRegINoSp dst, iRegN src)
 8196 %{
 8197   predicate(CompressedOops::shift() == 0);
 8198   match(Set dst (ConvL2I (CastP2X (DecodeN src))));
 8199 
 8200   ins_cost(INSN_COST);
 8201   format %{ "mov dst, $src\t# compressed ptr -> int" %}
 8202   ins_encode %{
 8203     __ movw($dst$$Register, $src$$Register);
 8204   %}
 8205 
 8206   ins_pipe(ialu_reg);
 8207 %}
 8208 
 8209 
 8210 // Convert oop pointer into compressed form
// Compress an oop that may be null; encode_heap_oop handles the null
// case, and flags are clobbered (KILL cr).
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Compress an oop statically known to be non-null; no null check needed.
// NOTE(review): cr is declared but carries no KILL effect here — confirm
// encode_heap_oop_not_null preserves flags.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop that may be null.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop known to be non-null (or a constant).
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
 8263 
 8264 // n.b. AArch64 implementations of encode_klass_not_null and
 8265 // decode_klass_not_null do not modify the flags register so, unlike
 8266 // Intel, we don't kill CR as a side effect here
 8267 
// Compress a klass pointer (never null).
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// Decompress a narrow klass pointer (never null). The one-register form
// of decode_klass_not_null is used for the in-place case.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
 8301 
// CheckCastPP is a type-system node only; generates no code (size 0).
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// CastPP: compiler-only pointer cast, no code.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// CastII: no code unless VerifyConstraintCasts selects the checked form.
instruct castII(iRegI dst)
%{
  predicate(VerifyConstraintCasts == 0);
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// Checked CastII: verify at runtime that the value lies in the node's
// declared int range (debug aid, enabled by VerifyConstraintCasts).
instruct castII_checked(iRegI dst, rFlagsReg cr)
%{
  predicate(VerifyConstraintCasts > 0);
  match(Set dst (CastII dst));
  effect(KILL cr);

  format %{ "# castII_checked of $dst" %}
  ins_encode %{
    __ verify_int_in_range(_idx, bottom_type()->is_int(), $dst$$Register, rscratch1);
  %}
  ins_pipe(pipe_slow);
%}

// CastLL: no code unless VerifyConstraintCasts selects the checked form.
instruct castLL(iRegL dst)
%{
  predicate(VerifyConstraintCasts == 0);
  match(Set dst (CastLL dst));

  size(0);
  format %{ "# castLL of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// Checked CastLL: runtime range verification for long values.
instruct castLL_checked(iRegL dst, rFlagsReg cr)
%{
  predicate(VerifyConstraintCasts > 0);
  match(Set dst (CastLL dst));
  effect(KILL cr);

  format %{ "# castLL_checked of $dst" %}
  ins_encode %{
    __ verify_long_in_range(_idx, bottom_type()->is_long(), $dst$$Register, rscratch1);
  %}
  ins_pipe(pipe_slow);
%}

// CastHH (half-float): compiler-only, no code.
instruct castHH(vRegF dst)
%{
  match(Set dst (CastHH dst));
  size(0);
  format %{ "# castHH of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// CastFF: compiler-only, no code.
instruct castFF(vRegF dst)
%{
  match(Set dst (CastFF dst));

  size(0);
  format %{ "# castFF of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// CastDD: compiler-only, no code.
instruct castDD(vRegD dst)
%{
  match(Set dst (CastDD dst));

  size(0);
  format %{ "# castDD of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// CastVV for vector registers: compiler-only, no code.
instruct castVV(vReg dst)
%{
  match(Set dst (CastVV dst));

  size(0);
  format %{ "# castVV of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// CastVV for SVE predicate (governing) registers: no code.
instruct castVVMask(pRegGov dst)
%{
  match(Set dst (CastVV dst));

  size(0);
  format %{ "# castVV of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
 8425 
 8426 // ============================================================================
 8427 // Atomic operation instructions
 8428 //
 8429 
 8430 // standard CompareAndSwapX when we are using barriers
 8431 // these have higher priority than the rules selected by a predicate
 8432 
 8433 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
 8434 // can't match them
 8435 
// CAS byte with full barriers; res <- 1 on success, 0 on failure (cset EQ).
instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS short with full barriers.
instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS int with full barriers.
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS long with full barriers.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS oop with full barriers; only when no GC barrier is attached.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  predicate(n->as_LoadStore()->barrier_data() == 0);
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS narrow oop with full barriers; only when no GC barrier is attached.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  predicate(n->as_LoadStore()->barrier_data() == 0);
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8545 
 8546 // alternative CompareAndSwapX when we are eliding barriers
 8547 
// Acquiring CAS byte: used when needs_acquiring_load_exclusive(n) shows
// the surrounding barriers can be elided; cheaper than the barrier form.
instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring CAS short.
instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring CAS int.
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring CAS long.
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring CAS oop; additionally requires no attached GC barrier.
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8642 
 8643 instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
 8644 
 8645   predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
 8646   match(Set res (CompareAndSwapN mem (Binary oldval newval)));
 8647   ins_cost(VOLATILE_REF_COST);
 8648 
 8649   effect(KILL cr);
 8650 
 8651  format %{
 8652     "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
 8653     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8654  %}
 8655 
 8656  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
 8657             aarch64_enc_cset_eq(res));
 8658 
 8659   ins_pipe(pipe_slow);
 8660 %}
 8661 
 8662 
 8663 // ---------------------------------------------------------------------
 8664 
// BEGIN This section of the file is automatically generated. Do not edit --------------

// Sundry CAS operations.  Note that release is always true,
// regardless of the memory ordering of the CAS.  This is because we
// need the volatile case to be sequentially consistent but there is
// no trailing StoreLoad barrier emitted by C2.  Unfortunately we
// can't check the type of memory ordering here, so we always emit a
// STLXR.

// This section is generated from cas.m4
// NOTE(review): any change to the patterns below must be made in cas.m4
// and the section regenerated; hand edits here will be lost on the next
// regeneration. The code below is intentionally left byte-identical.


// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// END This section of the file is automatically generated. Do not edit --------------
 9130 // ---------------------------------------------------------------------
 9131 
// Atomic exchange of an int: the GetAndSetI result (the previous value at
// [$mem]) is placed in $prev and $newv is stored.
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9141 
// Atomic exchange of a long: the GetAndSetL result (the previous value at
// [$mem]) is placed in $prev and $newv is stored.
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9151 
// Atomic exchange of a narrow oop (word-sized). Only matches when the node
// carries no GC barrier data; barrier-requiring exchanges match elsewhere.
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9162 
// Atomic exchange of a pointer. Only matches when the node carries no GC
// barrier data; barrier-requiring exchanges match elsewhere.
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9173 
// Acquiring variant of get_and_setI: uses atomic_xchgalw when the exchange
// must also act as an acquiring load (needs_acquiring_load_exclusive).
instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetI mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9184 
// Acquiring variant of get_and_setL: uses atomic_xchgal when the exchange
// must also act as an acquiring load (needs_acquiring_load_exclusive).
instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetL mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9195 
// Acquiring variant of get_and_setN; additionally requires that the node
// carries no GC barrier data.
instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetN mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9206 
// Acquiring variant of get_and_setP; additionally requires that the node
// carries no GC barrier data.
instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9217 
 9218 
// Atomic add of a register long to [$mem]; the GetAndAddL result is
// delivered in $newval. Costs +1 over the no-result form below so the
// matcher prefers that form when the result is unused.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9228 
// Variant of get_and_addL for when the GetAndAddL result is unused
// (result_not_used predicate): the loaded value is discarded via noreg.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9239 
// Immediate-increment form of get_and_addL: $incr is an add/sub-encodable
// long constant rather than a register.
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9249 
// Immediate-increment form of get_and_addL for when the result is unused:
// the loaded value is discarded via noreg.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9260 
// Atomic add of a register int to [$mem]; the GetAndAddI result is
// delivered in $newval. Costs +1 over the no-result form below.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9270 
// Variant of get_and_addI for when the GetAndAddI result is unused
// (result_not_used predicate): the loaded value is discarded via noreg.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9281 
// Immediate-increment form of get_and_addI: $incr is an add/sub-encodable
// int constant rather than a register.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9291 
// Immediate-increment form of get_and_addI for when the result is unused:
// the loaded value is discarded via noreg.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9302 
// Acquiring variant of get_and_addL: uses atomic_addal when the add must
// also act as an acquiring load (needs_acquiring_load_exclusive).
instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9313 
// Acquiring, no-result variant of get_and_addL: result discarded via noreg.
instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9324 
 9325 instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
 9326   predicate(needs_acquiring_load_exclusive(n));
 9327   match(Set newval (GetAndAddL mem incr));
 9328   ins_cost(VOLATILE_REF_COST + 1);
 9329   format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
 9330   ins_encode %{
 9331     __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
 9332   %}
 9333   ins_pipe(pipe_serial);
 9334 %}
 9335 
 9336 instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
 9337   predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
 9338   match(Set dummy (GetAndAddL mem incr));
 9339   ins_cost(VOLATILE_REF_COST);
 9340   format %{ "get_and_addL_acq [$mem], $incr" %}
 9341   ins_encode %{
 9342     __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
 9343   %}
 9344   ins_pipe(pipe_serial);
 9345 %}
 9346 
// ---- GetAndAddI, acquiring variants ----
// 32-bit counterparts of the acquiring long rules above; atomic_addalw is
// the acquiring word-sized fetch-and-add.

instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
  // Old value is dead: discard it by targeting noreg.
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  // Immediate-increment flavour.
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
  // Immediate increment, old value dead.
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9390 
// Manifest a CmpU result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpU3_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg flags)
%{
  match(Set dst (CmpU3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmpw $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpU3(reg)"
  %}
  ins_encode %{
    // dst = (src1 != src2) ? 1 : 0, then negated when src1 <u src2 (LO),
    // yielding -1 / 0 / +1.
    __ cmpw($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}

// Immediate flavour: compare by subtracting into the zero register.
instruct cmpU3_reg_imm(iRegINoSp dst, iRegI src1, immIAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpU3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subsw zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpU3(imm)"
  %}
  ins_encode %{
    // subsw into zr sets flags only; then same cset/cneg sequence as above.
    __ subsw(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}
 9432 
// Manifest a CmpUL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpUL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpUL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmp $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpUL3(reg)"
  %}
  ins_encode %{
    // 64-bit unsigned compare; LO selects the "less than" (-1) outcome.
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}

// Immediate flavour: compare by subtracting into the zero register.
instruct cmpUL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpUL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subs zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpUL3(imm)"
  %}
  ins_encode %{
    __ subs(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}
 9474 
// Manifest a CmpL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Same shape as the CmpUL3 rules above but uses the signed LT condition.
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmp $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lt\t# CmpL3(reg)"
  %}
  ins_encode %{
    // dst = (src1 != src2) ? 1 : 0, negated when src1 <s src2 (LT).
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}

// Immediate flavour: compare by subtracting into the zero register.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subs zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lt\t# CmpL3(imm)"
  %}
  ins_encode %{
    __ subs(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
 9516 
 9517 // ============================================================================
 9518 // Conditional Move Instructions
 9519 
// n.b. we have identical rules for both a signed compare op (cmpOp)
// and an unsigned compare op (cmpOpU). it would be nice if we could
// define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
// of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
 9529 
// Conditional move, int, both sources in registers.
// n.b. csel's true/false slots receive src2/src1 respectively, i.e.
// dst = cond ? src2 : src1 for the CMoveI (Binary src1 src2) ordering.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare flavour of the rule above (see note on cmpOp/cmpOpU).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9561 
// special cases where one arg is zero

// n.b. this is selected in preference to the rule above because it
// avoids loading constant 0 into a source register

// TODO
// we ought only to be able to cull one of these variants as the ideal
// transforms ought always to order the zero consistently (to left/right?)

// dst = cond ? src : 0, using zr for the zero operand.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// dst = cond ? 0 : src (zero in the right-hand CMove slot).
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9634 
// special case for creating a boolean 0 or 1

// n.b. this is selected in preference to the rule above because it
// avoids loading constants 0 and 1 into a source register

// csincw dst, zr, zr, cond computes cond ? 0 : 1, i.e. a materialized
// boolean without any source registers.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
 9677 
// Conditional move, long, both sources in registers (64-bit csel).
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9709 
// special cases where one arg is zero

// dst = cond ? 0 : src, using zr to avoid materializing the constant.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// dst = cond ? src : 0 (zero in the left-hand CMove slot).
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9775 
// Conditional move, pointer, both sources in registers (64-bit csel).
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9807 
// special cases where one arg is zero

// dst = cond ? null : src, using zr for the null pointer.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// dst = cond ? src : null (zero in the left-hand CMove slot).
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9873 
// Conditional move, compressed (narrow) pointer: 32-bit cselw.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9889 
 9890 instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
 9891   match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));
 9892 
 9893   ins_cost(INSN_COST * 2);
 9894   format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}
 9895 
 9896   ins_encode %{
 9897     __ cselw(as_Register($dst$$reg),
 9898              as_Register($src2$$reg),
 9899              as_Register($src1$$reg),
 9900              (Assembler::Condition)$cmp$$cmpcode);
 9901   %}
 9902 
 9903   ins_pipe(icond_reg_reg);
 9904 %}
 9905 
// special cases where one arg is zero

// dst = cond ? null : src for compressed pointers (zr as narrow null).
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// dst = cond ? src : null (zero in the left-hand CMove slot).
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9971 
// Conditional move, float: fcsels selects src2 when the condition holds,
// src1 otherwise (note swapped operand order, as in the integer rules).
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
 9989 
// Unsigned-compare flavour of cmovF_reg.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10007 
// Conditional move, double: fcseld selects src2 when the condition holds,
// src1 otherwise (note swapped operand order, as in the integer rules).
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // Format comment corrected: this is the double (vRegD/fcseld) rule, but
  // the annotation previously said "cmove float" (copy-paste from cmovF_reg).
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10025 
// Unsigned-compare flavour of cmovD_reg.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // Format comment corrected: this is the double (vRegD/fcseld) rule, but
  // the annotation previously said "cmove float" (copy-paste from cmovUF_reg).
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10043 
10044 // ============================================================================
10045 // Arithmetic Instructions
10046 //
10047 
10048 // Integer Addition
10049 
10050 // TODO
10051 // these currently employ operations which do not set CR and hence are
10052 // not flagged as killing CR but we would like to isolate the cases
10053 // where we want to set flags from those where we don't. need to work
10054 // out how to do that.
10055 
// Integer addition, register + register (32-bit addw).
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10070 
// Integer addition, register + add/sub-encodable immediate. The shared
// aarch64_enc_addsubw_imm encoder emits add or sub based on opcode (0x0 = add).
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// As above but with the int input arriving as a truncated long (ConvL2I);
// the 32-bit addw makes the explicit narrowing unnecessary.
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10098 
// Pointer Addition
instruct addP_reg_reg(iRegPNoSp dst, iRegPorL2P src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer plus sign-extended int offset, folded into add's sxtw extend.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegPorL2P src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer plus shifted long index, folded into an lsl-scaled address (lea).
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegPorL2P src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Pointer plus sign-extended-and-scaled int index (ConvI2L then shift),
// folded into an sxtw-scaled address (lea).
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegPorL2P src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10159 
// Sign-extend an int to long and left-shift it, as a single sbfiz.
// The width argument is clamped to 32 bits since the source is an int.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN2(32, (int)((-$scale$$constant) & 63)));
  %}

  ins_pipe(ialu_reg_shift);
%}
10174 
// Pointer Immediate Addition
// n.b. this needs to be more expensive than using an indirect memory
// operand
instruct addP_reg_imm(iRegPNoSp dst, iRegPorL2P src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10191 
// Long Addition
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Long Immediate Addition. No constant pool entries required.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10223 
// Integer Subtraction
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
// Shares the add/sub immediate encoder; opcode 0x1 selects sub.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10254 
10255 // Long Subtraction
// 64-bit subtract, register-register form: dst = src1 - src2.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    // SUB Xd, Xn, Xm
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10271 
// Long Immediate Subtraction. No constant pool entries required.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  // 64-bit subtract, register-immediate form; immLAddSub guarantees the
  // constant fits an ADD/SUB immediate field.
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // Note: was "sub$dst, ..." — missing separator after the mnemonic made
  // PrintOptoAssembly output run the opcode and operand together.
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10286 
10287 // Integer Negation (special case for sub)
10288 
// 32-bit negation as a special case of subtract from zero: dst = -src.
// NOTE(review): the rFlagsReg cr operand is declared but not referenced by
// the encoding — presumably reserved by convention; confirm against adlc use.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10302 
10303 // Long Negation
10304 
// 64-bit negation as a special case of subtract from zero: dst = -src.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10318 
10319 // Integer Multiply
10320 
// 32-bit multiply: dst = src1 * src2.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    // MUL Wd, Wn, Wm
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10335 
// Signed widening multiply: matches a 64-bit multiply of two sign-extended
// 32-bit values and emits a single SMULL instead of two extends plus MUL.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    // SMULL Xd, Wn, Wm
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10350 
10351 // Long Multiply
10352 
// 64-bit multiply: dst = src1 * src2.
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    // MUL Xd, Xn, Xm
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10367 
// Signed high-half 64x64 multiply: dst = high 64 bits of src1 * src2.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    // SMULH Xd, Xn, Xm
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10383 
// Unsigned high-half 64x64 multiply: dst = high 64 bits of src1 * src2.
instruct umulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (UMulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "umulh   $dst, $src1, $src2\t# umulhi" %}

  ins_encode %{
    // UMULH Xd, Xn, Xm
    __ umulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10399 
10400 // Combined Integer Multiply & Add/Sub
10401 
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  // Fused 32-bit multiply-add: dst = src3 + src1 * src2.
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Format now names the 32-bit form (maddw) actually emitted below,
  // matching the convention used by mulI/subI ("mulw"/"subw").
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    // MADD Wd, Wn, Wm, Wa
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10417 
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  // Fused 32-bit multiply-subtract: dst = src3 - src1 * src2.
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Format now names the 32-bit form (msubw) actually emitted below.
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    // MSUB Wd, Wn, Wm, Wa
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10433 
10434 // Combined Integer Multiply & Neg
10435 
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
  // Fused 32-bit multiply-negate: dst = -(src1 * src2).
  match(Set dst (MulI (SubI zero src1) src2));

  ins_cost(INSN_COST * 3);
  // Format now names the 32-bit form (mnegw) actually emitted below.
  format %{ "mnegw  $dst, $src1, $src2" %}

  ins_encode %{
    // MNEG Wd, Wn, Wm
    __ mnegw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10450 
10451 // Combined Long Multiply & Add/Sub
10452 
// Fused 64-bit multiply-add: dst = src3 + src1 * src2.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    // MADD Xd, Xn, Xm, Xa
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10468 
// Fused 64-bit multiply-subtract: dst = src3 - src1 * src2.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    // MSUB Xd, Xn, Xm, Xa
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10484 
10485 // Combined Long Multiply & Neg
10486 
// Fused 64-bit multiply-negate: dst = -(src1 * src2).
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    // MNEG Xd, Xn, Xm
    __ mneg(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10501 
10502 // Combine Integer Signed Multiply & Add/Sub/Neg Long
10503 
// Signed widening multiply-add: dst = src3 + sext(src1) * sext(src2),
// folding the two ConvI2L nodes and the add into one SMADDL.
instruct smaddL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (AddL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smaddl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    // SMADDL Xd, Wn, Wm, Xa
    __ smaddl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10519 
// Signed widening multiply-subtract: dst = src3 - sext(src1) * sext(src2).
instruct smsubL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (SubL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smsubl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    // SMSUBL Xd, Wn, Wm, Xa
    __ smsubl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10535 
// Signed widening multiply-negate: dst = -(sext(src1) * sext(src2)).
instruct smnegL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero (ConvI2L src1)) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smnegl  $dst, $src1, $src2" %}

  ins_encode %{
    // SMNEGL Xd, Wn, Wm
    __ smnegl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10550 
10551 // Combined Multiply-Add Shorts into Integer (dst = src1 * src2 + src3 * src4)
10552 
// MulAddS2I: dst = src1 * src2 + src3 * src4, using rscratch1 as a temp for
// the first product so no extra allocatable register is needed.
instruct muladdS2I(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3, iRegIorL2I src4) %{
  match(Set dst (MulAddS2I (Binary src1 src2) (Binary src3 src4)));

  ins_cost(INSN_COST * 5);
  format %{ "mulw  rscratch1, $src1, $src2\n\t"
            "maddw $dst, $src3, $src4, rscratch1" %}

  ins_encode %{
    __ mulw(rscratch1, as_Register($src1$$reg), as_Register($src2$$reg));
    __ maddw(as_Register($dst$$reg), as_Register($src3$$reg), as_Register($src4$$reg), rscratch1); %}

  ins_pipe(imac_reg_reg);
%}
10566 
10567 // Integer Divide
10568 
// 32-bit signed divide: dst = src1 / src2 (shared encoding handles the
// Java corner cases — see aarch64_enc_divw).
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10578 
10579 // Long Divide
10580 
// 64-bit signed divide: dst = src1 / src2 (see aarch64_enc_div).
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10590 
10591 // Integer Remainder
10592 
// 32-bit signed remainder: AArch64 has no remainder instruction, so the
// encoding computes it as src1 - (src1 / src2) * src2 via sdivw + msubw.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10603 
10604 // Long Remainder
10605 
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  // 64-bit signed remainder, computed as src1 - (src1 / src2) * src2
  // via sdiv + msub (see aarch64_enc_mod).
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Use "\n\t" between the two lines (was "\n") so the continuation line is
  // indented in PrintOptoAssembly output, consistent with modI above.
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10616 
10617 // Unsigned Integer Divide
10618 
// 32-bit unsigned divide: dst = src1 /u src2.
instruct UdivI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (UDivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "udivw  $dst, $src1, $src2" %}

  ins_encode %{
    __ udivw($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(idiv_reg_reg);
%}
10631 
10632 //  Unsigned Long Divide
10633 
// 64-bit unsigned divide: dst = src1 /u src2.
instruct UdivL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (UDivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "udiv   $dst, $src1, $src2" %}

  ins_encode %{
    __ udiv($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(ldiv_reg_reg);
%}
10646 
10647 // Unsigned Integer Remainder
10648 
// 32-bit unsigned remainder: src1 - (src1 /u src2) * src2, via udivw + msubw
// with rscratch1 holding the quotient.
instruct UmodI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (UModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "udivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode %{
    __ udivw(rscratch1, $src1$$Register, $src2$$Register);
    __ msubw($dst$$Register, rscratch1, $src2$$Register, $src1$$Register);
  %}

  ins_pipe(idiv_reg_reg);
%}
10663 
10664 // Unsigned Long Remainder
10665 
instruct UModL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  // 64-bit unsigned remainder: src1 - (src1 /u src2) * src2, via udiv + msub
  // with rscratch1 holding the quotient.
  match(Set dst (UModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Use "\n\t" between the two lines (was "\n") so the continuation line is
  // indented in PrintOptoAssembly output, consistent with UmodI above.
  format %{ "udiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode %{
    __ udiv(rscratch1, $src1$$Register, $src2$$Register);
    __ msub($dst$$Register, rscratch1, $src2$$Register, $src1$$Register);
  %}

  ins_pipe(ldiv_reg_reg);
%}
10680 
10681 // Integer Shifts
10682 
10683 // Shift Left Register
// 32-bit shift left by a register count (LSLV uses count mod 32 in hardware).
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10698 
10699 // Shift Left Immediate
// 32-bit shift left by an immediate; only the low 5 bits of the constant
// are used (& 0x1f), matching the int shift-count semantics.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10714 
10715 // Shift Right Logical Register
// 32-bit logical (unsigned) shift right by a register count.
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10730 
10731 // Shift Right Logical Immediate
// 32-bit logical shift right by an immediate (count masked to 5 bits).
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10746 
10747 // Shift Right Arithmetic Register
// 32-bit arithmetic (sign-propagating) shift right by a register count.
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10762 
10763 // Shift Right Arithmetic Immediate
// 32-bit arithmetic shift right by an immediate (count masked to 5 bits).
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10778 
10779 // Combined Int Mask and Right Shift (using UBFM)
10780 // TODO
10781 
10782 // Long Shifts
10783 
10784 // Shift Left Register
// 64-bit shift left by a register count.
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10799 
10800 // Shift Left Immediate
// 64-bit shift left by an immediate; only the low 6 bits of the constant
// are used (& 0x3f), matching the long shift-count semantics.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10815 
10816 // Shift Right Logical Register
// 64-bit logical shift right by a register count.
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10831 
10832 // Shift Right Logical Immediate
// 64-bit logical shift right by an immediate (count masked to 6 bits).
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10847 
10848 // A special-case pattern for card table stores.
// Pointer variant of the immediate logical shift right: matches the
// CastP2X + URShiftL pattern produced for card table stores so no separate
// pointer-to-long move is emitted.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10863 
10864 // Shift Right Arithmetic Register
// 64-bit arithmetic shift right by a register count.
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10879 
10880 // Shift Right Arithmetic Immediate
// 64-bit arithmetic shift right by an immediate (count masked to 6 bits).
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10895 
10896 // BEGIN This section of the file is automatically generated. Do not edit --------------
10897 // This section is generated from aarch64_ad.m4
10898 
10899 // This pattern is automatically generated from aarch64_ad.m4
10900 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit bitwise NOT (XorL with -1) implemented as EON with zr.
// Generated from aarch64_ad.m4 — change the m4 source, not this block.
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
10917 
10918 // This pattern is automatically generated from aarch64_ad.m4
10919 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit bitwise NOT (XorI with -1) implemented as EONW with zr.
// Generated from aarch64_ad.m4 — change the m4 source, not this block.
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
10936 
10937 // This pattern is automatically generated from aarch64_ad.m4
10938 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 0 - (src1 >>> src2) folded into a single NEGW with an LSR-shifted operand.
// Generated from aarch64_ad.m4 — change the m4 source, not this block.
instruct NegI_reg_URShift_reg(iRegINoSp dst,
                              immI0 zero, iRegIorL2I src1, immI src2) %{
  match(Set dst (SubI zero (URShiftI src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "negw  $dst, $src1, LSR $src2" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSR, $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10953 
10954 // This pattern is automatically generated from aarch64_ad.m4
10955 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 0 - (src1 >> src2) folded into a single NEGW with an ASR-shifted operand.
// Generated from aarch64_ad.m4 — change the m4 source, not this block.
instruct NegI_reg_RShift_reg(iRegINoSp dst,
                              immI0 zero, iRegIorL2I src1, immI src2) %{
  match(Set dst (SubI zero (RShiftI src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "negw  $dst, $src1, ASR $src2" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::ASR, $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10970 
10971 // This pattern is automatically generated from aarch64_ad.m4
10972 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 0 - (src1 << src2) folded into a single NEGW with an LSL-shifted operand.
// Generated from aarch64_ad.m4 — change the m4 source, not this block.
instruct NegI_reg_LShift_reg(iRegINoSp dst,
                              immI0 zero, iRegIorL2I src1, immI src2) %{
  match(Set dst (SubI zero (LShiftI src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "negw  $dst, $src1, LSL $src2" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSL, $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10987 
10988 // This pattern is automatically generated from aarch64_ad.m4
10989 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: 0 - (src1 >>> src2) folded into NEG with an LSR-shifted operand.
// Generated from aarch64_ad.m4 — change the m4 source, not this block.
instruct NegL_reg_URShift_reg(iRegLNoSp dst,
                              immL0 zero, iRegL src1, immI src2) %{
  match(Set dst (SubL zero (URShiftL src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "neg  $dst, $src1, LSR $src2" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSR, $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11004 
11005 // This pattern is automatically generated from aarch64_ad.m4
11006 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: 0 - (src1 >> src2) folded into NEG with an ASR-shifted operand.
// Generated from aarch64_ad.m4 — change the m4 source, not this block.
instruct NegL_reg_RShift_reg(iRegLNoSp dst,
                              immL0 zero, iRegL src1, immI src2) %{
  match(Set dst (SubL zero (RShiftL src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "neg  $dst, $src1, ASR $src2" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::ASR, $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11021 
11022 // This pattern is automatically generated from aarch64_ad.m4
11023 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: 0 - (src1 << src2) folded into NEG with an LSL-shifted operand.
// Generated from aarch64_ad.m4 — change the m4 source, not this block.
instruct NegL_reg_LShift_reg(iRegLNoSp dst,
                              immL0 zero, iRegL src1, immI src2) %{
  match(Set dst (SubL zero (LShiftL src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "neg  $dst, $src1, LSL $src2" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSL, $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11038 
11039 // This pattern is automatically generated from aarch64_ad.m4
11040 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & ~src2 (AndI with XorI -1) collapsed into a single BICW.
// Generated from aarch64_ad.m4 — change the m4 source, not this block.
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11056 
11057 // This pattern is automatically generated from aarch64_ad.m4
11058 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit src1 & ~src2 collapsed into a single BIC.
// Generated from aarch64_ad.m4 — change the m4 source, not this block.
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11074 
11075 // This pattern is automatically generated from aarch64_ad.m4
11076 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | ~src2 collapsed into a single ORNW.
// Generated from aarch64_ad.m4 — change the m4 source, not this block.
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11092 
11093 // This pattern is automatically generated from aarch64_ad.m4
11094 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit src1 | ~src2 collapsed into a single ORN.
// Generated from aarch64_ad.m4 — change the m4 source, not this block.
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11110 
11111 // This pattern is automatically generated from aarch64_ad.m4
11112 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// ~(src1 ^ src2) (i.e. -1 ^ (src2 ^ src1)) collapsed into a single EONW.
// Generated from aarch64_ad.m4 — change the m4 source, not this block.
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11128 
11129 // This pattern is automatically generated from aarch64_ad.m4
11130 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit ~(src1 ^ src2) collapsed into a single EON.
// Generated from aarch64_ad.m4 — change the m4 source, not this block.
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11146 
11147 // This pattern is automatically generated from aarch64_ad.m4
11148 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11149 // val & (-1 ^ (val >>> shift)) ==> bicw
// src1 & ~(src2 >>> src3) collapsed into BICW with an LSR-shifted operand.
// Generated from aarch64_ad.m4 — change the m4 source, not this block.
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11167 
11168 // This pattern is automatically generated from aarch64_ad.m4
11169 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11170 // val & (-1 ^ (val >>> shift)) ==> bic
// 64-bit src1 & ~(src2 >>> src3) collapsed into BIC with an LSR-shifted operand.
// Generated from aarch64_ad.m4 — change the m4 source, not this block.
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11188 
11189 // This pattern is automatically generated from aarch64_ad.m4
11190 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11191 // val & (-1 ^ (val >> shift)) ==> bicw
// src1 & ~(src2 >> src3) collapsed into BICW with an ASR-shifted operand.
// Generated from aarch64_ad.m4 — change the m4 source, not this block.
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11209 
11210 // This pattern is automatically generated from aarch64_ad.m4
11211 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11212 // val & (-1 ^ (val >> shift)) ==> bic
// 64-bit src1 & ~(src2 >> src3) collapsed into BIC with an ASR-shifted operand.
// Generated from aarch64_ad.m4 — change the m4 source, not this block.
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11230 
11231 // This pattern is automatically generated from aarch64_ad.m4
11232 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11233 // val & (-1 ^ (val ror shift)) ==> bicw
// src1 & ~(src2 ror src3) collapsed into BICW with a ROR-shifted operand.
// Generated from aarch64_ad.m4 — change the m4 source, not this block.
instruct AndI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11251 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val ror shift)) ==> bic
// src4 (the matched -1 immediate) only selects this rule; it is not encoded.
instruct AndL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    // Rotate amount masked to 0..63 for the 64-bit (X-register) form.
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11272 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val << shift)) ==> bicw
// src4 (the matched -1 immediate) only selects this rule; it is not encoded.
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    // Shift amount masked to 0..31 for the 32-bit (W-register) form.
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11293 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val << shift)) ==> bic
// src4 (the matched -1 immediate) only selects this rule; it is not encoded.
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    // Shift amount masked to 0..63 for the 64-bit (X-register) form.
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11314 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val >>> shift)) ==> eonw
// src4 (the matched -1 immediate) only selects this rule; it is not encoded.
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    // Shift amount masked to 0..31 for the 32-bit (W-register) form.
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11335 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val >>> shift)) ==> eon
// src4 (the matched -1 immediate) only selects this rule; it is not encoded.
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    // Shift amount masked to 0..63 for the 64-bit (X-register) form.
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11356 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val >> shift)) ==> eonw
// src4 (the matched -1 immediate) only selects this rule; it is not encoded.
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    // Shift amount masked to 0..31 for the 32-bit (W-register) form.
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11377 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val >> shift)) ==> eon
// src4 (the matched -1 immediate) only selects this rule; it is not encoded.
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    // Shift amount masked to 0..63 for the 64-bit (X-register) form.
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11398 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val ror shift)) ==> eonw
// src4 (the matched -1 immediate) only selects this rule; it is not encoded.
instruct XorI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (XorI src4 (XorI(RotateRight src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    // Rotate amount masked to 0..31 for the 32-bit (W-register) form.
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11419 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val ror shift)) ==> eon
// src4 (the matched -1 immediate) only selects this rule; it is not encoded.
instruct XorL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (XorL src4 (XorL(RotateRight src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    // Rotate amount masked to 0..63 for the 64-bit (X-register) form.
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11440 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val << shift)) ==> eonw
// src4 (the matched -1 immediate) only selects this rule; it is not encoded.
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    // Shift amount masked to 0..31 for the 32-bit (W-register) form.
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11461 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val << shift)) ==> eon
// src4 (the matched -1 immediate) only selects this rule; it is not encoded.
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    // Shift amount masked to 0..63 for the 64-bit (X-register) form.
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11482 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val >>> shift)) ==> ornw
// src4 (the matched -1 immediate) only selects this rule; it is not encoded.
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    // Shift amount masked to 0..31 for the 32-bit (W-register) form.
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11503 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val >>> shift)) ==> orn
// src4 (the matched -1 immediate) only selects this rule; it is not encoded.
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    // Shift amount masked to 0..63 for the 64-bit (X-register) form.
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11524 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val >> shift)) ==> ornw
// src4 (the matched -1 immediate) only selects this rule; it is not encoded.
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    // Shift amount masked to 0..31 for the 32-bit (W-register) form.
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11545 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val >> shift)) ==> orn
// src4 (the matched -1 immediate) only selects this rule; it is not encoded.
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    // Shift amount masked to 0..63 for the 64-bit (X-register) form.
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11566 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val ror shift)) ==> ornw
// src4 (the matched -1 immediate) only selects this rule; it is not encoded.
instruct OrI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    // Rotate amount masked to 0..31 for the 32-bit (W-register) form.
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11587 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val ror shift)) ==> orn
// src4 (the matched -1 immediate) only selects this rule; it is not encoded.
instruct OrL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    // Rotate amount masked to 0..63 for the 64-bit (X-register) form.
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11608 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val << shift)) ==> ornw
// src4 (the matched -1 immediate) only selects this rule; it is not encoded.
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    // Shift amount masked to 0..31 for the 32-bit (W-register) form.
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11629 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val << shift)) ==> orn
// src4 (the matched -1 immediate) only selects this rule; it is not encoded.
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    // Shift amount masked to 0..63 for the 64-bit (X-register) form.
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11650 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 >>> src3) ==> andw (shift amount masked to 0..31)
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11671 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 >>> src3) ==> andr (shift amount masked to 0..63)
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11692 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 >> src3) ==> andw (shift amount masked to 0..31)
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11713 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 >> src3) ==> andr (shift amount masked to 0..63)
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11734 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 << src3) ==> andw (shift amount masked to 0..31)
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11755 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 << src3) ==> andr (shift amount masked to 0..63)
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11776 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 ror src3) ==> andw (rotate amount masked to 0..31)
instruct AndI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11797 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 ror src3) ==> andr (rotate amount masked to 0..63)
instruct AndL_reg_RotateRight_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11818 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 >>> src3) ==> eorw (shift amount masked to 0..31)
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11839 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 >>> src3) ==> eor (shift amount masked to 0..63)
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11860 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 >> src3) ==> eorw (shift amount masked to 0..31)
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11881 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 >> src3) ==> eor (shift amount masked to 0..63)
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11902 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 << src3) ==> eorw (shift amount masked to 0..31)
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11923 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 << src3) ==> eor (shift amount masked to 0..63)
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11944 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 ror src3) ==> eorw (rotate amount masked to 0..31)
instruct XorI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11965 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 ror src3) ==> eor (rotate amount masked to 0..63)
instruct XorL_reg_RotateRight_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11986 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | (src2 >>> src3) ==> orrw (shift amount masked to 0..31)
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12007 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | (src2 >>> src3) ==> orr (shift amount masked to 0..63)
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12028 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | (src2 >> src3) ==> orrw (shift amount masked to 0..31)
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12049 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | (src2 >> src3) ==> orr (shift amount masked to 0..63)
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12070 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | (src2 << src3) ==> orrw (shift amount masked to 0..31)
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12091 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | (src2 << src3) ==> orr (shift amount masked to 0..63)
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12112 
12113 // This pattern is automatically generated from aarch64_ad.m4
12114 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12115 instruct OrI_reg_RotateRight_reg(iRegINoSp dst,
12116                          iRegIorL2I src1, iRegIorL2I src2,
12117                          immI src3) %{
12118   match(Set dst (OrI src1 (RotateRight src2 src3)));
12119 
12120   ins_cost(1.9 * INSN_COST);
12121   format %{ "orrw  $dst, $src1, $src2, ROR $src3" %}
12122 
12123   ins_encode %{
12124     __ orrw(as_Register($dst$$reg),
12125               as_Register($src1$$reg),
12126               as_Register($src2$$reg),
12127               Assembler::ROR,
12128               $src3$$constant & 0x1f);
12129   %}
12130 
12131   ins_pipe(ialu_reg_reg_shift);
12132 %}
12133 
12134 // This pattern is automatically generated from aarch64_ad.m4
12135 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12136 instruct OrL_reg_RotateRight_reg(iRegLNoSp dst,
12137                          iRegL src1, iRegL src2,
12138                          immI src3) %{
12139   match(Set dst (OrL src1 (RotateRight src2 src3)));
12140 
12141   ins_cost(1.9 * INSN_COST);
12142   format %{ "orr  $dst, $src1, $src2, ROR $src3" %}
12143 
12144   ins_encode %{
12145     __ orr(as_Register($dst$$reg),
12146               as_Register($src1$$reg),
12147               as_Register($src2$$reg),
12148               Assembler::ROR,
12149               $src3$$constant & 0x3f);
12150   %}
12151 
12152   ins_pipe(ialu_reg_reg_shift);
12153 %}
12154 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fold a logical right shift of src2 into a 32-bit ADDW;
// shift immediate masked to 0..31.
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fold a logical right shift of src2 into a long ADD;
// shift immediate masked to 0..63.
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fold an arithmetic right shift of src2 into a 32-bit ADDW;
// shift immediate masked to 0..31.
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fold an arithmetic right shift of src2 into a long ADD;
// shift immediate masked to 0..63.
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fold a left shift of src2 into a 32-bit ADDW;
// shift immediate masked to 0..31.
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fold a left shift of src2 into a long ADD;
// shift immediate masked to 0..63.
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12280 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fold a logical right shift of src2 into a 32-bit SUBW;
// shift immediate masked to 0..31.
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fold a logical right shift of src2 into a long SUB;
// shift immediate masked to 0..63.
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fold an arithmetic right shift of src2 into a 32-bit SUBW;
// shift immediate masked to 0..31.
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fold an arithmetic right shift of src2 into a long SUB;
// shift immediate masked to 0..63.
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fold a left shift of src2 into a 32-bit SUBW;
// shift immediate masked to 0..31.
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fold a left shift of src2 into a long SUB;
// shift immediate masked to 0..63.
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12406 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// (src << lshift) >> rshift (arithmetic) collapses to one SBFM with
// immr = (rshift - lshift) & 63 and imms = 63 - lshift.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit analogue of sbfmL: shift counts are masked to 0..31 and the
// field parameters become immr = (rshift - lshift) & 31, imms = 31 - lshift.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// Same as sbfmL but the outer shift is logical (URShiftL), so the
// pair collapses to an unsigned bitfield move (UBFM).
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit analogue of ubfmL (logical outer shift -> UBFMW).
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12498 
// Bitfield extract with shift & mask

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src >>> rshift) & mask, where mask is 2^w - 1 (guaranteed by
// immI_bitmask), is a single UBFXW extracting w bits at bit rshift.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    // Field width in bits; mask+1 is a power of two by construction.
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit analogue of ubfxwI: extract width = log2(mask+1) bits
// starting at bit rshift with a single UBFX.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));
  // Make sure we are not going to exceed what ubfx can do.
  predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12540 
12541 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The i2l conversion is folded in for free: the 64-bit ubfx writes
// zeros into the upper bits of dst.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12564 
12565 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// (src & (2^w - 1)) << lshift is a single UBFIZW inserting a w-bit
// field at bit lshift; the predicate keeps lshift + w within 32 bits.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// 64-bit analogue of ubfizwI; predicate keeps lshift + width within 64 bits.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  predicate((exact_log2_long(n->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// i2l of the masked-and-shifted int also matches: ubfizw zero-extends
// into the 64-bit destination.
instruct ubfizwIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (LShiftI (AndI src mask) lshift)));
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// l2i of a masked-and-shifted long: the predicate limits the inserted
// field to the low 32 bits, so a 64-bit ubfiz produces the right value.
instruct ubfizLConvL2I(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
%{
  match(Set dst (ConvL2I (LShiftL (AndL src mask) lshift)));
  predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12653 
12654 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
// The zero-extension implied by i2l is subsumed by the ubfiz itself.
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// If there is a convert L to I block between and AndL and a LShiftI, we can also match ubfiz
// Predicate confines the inserted field to the low 32 bits, so the
// narrowing l2i cannot lose significant bits.
instruct ubfizLConvL2Ix(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
%{
  match(Set dst (LShiftI (ConvL2I (AndL src mask)) lshift));
  predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 31)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Can skip int2long conversions after AND with small bitmask
// i2l of (src & msk) with no shift: ubfiz with lsb 0 and width log2(msk+1).
instruct ubfizIConvI2LAndI(iRegLNoSp dst, iRegI src, immI_bitmask msk)
%{
  match(Set dst (ConvI2L (AndI src msk)));
  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, 0, exact_log2($msk + 1) " %}
  ins_encode %{
    __ ubfiz(as_Register($dst$$reg), as_Register($src$$reg), 0, exact_log2($msk$$constant + 1));
  %}
  ins_pipe(ialu_reg_shift);
%}
12711 
12712 
// Rotations

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src1 << lshift) | (src2 >>> rshift) with lshift + rshift == 64
// (mod 64, enforced by the predicate) is a single EXTR; only rshift
// appears in the encoding, lshift is constrained by the predicate.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit analogue of extrOrL: shift counts sum to 32 (mod 32), so the
// OR of the two shifted halves is EXTRW.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Same idiom expressed with AddL: with complementary shift counts the
// shifted fields cannot overlap, so ADD equals OR and EXTR applies.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit analogue of extrAddL (AddI of complementary shifts -> EXTRW).
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12785 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate right by an immediate: EXTRW with both source operands
// equal to src performs a 32-bit rotate.
instruct rorI_imm(iRegINoSp dst, iRegI src, immI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ extrw(as_Register($dst$$reg), as_Register($src$$reg), as_Register($src$$reg),
               $shift$$constant & 0x1f);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate right by an immediate: EXTR with both source operands
// equal to src performs a 64-bit rotate.
instruct rorL_imm(iRegLNoSp dst, iRegL src, immI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ extr(as_Register($dst$$reg), as_Register($src$$reg), as_Register($src$$reg),
               $shift$$constant & 0x3f);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Variable rotate right, 32-bit (RORVW).
instruct rorI_reg(iRegINoSp dst, iRegI src, iRegI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg), as_Register($shift$$reg));
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Variable rotate right, 64-bit (RORV).
instruct rorL_reg(iRegLNoSp dst, iRegL src, iRegI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ rorv(as_Register($dst$$reg), as_Register($src$$reg), as_Register($shift$$reg));
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate left by register = rotate right by the negated count
// (rorvw with 0 - shift); uses rscratch1 as a temporary.
instruct rolI_reg(iRegINoSp dst, iRegI src, iRegI shift)
%{
  match(Set dst (RotateLeft src shift));

  ins_cost(INSN_COST);
  format %{ "rol    $dst, $src, $shift" %}

  ins_encode %{
     __ subw(rscratch1, zr, as_Register($shift$$reg));
     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg), rscratch1);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit rotate left via negated-count rorv; uses rscratch1 as a temporary.
instruct rolL_reg(iRegLNoSp dst, iRegL src, iRegI shift)
%{
  match(Set dst (RotateLeft src shift));

  ins_cost(INSN_COST);
  format %{ "rol    $dst, $src, $shift" %}

  ins_encode %{
     __ subw(rscratch1, zr, as_Register($shift$$reg));
     __ rorv(as_Register($dst$$reg), as_Register($src$$reg), rscratch1);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12879 
12880 
// Add/subtract (extended)

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fold i2l of src2 into the add using the sxtw extended-register form.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fold i2l of src2 into the subtract using the sxtw extended-register form.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 << 16) >> 16 (arithmetic) is a sign-extended halfword:
// add with the sxth extension.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 << 24) >> 24 (arithmetic) is a sign-extended byte:
// add with the sxtb extension.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 << 24) >>> 24 (logical) is a zero-extended byte:
// add with the uxtb extension.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 << 48) >> 48 (arithmetic) on a long is a sign-extended
// halfword: add with the sxth extension.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}
12972 
12973 // This pattern is automatically generated from aarch64_ad.m4
12974 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12975 instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
12976 %{
12977   match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
12978   ins_cost(INSN_COST);
12979   format %{ "add  $dst, $src1, $src2, sxtw" %}
12980 
12981    ins_encode %{
12982      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12983             as_Register($src2$$reg), ext::sxtw);
12984    %}
12985   ins_pipe(ialu_reg_reg);
12986 %}
12987 
12988 // This pattern is automatically generated from aarch64_ad.m4
12989 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12990 instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
12991 %{
12992   match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
12993   ins_cost(INSN_COST);
12994   format %{ "add  $dst, $src1, $src2, sxtb" %}
12995 
12996    ins_encode %{
12997      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12998             as_Register($src2$$reg), ext::sxtb);
12999    %}
13000   ins_pipe(ialu_reg_reg);
13001 %}
13002 
13003 // This pattern is automatically generated from aarch64_ad.m4
13004 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13005 instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
13006 %{
13007   match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
13008   ins_cost(INSN_COST);
13009   format %{ "add  $dst, $src1, $src2, uxtb" %}
13010 
13011    ins_encode %{
13012      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
13013             as_Register($src2$$reg), ext::uxtb);
13014    %}
13015   ins_pipe(ialu_reg_reg);
13016 %}
13017 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- lasting changes (including comments)
// belong in aarch64_ad.m4; the copy in this file should only be regenerated.
//
// The patterns below recognize zero-extension expressed as an AND with an
// all-ones mask and fold it into the operand extension of a single add:
// mask 0xff -> uxtb, 0xffff -> uxth, 0xffffffff -> uxtw.

// dst = src1 + (src2 & 0xff): addw with uxtb operand extension.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 + (src2 & 0xffff): addw with uxth operand extension.
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long variant: dst = src1 + (src2 & 0xff).
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long variant: dst = src1 + (src2 & 0xffff).
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long variant: dst = src1 + (src2 & 0xffffffff).
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
13092 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- lasting changes (including comments)
// belong in aarch64_ad.m4; the copy in this file should only be regenerated.
//
// SUB counterparts of the AddExt*_and patterns above: an AND with an
// all-ones mask (0xff/0xffff/0xffffffff) is folded into the uxtb/uxth/uxtw
// operand extension of a single subtract.

// dst = src1 - (src2 & 0xff): subw with uxtb operand extension.
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (src2 & 0xffff): subw with uxth operand extension.
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long variant: dst = src1 - (src2 & 0xff).
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long variant: dst = src1 - (src2 & 0xffff).
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long variant: dst = src1 - (src2 & 0xffffffff).
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
13167 
13168 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- lasting changes (including comments)
// belong in aarch64_ad.m4; the copy in this file should only be regenerated.
//
// The patterns below fold a sign-extension idiom ((x << k) >> k) FOLLOWED BY
// a further left shift (lshift2) into a single add/sub with an extended,
// shifted register operand: dst = src1 +/- (sxt*(src2) << lshift2).
// lshift2 is an immIExt operand (defined elsewhere in this file), which
// presumably restricts the shift to the 0..4 range the extended-register
// encoding allows -- confirm at the immIExt definition.

// dst = src1 + (sxtb(src2) << lshift2).
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 + (sxth(src2) << lshift2).
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 + (sxtw(src2) << lshift2).
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (sxtb(src2) << lshift2).
instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (sxth(src2) << lshift2).
instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (sxtw(src2) << lshift2).
instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit variant: dst = src1 + (sxtb(src2) << lshift2), using addw.
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit variant: dst = src1 + (sxth(src2) << lshift2), using addw.
instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit variant: dst = src1 - (sxtb(src2) << lshift2), using subw.
instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit variant: dst = src1 - (sxth(src2) << lshift2), using subw.
instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13318 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- lasting changes (including comments)
// belong in aarch64_ad.m4; the copy in this file should only be regenerated.
//
// dst = src1 + (sxtw(src2) << lshift): folds ConvI2L plus a left shift into
// one ADD (extended register, shifted) instruction; commonly produced by
// long-indexed addressing arithmetic.
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (sxtw(src2) << lshift): SUB counterpart of AddExtI_shift.
instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13348 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- lasting changes (including comments)
// belong in aarch64_ad.m4; the copy in this file should only be regenerated.
//
// The patterns below fold a mask-based zero extension (AND with
// 0xff/0xffff/0xffffffff -> uxtb/uxth/uxtw) plus a subsequent left shift
// into one add/sub with an extended, shifted register operand:
// dst = src1 +/- (uxt*(src2) << lshift).

// dst = src1 + ((src2 & 0xff) << lshift).
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 + ((src2 & 0xffff) << lshift).
instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 + ((src2 & 0xffffffff) << lshift).
instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - ((src2 & 0xff) << lshift).
instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - ((src2 & 0xffff) << lshift).
instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - ((src2 & 0xffffffff) << lshift).
instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit variant: dst = src1 + ((src2 & 0xff) << lshift), using addw.
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit variant: dst = src1 + ((src2 & 0xffff) << lshift), using addw.
instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit variant: dst = src1 - ((src2 & 0xff) << lshift), using subw.
instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit variant: dst = src1 - ((src2 & 0xffff) << lshift), using subw.
instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13498 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- lasting changes (including comments)
// belong in aarch64_ad.m4; the copy in this file should only be regenerated.
//
// The instructs below have no match rule (effect-only): they are building
// blocks used by the min/max expand rules further down.  Condition-code
// semantics follow the A64 ISA: CSEL picks src1 if the condition holds,
// else src2; CSINC picks src1 else src2+1 (zr+1 == 1); CSINV picks src1
// else ~src2 (~zr == -1).

// dst = (flags say LT) ? src1 : src2.
instruct cmovI_reg_reg_lt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE src2, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 lt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             $src2$$Register,
             Assembler::LT);
  %}
  ins_pipe(icond_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (GT) ? src1 : src2.
instruct cmovI_reg_reg_gt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE src2, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 gt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             $src2$$Register,
             Assembler::GT);
  %}
  ins_pipe(icond_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (LT) ? src1 : 0 (second operand is the zero register).
instruct cmovI_reg_imm0_lt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, zr lt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (GT) ? src1 : 0.
instruct cmovI_reg_imm0_gt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, zr gt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (LE) ? src1 : 1 (csincw of zr yields zr + 1 == 1).
instruct cmovI_reg_imm1_le(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, $src1, zr le\t"  %}

  ins_encode %{
    __ csincw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LE);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (GT) ? src1 : 1.
instruct cmovI_reg_imm1_gt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, $src1, zr gt\t"  %}

  ins_encode %{
    __ csincw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (LT) ? src1 : -1 (csinvw of zr yields ~zr == -1).
instruct cmovI_reg_immM1_lt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csinvw $dst, $src1, zr lt\t"  %}

  ins_encode %{
    __ csinvw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (GE) ? src1 : -1.
instruct cmovI_reg_immM1_ge(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csinvw $dst, $src1, zr ge\t"  %}

  ins_encode %{
    __ csinvw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GE);
  %}
  ins_pipe(icond_reg);
%}
13634 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- lasting changes (including comments)
// belong in aarch64_ad.m4; the copy in this file should only be regenerated.
//
// MinI/MaxI against the special immediates 0, 1, and -1 expand to a compare
// with zero followed by one of the conditional-select helpers defined above
// (cmp + csel/csinc/csinv = 2 instructions, no branch).  Each pattern has a
// mirror with the immediate on the left, since MinI/MaxI commute.
// compI_reg_imm0 is defined elsewhere in this file; presumably it sets the
// flags from (src - 0) -- confirm at its definition.

// min(src, 0): src < 0 ? src : 0.
instruct minI_reg_imm0(iRegINoSp dst, iRegIorL2I src, immI0 imm)
%{
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(0, src): mirror of minI_reg_imm0.
instruct minI_imm0_reg(iRegINoSp dst, immI0 imm, iRegIorL2I src)
%{
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(src, 1): src <= 0 ? src : 1 (csinc yields 1 when LE fails).
instruct minI_reg_imm1(iRegINoSp dst, iRegIorL2I src, immI_1 imm)
%{
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_le(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(1, src): mirror of minI_reg_imm1.
instruct minI_imm1_reg(iRegINoSp dst, immI_1 imm, iRegIorL2I src)
%{
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_le(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(src, -1): src < 0 ? src : -1 (src < 0 iff src <= -1 for ints).
instruct minI_reg_immM1(iRegINoSp dst, iRegIorL2I src, immI_M1 imm)
%{
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(-1, src): mirror of minI_reg_immM1.
instruct minI_immM1_reg(iRegINoSp dst, immI_M1 imm, iRegIorL2I src)
%{
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(src, 0): src > 0 ? src : 0.
instruct maxI_reg_imm0(iRegINoSp dst, iRegIorL2I src, immI0 imm)
%{
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(0, src): mirror of maxI_reg_imm0.
instruct maxI_imm0_reg(iRegINoSp dst, immI0 imm, iRegIorL2I src)
%{
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(src, 1): src > 0 ? src : 1 (src > 0 iff src >= 1 for ints).
instruct maxI_reg_imm1(iRegINoSp dst, iRegIorL2I src, immI_1 imm)
%{
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(1, src): mirror of maxI_reg_imm1.
instruct maxI_imm1_reg(iRegINoSp dst, immI_1 imm, iRegIorL2I src)
%{
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(src, -1): src >= 0 ? src : -1.
instruct maxI_reg_immM1(iRegINoSp dst, iRegIorL2I src, immI_M1 imm)
%{
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_ge(dst, src, cr);
  %}
%}
13777 
13778 // This pattern is automatically generated from aarch64_ad.m4
13779 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13780 instruct maxI_immM1_reg(iRegINoSp dst, immI_M1 imm, iRegIorL2I src)
13781 %{
13782   match(Set dst (MaxI imm src));
13783   ins_cost(INSN_COST * 3);
13784   expand %{
13785     rFlagsReg cr;
13786     compI_reg_imm0(cr, src);
13787     cmovI_reg_immM1_ge(dst, src, cr);
13788   %}
13789 %}
13790 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Reverse the bit order of a 32-bit value (RBIT, word form).
instruct bits_reverse_I(iRegINoSp dst, iRegIorL2I src)
%{
  match(Set dst (ReverseI src));
  ins_cost(INSN_COST);
  format %{ "rbitw  $dst, $src" %}
  ins_encode %{
    __ rbitw($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Reverse the bit order of a 64-bit value (RBIT).
instruct bits_reverse_L(iRegLNoSp dst, iRegL src)
%{
  match(Set dst (ReverseL src));
  ins_cost(INSN_COST);
  format %{ "rbit  $dst, $src" %}
  ins_encode %{
    __ rbit($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
13816 
13817 
13818 // END This section of the file is automatically generated. Do not edit --------------
13819 
13820 
13821 // ============================================================================
13822 // Floating Point Arithmetic Instructions
13823 
// Half-precision floating-point add: dst = src1 + src2 (FADDH).
instruct addHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddHF src1 src2));
  format %{ "faddh $dst, $src1, $src2" %}
  ins_encode %{
    __ faddh($dst$$FloatRegister,
             $src1$$FloatRegister,
             $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}
13834 
// Single-precision floating-point add: dst = src1 + src2 (FADDS).
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));
  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}
  ins_encode %{
    __ fadds($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}
13849 
// Double-precision floating-point add: dst = src1 + src2 (FADDD).
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));
  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}
  ins_encode %{
    __ faddd($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_d);
%}
13864 
// Half-precision floating-point subtract: dst = src1 - src2 (FSUBH).
instruct subHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubHF src1 src2));
  format %{ "fsubh $dst, $src1, $src2" %}
  ins_encode %{
    __ fsubh($dst$$FloatRegister,
             $src1$$FloatRegister,
             $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}
13875 
// Single-precision floating-point subtract: dst = src1 - src2 (FSUBS).
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));
  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}
  ins_encode %{
    __ fsubs($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}
13890 
// Double-precision floating-point subtract: dst = src1 - src2 (FSUBD).
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));
  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fsubd($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_d);
%}
13905 
// Half-precision floating-point multiply: dst = src1 * src2 (FMULH).
instruct mulHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulHF src1 src2));
  format %{ "fmulh $dst, $src1, $src2" %}
  ins_encode %{
    __ fmulh($dst$$FloatRegister,
             $src1$$FloatRegister,
             $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}
13916 
// Single-precision floating-point multiply: dst = src1 * src2 (FMULS).
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));
  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmuls($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}
13931 
// Double-precision floating-point multiply: dst = src1 * src2 (FMULD).
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));
  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmuld($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_d);
%}
13946 
13947 // src1 * src2 + src3 (half-precision float)
// Fused multiply-add, half precision: dst = src1 * src2 + src3 (FMADDH).
instruct maddHF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  match(Set dst (FmaHF src3 (Binary src1 src2)));
  format %{ "fmaddh $dst, $src1, $src2, $src3" %}
  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmaddh($dst$$FloatRegister,
              $src1$$FloatRegister,
              $src2$$FloatRegister,
              $src3$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}
13960 
13961 // src1 * src2 + src3
// Fused multiply-add, single precision: dst = src1 * src2 + src3 (FMADDS).
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  match(Set dst (FmaF src3 (Binary src1 src2)));
  format %{ "fmadds   $dst, $src1, $src2, $src3" %}
  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmadds($dst$$FloatRegister,
              $src1$$FloatRegister,
              $src2$$FloatRegister,
              $src3$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}
13977 
13978 // src1 * src2 + src3
// Fused multiply-add, double precision: dst = src1 * src2 + src3 (FMADDD).
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  match(Set dst (FmaD src3 (Binary src1 src2)));
  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}
  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmaddd($dst$$FloatRegister,
              $src1$$FloatRegister,
              $src2$$FloatRegister,
              $src3$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}
13994 
13995 // src1 * (-src2) + src3
13996 // "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3"
// Fused multiply-subtract, single precision: dst = src1 * (-src2) + src3
// (FMSUBS).
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));
  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}
  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmsubs($dst$$FloatRegister,
              $src1$$FloatRegister,
              $src2$$FloatRegister,
              $src3$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}
14012 
14013 // src1 * (-src2) + src3
14014 // "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3"
// Fused multiply-subtract, double precision: dst = src1 * (-src2) + src3
// (FMSUBD).
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));
  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}
  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmsubd($dst$$FloatRegister,
              $src1$$FloatRegister,
              $src2$$FloatRegister,
              $src3$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}
14030 
14031 // src1 * (-src2) - src3
14032 // "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
// Negated fused multiply-add, single precision:
// dst = src1 * (-src2) - src3 (FNMADDS).
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));
  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}
  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmadds($dst$$FloatRegister,
               $src1$$FloatRegister,
               $src2$$FloatRegister,
               $src3$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}
14048 
14049 // src1 * (-src2) - src3
14050 // "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
// Negated fused multiply-add, double precision:
// dst = src1 * (-src2) - src3 (FNMADDD).
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));
  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}
  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmaddd($dst$$FloatRegister,
               $src1$$FloatRegister,
               $src2$$FloatRegister,
               $src3$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}
14066 
14067 // src1 * src2 - src3
// dst = src1 * src2 - src3 (FNMSUBS).
// Note: the zero operand is part of the historical signature and is unused.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));
  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}
  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmsubs($dst$$FloatRegister,
               $src1$$FloatRegister,
               $src2$$FloatRegister,
               $src3$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}
14083 
14084 // src1 * src2 - src3
// dst = src1 * src2 - src3 (double precision).
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    // n.b. insn name should be fnmsubd
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
14101 
// Math.max(HH)H (half-precision float): dst = max(src1, src2) via FMAXH.
instruct maxHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxHF src1 src2));
  format %{ "fmaxh $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxh($dst$$FloatRegister,
             $src1$$FloatRegister,
             $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.min(HH)H (half-precision float): dst = min(src1, src2) via FMINH.
instruct minHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinHF src1 src2));
  format %{ "fminh $dst, $src1, $src2" %}
  ins_encode %{
    __ fminh($dst$$FloatRegister,
             $src1$$FloatRegister,
             $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}
14125 
// Math.max(FF)F: dst = max(src1, src2) via FMAXS.
instruct maxF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxF src1 src2));
  format %{ "fmaxs   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxs($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.min(FF)F: dst = min(src1, src2) via FMINS.
instruct minF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinF src1 src2));
  format %{ "fmins   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmins($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}
14153 
// Math.max(DD)D: dst = max(src1, src2) via FMAXD.
instruct maxD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MaxD src1 src2));
  format %{ "fmaxd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxd($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_d);
%}

// Math.min(DD)D: dst = min(src1, src2) via FMIND.
instruct minD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MinD src1 src2));
  format %{ "fmind   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmind($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_d);
%}
14181 
// Half-precision floating-point divide: dst = src1 / src2 (FDIVH).
instruct divHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivHF src1  src2));
  format %{ "fdivh $dst, $src1, $src2" %}
  ins_encode %{
    __ fdivh($dst$$FloatRegister,
             $src1$$FloatRegister,
             $src2$$FloatRegister);
  %}
  ins_pipe(fp_div_s);
%}
14192 
// Single-precision floating-point divide: dst = src1 / src2 (FDIVS).
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));
  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}
  ins_encode %{
    __ fdivs($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(fp_div_s);
%}

// Double-precision floating-point divide: dst = src1 / src2 (FDIVD).
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));
  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fdivd($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(fp_div_d);
%}
14222 
// Single-precision floating-point negate: dst = -src (FNEGS).
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg   $dst, $src" %}
  ins_encode %{
    __ fnegs($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(fp_uop_s);
%}

// Double-precision floating-point negate: dst = -src (FNEGD).
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));
  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}
  ins_encode %{
    __ fnegd($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(fp_uop_d);
%}
14250 
// Integer absolute value: compare against zero, then conditionally negate
// when negative. Clobbers the flags register.
instruct absI_reg(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (AbsI src));
  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $src, zr\n\t"
            "cnegw $dst, $src, Assembler::LT\t# int abs"
  %}
  ins_encode %{
    __ cmpw($src$$Register, zr);
    __ cnegw($dst$$Register, $src$$Register, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}

// Long absolute value: compare against zero, then conditionally negate
// when negative. Clobbers the flags register.
instruct absL_reg(iRegLNoSp dst, iRegL src, rFlagsReg cr)
%{
  match(Set dst (AbsL src));
  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmp  $src, zr\n\t"
            "cneg $dst, $src, Assembler::LT\t# long abs"
  %}
  ins_encode %{
    __ cmp($src$$Register, zr);
    __ cneg($dst$$Register, $src$$Register, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
14284 
// Single-precision floating-point absolute value (FABSS).
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(fp_uop_s);
%}

// Double-precision floating-point absolute value (FABSD).
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));
  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(fp_uop_d);
%}
14310 
// Fused |src1 - src2|, single precision: matches AbsF(SubF ...) and emits a
// single FABD instead of a subtract followed by an abs.
instruct absdF_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AbsF (SubF src1 src2)));
  ins_cost(INSN_COST * 3);
  format %{ "fabds   $dst, $src1, $src2" %}
  ins_encode %{
    __ fabds($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(fp_uop_s);
%}

// Fused |src1 - src2|, double precision (FABD).
instruct absdD_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AbsD (SubD src1 src2)));
  ins_cost(INSN_COST * 3);
  format %{ "fabdd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fabdd($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(fp_uop_d);
%}
14338 
// Double-precision square root (FSQRTD).
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  // Schedule on the double-precision divide/sqrt pipe. Previously fp_div_s,
  // which appears to have been swapped with sqrtF_reg's pipe class.
  ins_pipe(fp_div_d);
%}
14351 
// Single-precision square root (FSQRTS).
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (SqrtF src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  // Schedule on the single-precision divide/sqrt pipe. Previously fp_div_d,
  // which appears to have been swapped with sqrtD_reg's pipe class.
  ins_pipe(fp_div_s);
%}
14364 
// Half-precision square root (FSQRTH).
instruct sqrtHF_reg(vRegF dst, vRegF src) %{
  match(Set dst (SqrtHF src));
  format %{ "fsqrth $dst, $src" %}
  ins_encode %{
    __ fsqrth($dst$$FloatRegister,
              $src$$FloatRegister);
  %}
  ins_pipe(fp_div_s);
%}
14374 
// Math.rint, floor, ceil: round a double according to the compile-time
// rounding-mode constant (FRINTN / FRINTM / FRINTP respectively).
instruct roundD_reg(vRegD dst, vRegD src, immI rmode) %{
  match(Set dst (RoundDoubleMode src rmode));
  format %{ "frint  $dst, $src, $rmode" %}
  ins_encode %{
    switch ($rmode$$constant) {
      case RoundDoubleModeNode::rmode_rint:
        __ frintnd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_floor:
        __ frintmd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_ceil:
        __ frintpd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      default:
        // An unexpected mode constant previously fell through silently and
        // emitted no instruction at all; fail fast instead.
        ShouldNotReachHere();
    }
  %}
  ins_pipe(fp_uop_d);
%}
14397 
// Math.copySign for doubles: dst gets src2's sign bit and src1's magnitude.
instruct copySignD_reg(vRegD dst, vRegD src1, vRegD src2, vRegD zero) %{
  match(Set dst (CopySignD src1 (Binary src2 zero)));
  effect(TEMP_DEF dst, USE src1, USE src2, USE zero);
  format %{ "CopySignD  $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg),
                  zero = as_FloatRegister($zero$$reg);
    // fneg(0.0) yields -0.0 = 0x8000000000000000: a mask with only the sign
    // bit set.
    __ fnegd(dst, zero);
    // Bitwise select: where the mask bit is set take the bit from src2 (the
    // sign), elsewhere take the bit from src1 (the magnitude).
    __ bsl(dst, __ T8B, src2, src1);
  %}
  ins_pipe(fp_uop_d);
%}
14412 
// Math.copySign for floats: dst gets src2's sign bit and src1's magnitude.
instruct copySignF_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (CopySignF src1 src2));
  effect(TEMP_DEF dst, USE src1, USE src2);
  format %{ "CopySignF  $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg);
    // movi 0x80 << 24 builds 0x80000000 per lane: a sign-bit-only mask.
    __ movi(dst, __ T2S, 0x80, 24);
    // Bitwise select: sign bit from src2, all other bits from src1.
    // NOTE(review): pipe class below is fp_uop_d although this is the
    // single-precision form — confirm whether fp_uop_s was intended.
    __ bsl(dst, __ T8B, src2, src1);
  %}
  ins_pipe(fp_uop_d);
%}
14426 
// Math.signum for doubles: passes +-0.0 and NaN through unchanged,
// otherwise produces +-1.0 carrying src's sign.
instruct signumD_reg(vRegD dst, vRegD src, vRegD zero, vRegD one) %{
  match(Set dst (SignumD src (Binary zero one)));
  effect(TEMP_DEF dst, USE src, USE zero, USE one);
  format %{ "signumD  $dst, $src" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg),
                  dst = as_FloatRegister($dst$$reg),
                  zero = as_FloatRegister($zero$$reg),
                  one = as_FloatRegister($one$$reg);
    __ facgtd(dst, src, zero); // dst=0 for +-0.0 and NaN. 0xFFF..F otherwise
    __ ushrd(dst, dst, 1);     // dst=0 for +-0.0 and NaN. 0x7FF..F otherwise
    // Bit selection instruction gets bit from "one" for each enabled bit in
    // "dst", otherwise gets a bit from "src". For "src" that contains +-0.0 or
    // NaN the whole "src" will be copied because "dst" is zero. For all other
    // "src" values dst is 0x7FF..F, which means only the sign bit is copied
    // from "src", and all other bits are copied from 1.0.
    __ bsl(dst, __ T8B, one, src);
  %}
  ins_pipe(fp_uop_d);
%}
14447 
// Math.signum for floats: passes +-0.0 and NaN through unchanged,
// otherwise produces +-1.0f carrying src's sign.
instruct signumF_reg(vRegF dst, vRegF src, vRegF zero, vRegF one) %{
  match(Set dst (SignumF src (Binary zero one)));
  effect(TEMP_DEF dst, USE src, USE zero, USE one);
  format %{ "signumF  $dst, $src" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg),
                  dst = as_FloatRegister($dst$$reg),
                  zero = as_FloatRegister($zero$$reg),
                  one = as_FloatRegister($one$$reg);
    __ facgts(dst, src, zero);    // dst=0 for +-0.0 and NaN. 0xFFF..F otherwise
    __ ushr(dst, __ T2S, dst, 1); // dst=0 for +-0.0 and NaN. 0x7FF..F otherwise
    // Bit selection instruction gets bit from "one" for each enabled bit in
    // "dst", otherwise gets a bit from "src". For "src" that contains +-0.0 or
    // NaN the whole "src" will be copied because "dst" is zero. For all other
    // "src" values dst is 0x7FF..F, which means only the sign bit is copied
    // from "src", and all other bits are copied from 1.0.
    __ bsl(dst, __ T8B, one, src);
  %}
  ins_pipe(fp_uop_d);
%}
14468 
// Spin-wait hint (e.g. Thread.onSpinWait intrinsic); delegates to
// MacroAssembler::spin_wait().
instruct onspinwait() %{
  match(OnSpinWait);
  ins_cost(INSN_COST);

  format %{ "onspinwait" %}

  ins_encode %{
    __ spin_wait();
  %}
  ins_pipe(pipe_class_empty);
%}
14480 
14481 // ============================================================================
14482 // Logical Instructions
14483 
14484 // Integer Logical Instructions
14485 
14486 // And Instructions
14487 
14488 
// Bitwise AND of two ints (ANDW).
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));
  ins_cost(INSN_COST);
  format %{ "andw  $dst, $src1, $src2\t# int" %}
  ins_encode %{
    __ andw($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
14503 
// Bitwise AND of an int with a logical immediate (ANDW).
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // The format previously said "andsw", but the encoding emits the
  // non-flag-setting andw; keep the printed mnemonic truthful.
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14518 
14519 // Or Instructions
14520 
// Bitwise OR of two ints (ORRW).
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));
  ins_cost(INSN_COST);
  format %{ "orrw  $dst, $src1, $src2\t# int" %}
  ins_encode %{
    __ orrw($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Bitwise OR of an int with a logical immediate (ORRW).
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));
  ins_cost(INSN_COST);
  format %{ "orrw  $dst, $src1, $src2\t# int" %}
  ins_encode %{
    __ orrw($dst$$Register, $src1$$Register, (uint64_t)($src2$$constant));
  %}
  ins_pipe(ialu_reg_imm);
%}
14550 
14551 // Xor Instructions
14552 
// Bitwise XOR of two ints (EORW).
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));
  ins_cost(INSN_COST);
  format %{ "eorw  $dst, $src1, $src2\t# int" %}
  ins_encode %{
    __ eorw($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Bitwise XOR of an int with a logical immediate (EORW).
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));
  ins_cost(INSN_COST);
  format %{ "eorw  $dst, $src1, $src2\t# int" %}
  ins_encode %{
    __ eorw($dst$$Register, $src1$$Register, (uint64_t)($src2$$constant));
  %}
  ins_pipe(ialu_reg_imm);
%}
14582 
14583 // Long Logical Instructions
14584 // TODO
14585 
// Bitwise AND of two longs (AND). Format previously annotated "# int";
// corrected to "# long".
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Bitwise AND of a long with a logical immediate (AND).
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14615 
14616 // Or Instructions
14617 
// Bitwise OR of two longs (ORR). Format previously annotated "# int";
// corrected to "# long".
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Bitwise OR of a long with a logical immediate (ORR).
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14647 
14648 // Xor Instructions
14649 
// Bitwise XOR of two longs (EOR). Format previously annotated "# int";
// corrected to "# long".
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Bitwise XOR of a long with a logical immediate (EOR). The format/ins_cost
// order is harmonized with the sibling patterns above.
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14679 
// Sign-extend int to long: sbfm with immr=0, imms=31 is the sxtw alias.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
14691 
// Unsigned int-to-long conversion: (long)src & 0xFFFFFFFF. This pattern
// occurs in big-number (e.g. BigInteger) arithmetic. ubfm with immr=0,
// imms=31 zero-extends the low word.
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14705 
14706 instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
14707   match(Set dst (ConvL2I src));
14708 
14709   ins_cost(INSN_COST);
14710   format %{ "movw  $dst, $src \t// l2i" %}
14711 
14712   ins_encode %{
14713     __ movw(as_Register($dst$$reg), as_Register($src$$reg));
14714   %}
14715 
14716   ins_pipe(ialu_reg);
14717 %}
14718 
14719 instruct convD2F_reg(vRegF dst, vRegD src) %{
14720   match(Set dst (ConvD2F src));
14721 
14722   ins_cost(INSN_COST * 5);
14723   format %{ "fcvtd  $dst, $src \t// d2f" %}
14724 
14725   ins_encode %{
14726     __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
14727   %}
14728 
14729   ins_pipe(fp_d2f);
14730 %}
14731 
14732 instruct convF2D_reg(vRegD dst, vRegF src) %{
14733   match(Set dst (ConvF2D src));
14734 
14735   ins_cost(INSN_COST * 5);
14736   format %{ "fcvts  $dst, $src \t// f2d" %}
14737 
14738   ins_encode %{
14739     __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
14740   %}
14741 
14742   ins_pipe(fp_f2d);
14743 %}
14744 
14745 instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
14746   match(Set dst (ConvF2I src));
14747 
14748   ins_cost(INSN_COST * 5);
14749   format %{ "fcvtzsw  $dst, $src \t// f2i" %}
14750 
14751   ins_encode %{
14752     __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
14753   %}
14754 
14755   ins_pipe(fp_f2i);
14756 %}
14757 
14758 instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
14759   match(Set dst (ConvF2L src));
14760 
14761   ins_cost(INSN_COST * 5);
14762   format %{ "fcvtzs  $dst, $src \t// f2l" %}
14763 
14764   ins_encode %{
14765     __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
14766   %}
14767 
14768   ins_pipe(fp_f2l);
14769 %}
14770 
14771 instruct convF2HF_reg_reg(iRegINoSp dst, vRegF src, vRegF tmp) %{
14772   match(Set dst (ConvF2HF src));
14773   format %{ "fcvt $tmp, $src\t# convert single to half precision\n\t"
14774             "smov $dst, $tmp\t# move result from $tmp to $dst"
14775   %}
14776   effect(TEMP tmp);
14777   ins_encode %{
14778       __ flt_to_flt16($dst$$Register, $src$$FloatRegister, $tmp$$FloatRegister);
14779   %}
14780   ins_pipe(pipe_slow);
14781 %}
14782 
14783 instruct convHF2F_reg_reg(vRegF dst, iRegINoSp src, vRegF tmp) %{
14784   match(Set dst (ConvHF2F src));
14785   format %{ "mov $tmp, $src\t# move source from $src to $tmp\n\t"
14786             "fcvt $dst, $tmp\t# convert half to single precision"
14787   %}
14788   effect(TEMP tmp);
14789   ins_encode %{
14790       __ flt16_to_flt($dst$$FloatRegister, $src$$Register, $tmp$$FloatRegister);
14791   %}
14792   ins_pipe(pipe_slow);
14793 %}
14794 
14795 instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
14796   match(Set dst (ConvI2F src));
14797 
14798   ins_cost(INSN_COST * 5);
14799   format %{ "scvtfws  $dst, $src \t// i2f" %}
14800 
14801   ins_encode %{
14802     __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
14803   %}
14804 
14805   ins_pipe(fp_i2f);
14806 %}
14807 
14808 instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
14809   match(Set dst (ConvL2F src));
14810 
14811   ins_cost(INSN_COST * 5);
14812   format %{ "scvtfs  $dst, $src \t// l2f" %}
14813 
14814   ins_encode %{
14815     __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
14816   %}
14817 
14818   ins_pipe(fp_l2f);
14819 %}
14820 
14821 instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
14822   match(Set dst (ConvD2I src));
14823 
14824   ins_cost(INSN_COST * 5);
14825   format %{ "fcvtzdw  $dst, $src \t// d2i" %}
14826 
14827   ins_encode %{
14828     __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
14829   %}
14830 
14831   ins_pipe(fp_d2i);
14832 %}
14833 
14834 instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
14835   match(Set dst (ConvD2L src));
14836 
14837   ins_cost(INSN_COST * 5);
14838   format %{ "fcvtzd  $dst, $src \t// d2l" %}
14839 
14840   ins_encode %{
14841     __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
14842   %}
14843 
14844   ins_pipe(fp_d2l);
14845 %}
14846 
14847 instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
14848   match(Set dst (ConvI2D src));
14849 
14850   ins_cost(INSN_COST * 5);
14851   format %{ "scvtfwd  $dst, $src \t// i2d" %}
14852 
14853   ins_encode %{
14854     __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
14855   %}
14856 
14857   ins_pipe(fp_i2d);
14858 %}
14859 
14860 instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
14861   match(Set dst (ConvL2D src));
14862 
14863   ins_cost(INSN_COST * 5);
14864   format %{ "scvtfd  $dst, $src \t// l2d" %}
14865 
14866   ins_encode %{
14867     __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
14868   %}
14869 
14870   ins_pipe(fp_l2d);
14871 %}
14872 
14873 instruct round_double_reg(iRegLNoSp dst, vRegD src, vRegD ftmp, rFlagsReg cr)
14874 %{
14875   match(Set dst (RoundD src));
14876   effect(TEMP_DEF dst, TEMP ftmp, KILL cr);
14877   format %{ "java_round_double $dst,$src"%}
14878   ins_encode %{
14879     __ java_round_double($dst$$Register, as_FloatRegister($src$$reg),
14880                          as_FloatRegister($ftmp$$reg));
14881   %}
14882   ins_pipe(pipe_slow);
14883 %}
14884 
14885 instruct round_float_reg(iRegINoSp dst, vRegF src, vRegF ftmp, rFlagsReg cr)
14886 %{
14887   match(Set dst (RoundF src));
14888   effect(TEMP_DEF dst, TEMP ftmp, KILL cr);
14889   format %{ "java_round_float $dst,$src"%}
14890   ins_encode %{
14891     __ java_round_float($dst$$Register, as_FloatRegister($src$$reg),
14892                         as_FloatRegister($ftmp$$reg));
14893   %}
14894   ins_pipe(pipe_slow);
14895 %}
14896 
14897 // stack <-> reg and reg <-> reg shuffles with no conversion
14898 
14899 instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{
14900 
14901   match(Set dst (MoveF2I src));
14902 
14903   effect(DEF dst, USE src);
14904 
14905   ins_cost(4 * INSN_COST);
14906 
14907   format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}
14908 
14909   ins_encode %{
14910     __ ldrw($dst$$Register, Address(sp, $src$$disp));
14911   %}
14912 
14913   ins_pipe(iload_reg_reg);
14914 
14915 %}
14916 
14917 instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{
14918 
14919   match(Set dst (MoveI2F src));
14920 
14921   effect(DEF dst, USE src);
14922 
14923   ins_cost(4 * INSN_COST);
14924 
14925   format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}
14926 
14927   ins_encode %{
14928     __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
14929   %}
14930 
14931   ins_pipe(pipe_class_memory);
14932 
14933 %}
14934 
14935 instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{
14936 
14937   match(Set dst (MoveD2L src));
14938 
14939   effect(DEF dst, USE src);
14940 
14941   ins_cost(4 * INSN_COST);
14942 
14943   format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}
14944 
14945   ins_encode %{
14946     __ ldr($dst$$Register, Address(sp, $src$$disp));
14947   %}
14948 
14949   ins_pipe(iload_reg_reg);
14950 
14951 %}
14952 
14953 instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{
14954 
14955   match(Set dst (MoveL2D src));
14956 
14957   effect(DEF dst, USE src);
14958 
14959   ins_cost(4 * INSN_COST);
14960 
14961   format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}
14962 
14963   ins_encode %{
14964     __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
14965   %}
14966 
14967   ins_pipe(pipe_class_memory);
14968 
14969 %}
14970 
14971 instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{
14972 
14973   match(Set dst (MoveF2I src));
14974 
14975   effect(DEF dst, USE src);
14976 
14977   ins_cost(INSN_COST);
14978 
14979   format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}
14980 
14981   ins_encode %{
14982     __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
14983   %}
14984 
14985   ins_pipe(pipe_class_memory);
14986 
14987 %}
14988 
14989 instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{
14990 
14991   match(Set dst (MoveI2F src));
14992 
14993   effect(DEF dst, USE src);
14994 
14995   ins_cost(INSN_COST);
14996 
14997   format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}
14998 
14999   ins_encode %{
15000     __ strw($src$$Register, Address(sp, $dst$$disp));
15001   %}
15002 
15003   ins_pipe(istore_reg_reg);
15004 
15005 %}
15006 
// Raw-bits move of a double from an FP register to a long stack slot.
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Operand order corrected: the emitted instruction stores $src to $dst
  // (strd src, [sp, #disp]), matching the sibling reg->stack rules.
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
15024 
15025 instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{
15026 
15027   match(Set dst (MoveL2D src));
15028 
15029   effect(DEF dst, USE src);
15030 
15031   ins_cost(INSN_COST);
15032 
15033   format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}
15034 
15035   ins_encode %{
15036     __ str($src$$Register, Address(sp, $dst$$disp));
15037   %}
15038 
15039   ins_pipe(istore_reg_reg);
15040 
15041 %}
15042 
15043 instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{
15044 
15045   match(Set dst (MoveF2I src));
15046 
15047   effect(DEF dst, USE src);
15048 
15049   ins_cost(INSN_COST);
15050 
15051   format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}
15052 
15053   ins_encode %{
15054     __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
15055   %}
15056 
15057   ins_pipe(fp_f2i);
15058 
15059 %}
15060 
15061 instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{
15062 
15063   match(Set dst (MoveI2F src));
15064 
15065   effect(DEF dst, USE src);
15066 
15067   ins_cost(INSN_COST);
15068 
15069   format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}
15070 
15071   ins_encode %{
15072     __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
15073   %}
15074 
15075   ins_pipe(fp_i2f);
15076 
15077 %}
15078 
15079 instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
15080 
15081   match(Set dst (MoveD2L src));
15082 
15083   effect(DEF dst, USE src);
15084 
15085   ins_cost(INSN_COST);
15086 
15087   format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}
15088 
15089   ins_encode %{
15090     __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
15091   %}
15092 
15093   ins_pipe(fp_d2l);
15094 
15095 %}
15096 
15097 instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{
15098 
15099   match(Set dst (MoveL2D src));
15100 
15101   effect(DEF dst, USE src);
15102 
15103   ins_cost(INSN_COST);
15104 
15105   format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}
15106 
15107   ins_encode %{
15108     __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
15109   %}
15110 
15111   ins_pipe(fp_l2d);
15112 
15113 %}
15114 
15115 // ============================================================================
15116 // clearing of an array
15117 
// Zero a block of memory with a variable word count.
// cnt and base live in fixed registers (r11/r10) as required by zero_words.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  // zero_words clobbers both of its fixed register arguments, hence USE_KILL.
  effect(USE_KILL cnt, USE_KILL base, KILL cr);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    // zero_words may emit a stub call; it returns nullptr when the code
    // cache cannot accommodate the stub, in which case compilation bails out.
    address tpc = __ zero_words($base$$Register, $cnt$$Register);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
15136 
// Zero a block of memory with a small constant word count.
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, iRegL_R11 temp, Universe dummy, rFlagsReg cr)
%{
  // Only applies when the constant count (in words) is below the block-zeroing
  // threshold; BlockZeroingLowLimit is in bytes, hence the shift to words.
  predicate((uint64_t)n->in(2)->get_long()
            < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
  match(Set dummy (ClearArray cnt base));
  effect(TEMP temp, USE_KILL base, KILL cr);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    // Constant-count variant of zero_words; same nullptr-on-full-code-cache
    // contract as the register-count rule above.
    address tpc = __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
15157 
15158 // ============================================================================
15159 // Overflow Math Instructions
15160 
15161 instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
15162 %{
15163   match(Set cr (OverflowAddI op1 op2));
15164 
15165   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
15166   ins_cost(INSN_COST);
15167   ins_encode %{
15168     __ cmnw($op1$$Register, $op2$$Register);
15169   %}
15170 
15171   ins_pipe(icmp_reg_reg);
15172 %}
15173 
15174 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
15175 %{
15176   match(Set cr (OverflowAddI op1 op2));
15177 
15178   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
15179   ins_cost(INSN_COST);
15180   ins_encode %{
15181     __ cmnw($op1$$Register, $op2$$constant);
15182   %}
15183 
15184   ins_pipe(icmp_reg_imm);
15185 %}
15186 
15187 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
15188 %{
15189   match(Set cr (OverflowAddL op1 op2));
15190 
15191   format %{ "cmn   $op1, $op2\t# overflow check long" %}
15192   ins_cost(INSN_COST);
15193   ins_encode %{
15194     __ cmn($op1$$Register, $op2$$Register);
15195   %}
15196 
15197   ins_pipe(icmp_reg_reg);
15198 %}
15199 
15200 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
15201 %{
15202   match(Set cr (OverflowAddL op1 op2));
15203 
15204   format %{ "adds  zr, $op1, $op2\t# overflow check long" %}
15205   ins_cost(INSN_COST);
15206   ins_encode %{
15207     __ adds(zr, $op1$$Register, $op2$$constant);
15208   %}
15209 
15210   ins_pipe(icmp_reg_imm);
15211 %}
15212 
15213 instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
15214 %{
15215   match(Set cr (OverflowSubI op1 op2));
15216 
15217   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
15218   ins_cost(INSN_COST);
15219   ins_encode %{
15220     __ cmpw($op1$$Register, $op2$$Register);
15221   %}
15222 
15223   ins_pipe(icmp_reg_reg);
15224 %}
15225 
15226 instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
15227 %{
15228   match(Set cr (OverflowSubI op1 op2));
15229 
15230   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
15231   ins_cost(INSN_COST);
15232   ins_encode %{
15233     __ cmpw($op1$$Register, $op2$$constant);
15234   %}
15235 
15236   ins_pipe(icmp_reg_imm);
15237 %}
15238 
15239 instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
15240 %{
15241   match(Set cr (OverflowSubL op1 op2));
15242 
15243   format %{ "cmp   $op1, $op2\t# overflow check long" %}
15244   ins_cost(INSN_COST);
15245   ins_encode %{
15246     __ cmp($op1$$Register, $op2$$Register);
15247   %}
15248 
15249   ins_pipe(icmp_reg_reg);
15250 %}
15251 
15252 instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
15253 %{
15254   match(Set cr (OverflowSubL op1 op2));
15255 
15256   format %{ "cmp   $op1, $op2\t# overflow check long" %}
15257   ins_cost(INSN_COST);
15258   ins_encode %{
15259     __ subs(zr, $op1$$Register, $op2$$constant);
15260   %}
15261 
15262   ins_pipe(icmp_reg_imm);
15263 %}
15264 
15265 instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
15266 %{
15267   match(Set cr (OverflowSubI zero op1));
15268 
15269   format %{ "cmpw  zr, $op1\t# overflow check int" %}
15270   ins_cost(INSN_COST);
15271   ins_encode %{
15272     __ cmpw(zr, $op1$$Register);
15273   %}
15274 
15275   ins_pipe(icmp_reg_imm);
15276 %}
15277 
15278 instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
15279 %{
15280   match(Set cr (OverflowSubL zero op1));
15281 
15282   format %{ "cmp   zr, $op1\t# overflow check long" %}
15283   ins_cost(INSN_COST);
15284   ins_encode %{
15285     __ cmp(zr, $op1$$Register);
15286   %}
15287 
15288   ins_pipe(icmp_reg_imm);
15289 %}
15290 
// Int multiply with overflow detection, producing flags for an arbitrary
// cmpOp: smull computes the exact 64-bit product; it differs from its own
// sign-extended low word (NE) exactly when the product does not fit in an
// int. The cselw/cmpw pair then turns that NE into a set V flag, since
// 0x80000000 - 1 overflows (VS) while 0 - 1 does not (VC).
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
15311 
// Fused int overflow-multiply check and branch. Cheaper than the flag-setting
// rule above because the NE/EQ outcome of the sign-extension compare can be
// branched on directly instead of being converted into the V flag.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  // Only overflow/no_overflow tests can be mapped onto the NE/EQ result.
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    // Map the requested VS (overflow) / VC (no_overflow) test onto the
    // NE / EQ outcome of the compare above.
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
15333 
// Long multiply with overflow detection, producing flags for an arbitrary
// cmpOp. mul/smulh yield the full 128-bit product; the product fits in a
// long exactly when the high 64 bits are a pure sign extension of the low
// 64 (i.e. equal to low >> 63 arithmetic). As in the int rule, the NE
// outcome is then converted into a set V flag via 0x80000000 - 1.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
15356 
// Fused long overflow-multiply check and branch; branches directly on the
// NE/EQ outcome of the high-half compare instead of materializing V.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  // Only overflow/no_overflow tests can be mapped onto the NE/EQ result.
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    // Map the requested VS (overflow) / VC (no_overflow) test onto NE / EQ.
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
15380 
15381 // ============================================================================
15382 // Compare Instructions
15383 
15384 instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
15385 %{
15386   match(Set cr (CmpI op1 op2));
15387 
15388   effect(DEF cr, USE op1, USE op2);
15389 
15390   ins_cost(INSN_COST);
15391   format %{ "cmpw  $op1, $op2" %}
15392 
15393   ins_encode(aarch64_enc_cmpw(op1, op2));
15394 
15395   ins_pipe(icmp_reg_reg);
15396 %}
15397 
15398 instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
15399 %{
15400   match(Set cr (CmpI op1 zero));
15401 
15402   effect(DEF cr, USE op1);
15403 
15404   ins_cost(INSN_COST);
15405   format %{ "cmpw $op1, 0" %}
15406 
15407   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
15408 
15409   ins_pipe(icmp_reg_imm);
15410 %}
15411 
15412 instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
15413 %{
15414   match(Set cr (CmpI op1 op2));
15415 
15416   effect(DEF cr, USE op1);
15417 
15418   ins_cost(INSN_COST);
15419   format %{ "cmpw  $op1, $op2" %}
15420 
15421   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
15422 
15423   ins_pipe(icmp_reg_imm);
15424 %}
15425 
15426 instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
15427 %{
15428   match(Set cr (CmpI op1 op2));
15429 
15430   effect(DEF cr, USE op1);
15431 
15432   ins_cost(INSN_COST * 2);
15433   format %{ "cmpw  $op1, $op2" %}
15434 
15435   ins_encode(aarch64_enc_cmpw_imm(op1, op2));
15436 
15437   ins_pipe(icmp_reg_imm);
15438 %}
15439 
15440 // Unsigned compare Instructions; really, same as signed compare
15441 // except it should only be used to feed an If or a CMovI which takes a
15442 // cmpOpU.
15443 
15444 instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
15445 %{
15446   match(Set cr (CmpU op1 op2));
15447 
15448   effect(DEF cr, USE op1, USE op2);
15449 
15450   ins_cost(INSN_COST);
15451   format %{ "cmpw  $op1, $op2\t# unsigned" %}
15452 
15453   ins_encode(aarch64_enc_cmpw(op1, op2));
15454 
15455   ins_pipe(icmp_reg_reg);
15456 %}
15457 
15458 instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
15459 %{
15460   match(Set cr (CmpU op1 zero));
15461 
15462   effect(DEF cr, USE op1);
15463 
15464   ins_cost(INSN_COST);
15465   format %{ "cmpw $op1, #0\t# unsigned" %}
15466 
15467   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
15468 
15469   ins_pipe(icmp_reg_imm);
15470 %}
15471 
15472 instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
15473 %{
15474   match(Set cr (CmpU op1 op2));
15475 
15476   effect(DEF cr, USE op1);
15477 
15478   ins_cost(INSN_COST);
15479   format %{ "cmpw  $op1, $op2\t# unsigned" %}
15480 
15481   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
15482 
15483   ins_pipe(icmp_reg_imm);
15484 %}
15485 
15486 instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
15487 %{
15488   match(Set cr (CmpU op1 op2));
15489 
15490   effect(DEF cr, USE op1);
15491 
15492   ins_cost(INSN_COST * 2);
15493   format %{ "cmpw  $op1, $op2\t# unsigned" %}
15494 
15495   ins_encode(aarch64_enc_cmpw_imm(op1, op2));
15496 
15497   ins_pipe(icmp_reg_imm);
15498 %}
15499 
15500 instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
15501 %{
15502   match(Set cr (CmpL op1 op2));
15503 
15504   effect(DEF cr, USE op1, USE op2);
15505 
15506   ins_cost(INSN_COST);
15507   format %{ "cmp  $op1, $op2" %}
15508 
15509   ins_encode(aarch64_enc_cmp(op1, op2));
15510 
15511   ins_pipe(icmp_reg_reg);
15512 %}
15513 
15514 instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
15515 %{
15516   match(Set cr (CmpL op1 zero));
15517 
15518   effect(DEF cr, USE op1);
15519 
15520   ins_cost(INSN_COST);
15521   format %{ "tst  $op1" %}
15522 
15523   ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));
15524 
15525   ins_pipe(icmp_reg_imm);
15526 %}
15527 
15528 instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
15529 %{
15530   match(Set cr (CmpL op1 op2));
15531 
15532   effect(DEF cr, USE op1);
15533 
15534   ins_cost(INSN_COST);
15535   format %{ "cmp  $op1, $op2" %}
15536 
15537   ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));
15538 
15539   ins_pipe(icmp_reg_imm);
15540 %}
15541 
15542 instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
15543 %{
15544   match(Set cr (CmpL op1 op2));
15545 
15546   effect(DEF cr, USE op1);
15547 
15548   ins_cost(INSN_COST * 2);
15549   format %{ "cmp  $op1, $op2" %}
15550 
15551   ins_encode(aarch64_enc_cmp_imm(op1, op2));
15552 
15553   ins_pipe(icmp_reg_imm);
15554 %}
15555 
15556 instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
15557 %{
15558   match(Set cr (CmpUL op1 op2));
15559 
15560   effect(DEF cr, USE op1, USE op2);
15561 
15562   ins_cost(INSN_COST);
15563   format %{ "cmp  $op1, $op2" %}
15564 
15565   ins_encode(aarch64_enc_cmp(op1, op2));
15566 
15567   ins_pipe(icmp_reg_reg);
15568 %}
15569 
15570 instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
15571 %{
15572   match(Set cr (CmpUL op1 zero));
15573 
15574   effect(DEF cr, USE op1);
15575 
15576   ins_cost(INSN_COST);
15577   format %{ "tst  $op1" %}
15578 
15579   ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));
15580 
15581   ins_pipe(icmp_reg_imm);
15582 %}
15583 
15584 instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
15585 %{
15586   match(Set cr (CmpUL op1 op2));
15587 
15588   effect(DEF cr, USE op1);
15589 
15590   ins_cost(INSN_COST);
15591   format %{ "cmp  $op1, $op2" %}
15592 
15593   ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));
15594 
15595   ins_pipe(icmp_reg_imm);
15596 %}
15597 
15598 instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
15599 %{
15600   match(Set cr (CmpUL op1 op2));
15601 
15602   effect(DEF cr, USE op1);
15603 
15604   ins_cost(INSN_COST * 2);
15605   format %{ "cmp  $op1, $op2" %}
15606 
15607   ins_encode(aarch64_enc_cmp_imm(op1, op2));
15608 
15609   ins_pipe(icmp_reg_imm);
15610 %}
15611 
15612 instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
15613 %{
15614   match(Set cr (CmpP op1 op2));
15615 
15616   effect(DEF cr, USE op1, USE op2);
15617 
15618   ins_cost(INSN_COST);
15619   format %{ "cmp  $op1, $op2\t // ptr" %}
15620 
15621   ins_encode(aarch64_enc_cmpp(op1, op2));
15622 
15623   ins_pipe(icmp_reg_reg);
15624 %}
15625 
15626 instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
15627 %{
15628   match(Set cr (CmpN op1 op2));
15629 
15630   effect(DEF cr, USE op1, USE op2);
15631 
15632   ins_cost(INSN_COST);
15633   format %{ "cmp  $op1, $op2\t // compressed ptr" %}
15634 
15635   ins_encode(aarch64_enc_cmpn(op1, op2));
15636 
15637   ins_pipe(icmp_reg_reg);
15638 %}
15639 
15640 instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
15641 %{
15642   match(Set cr (CmpP op1 zero));
15643 
15644   effect(DEF cr, USE op1, USE zero);
15645 
15646   ins_cost(INSN_COST);
15647   format %{ "cmp  $op1, 0\t // ptr" %}
15648 
15649   ins_encode(aarch64_enc_testp(op1));
15650 
15651   ins_pipe(icmp_reg_imm);
15652 %}
15653 
15654 instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
15655 %{
15656   match(Set cr (CmpN op1 zero));
15657 
15658   effect(DEF cr, USE op1, USE zero);
15659 
15660   ins_cost(INSN_COST);
15661   format %{ "cmp  $op1, 0\t // compressed ptr" %}
15662 
15663   ins_encode(aarch64_enc_testn(op1));
15664 
15665   ins_pipe(icmp_reg_imm);
15666 %}
15667 
15668 // FP comparisons
15669 //
15670 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
15671 // using normal cmpOp. See declaration of rFlagsReg for details.
15672 
15673 instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
15674 %{
15675   match(Set cr (CmpF src1 src2));
15676 
15677   ins_cost(3 * INSN_COST);
15678   format %{ "fcmps $src1, $src2" %}
15679 
15680   ins_encode %{
15681     __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
15682   %}
15683 
15684   ins_pipe(pipe_class_compare);
15685 %}
15686 
15687 instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
15688 %{
15689   match(Set cr (CmpF src1 src2));
15690 
15691   ins_cost(3 * INSN_COST);
15692   format %{ "fcmps $src1, 0.0" %}
15693 
15694   ins_encode %{
15695     __ fcmps(as_FloatRegister($src1$$reg), 0.0);
15696   %}
15697 
15698   ins_pipe(pipe_class_compare);
15699 %}
15700 // FROM HERE
15701 
15702 instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
15703 %{
15704   match(Set cr (CmpD src1 src2));
15705 
15706   ins_cost(3 * INSN_COST);
15707   format %{ "fcmpd $src1, $src2" %}
15708 
15709   ins_encode %{
15710     __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
15711   %}
15712 
15713   ins_pipe(pipe_class_compare);
15714 %}
15715 
15716 instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
15717 %{
15718   match(Set cr (CmpD src1 src2));
15719 
15720   ins_cost(3 * INSN_COST);
15721   format %{ "fcmpd $src1, 0.0" %}
15722 
15723   ins_encode %{
15724     __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
15725   %}
15726 
15727   ins_pipe(pipe_class_compare);
15728 %}
15729 
// Three-way float compare (CmpF3): -1 if less or unordered, 0 if equal,
// +1 if greater, materialized into an int register.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Format string fixed: the stray unbalanced "(" is gone and the text now
  // reads as plain assembly.
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (dead "Label done; ... bind(done)" removed: nothing ever branched to it)
  %}

  ins_pipe(pipe_class_default);

%}
15757 
// Three-way double compare (CmpD3): -1 if less or unordered, 0 if equal,
// +1 if greater, materialized into an int register.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Format string fixed: the stray unbalanced "(" is gone.
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (dead "Label done; ... bind(done)" removed: nothing ever branched to it)
  %}
  ins_pipe(pipe_class_default);

%}
15784 
// Three-way float compare against the constant 0.0 (uses the fcmp-with-zero
// form, saving a register for the second operand).
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Format string fixed: the stray unbalanced "(" is gone.
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (dead "Label done; ... bind(done)" removed: nothing ever branched to it)
  %}

  ins_pipe(pipe_class_default);

%}
15811 
// Three-way double compare against the constant 0.0 (uses the fcmp-with-zero
// form, saving a register for the second operand).
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Format string fixed: the stray unbalanced "(" is gone.
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (dead "Label done; ... bind(done)" removed: nothing ever branched to it)
  %}
  ins_pipe(pipe_class_default);

%}
15837 
// Int mask from signed compare: $dst = ($p < $q) ? -1 : 0.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    // csetw leaves 1 on LT, 0 otherwise; subtracting from zr turns 1 into -1.
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
15858 
// Int mask from compare with zero: $dst = ($src < 0) ? -1 : 0.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    // Arithmetic shift by 31 broadcasts the sign bit across all 32 bits.
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
15874 
15875 // ============================================================================
15876 // Max and Min
15877 
15878 // Like compI_reg_reg or compI_reg_immI0 but without match rule and second zero parameter.
15879 
// Flag-setting compare of $src against zero.  Has no match rule, so the
// matcher never selects it directly; it exists to be instantiated from
// expand rules.
instruct compI_reg_imm0(rFlagsReg cr, iRegI src)
%{
  effect(DEF cr, USE src);
  ins_cost(INSN_COST);
  format %{ "cmpw $src, 0" %}

  ins_encode %{
    __ cmpw($src$$Register, 0);
  %}
  ins_pipe(icmp_reg_imm);
%}
15891 
// Signed int minimum.  Expands into a compare followed by an lt-conditional
// move (cmovI_reg_reg_lt, defined elsewhere in this file).
instruct minI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2)
%{
  match(Set dst (MinI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_lt(dst, src1, src2, cr);
  %}
%}
15903 
// Signed int maximum.  Expands into a compare followed by a gt-conditional
// move (cmovI_reg_reg_gt, defined elsewhere in this file).
instruct maxI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2)
%{
  match(Set dst (MaxI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_gt(dst, src1, src2, cr);
  %}
%}
15915 
15916 
15917 // ============================================================================
15918 // Branch Instructions
15919 
15920 // Direct Branch.
// Unconditional direct branch to $lbl.
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
15934 
15935 // Conditional Near Branch
// Signed conditional branch on the flags in $cr to the near label $lbl.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
15955 
15956 // Conditional Near Branch Unsigned
// Unsigned conditional branch on the flags in $cr to the near label $lbl.
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
15976 
15977 // Make use of CBZ and CBNZ.  These instructions, as well as being
15978 // shorter than (cmp; branch), have the additional benefit of not
15979 // killing the flags.
15980 
// Compare-int-with-zero fused with branch: EQ emits CBZW, NE emits CBNZW.
// cmpOpEqNe restricts the condition to those two values.
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // Braced to match the style of the unsigned variants below.
    if (cond == Assembler::EQ) {
      __ cbzw($op1$$Register, *L);
    } else {
      __ cbnzw($op1$$Register, *L);
    }
  %}
  ins_pipe(pipe_cmp_branch);
%}
15997 
// Compare-long-with-zero fused with branch: EQ emits CBZ, NE emits CBNZ.
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // Braced to match the style of the unsigned variants below.
    if (cond == Assembler::EQ) {
      __ cbz($op1$$Register, *L);
    } else {
      __ cbnz($op1$$Register, *L);
    }
  %}
  ins_pipe(pipe_cmp_branch);
%}
16014 
// Pointer null-check fused with branch: EQ emits CBZ, NE emits CBNZ.
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // Braced to match the style of the unsigned variants below.
    if (cond == Assembler::EQ) {
      __ cbz($op1$$Register, *L);
    } else {
      __ cbnz($op1$$Register, *L);
    }
  %}
  ins_pipe(pipe_cmp_branch);
%}
16031 
// Narrow-oop null-check fused with branch: 32-bit CBZW/CBNZW on the
// compressed encoding.
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // Braced to match the style of the unsigned variants below.
    if (cond == Assembler::EQ) {
      __ cbzw($op1$$Register, *L);
    } else {
      __ cbnzw($op1$$Register, *L);
    }
  %}
  ins_pipe(pipe_cmp_branch);
%}
16048 
// Null check of a decoded narrow oop fused with branch.  The decoded oop is
// null exactly when the 32-bit compressed form is zero, so the DecodeN is
// elided and CBZW/CBNZW tests the narrow register directly.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // Braced to match the style of the unsigned variants below.
    if (cond == Assembler::EQ) {
      __ cbzw($oop$$Register, *L);
    } else {
      __ cbnzw($oop$$Register, *L);
    }
  %}
  ins_pipe(pipe_cmp_branch);
%}
16065 
// Unsigned int compare with zero fused with branch.  For unsigned x:
// x <= 0 iff x == 0 and x > 0 iff x != 0, so LS folds to CBZW and HI to CBNZW.
instruct cmpUI_imm0_branch(cmpOpUEqNeLeGt cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS) {
      __ cbzw($op1$$Register, *L);
    } else {
      assert(cond == Assembler::NE || cond == Assembler::HI, "unexpected condition");
      __ cbnzw($op1$$Register, *L);
    }
  %}
  ins_pipe(pipe_cmp_branch);
%}
16084 
// Unsigned long compare with zero fused with branch.  For unsigned x:
// x <= 0 iff x == 0 and x > 0 iff x != 0, so LS folds to CBZ and HI to CBNZ.
instruct cmpUL_imm0_branch(cmpOpUEqNeLeGt cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS) {
      __ cbz($op1$$Register, *L);
    } else {
      assert(cond == Assembler::NE || cond == Assembler::HI, "unexpected condition");
      __ cbnz($op1$$Register, *L);
    }
  %}
  ins_pipe(pipe_cmp_branch);
%}
16103 
16104 // Test bit and Branch
16105 
16106 // Patterns for short (< 32KiB) variants
// Sign test of a long fused with branch: x < 0 iff bit 63 is set, so LT maps
// to a test-bit-nonzero (NE) and GE to a test-bit-zero (EQ) on bit 63.
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16122 
// Sign test of an int fused with branch: x < 0 iff bit 31 is set, so LT maps
// to a test-bit-nonzero (NE) and GE to a test-bit-zero (EQ) on bit 31.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16138 
// Single-bit test of a long fused with branch: (op1 & (1 << bit)) ==/!= 0.
// The predicate requires the AND mask to be a power of two.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // The mask is a power of two, so its log2 is the bit number to test.
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16155 
// Single-bit test of an int fused with branch: (op1 & (1 << bit)) ==/!= 0.
// The predicate requires the AND mask to be a power of two.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // The mask is a power of two, so its log2 is the bit number to test.
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16172 
16173 // And far variants
// Far variant of cmpL_branch_sign: same sign-bit test on bit 63, emitted
// with /*far*/true for targets beyond the short tb* branch range.
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16188 
// Far variant of cmpI_branch_sign: same sign-bit test on bit 31, emitted
// with /*far*/true for targets beyond the short tb* branch range.
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16203 
// Far variant of cmpL_branch_bit: same single-bit test, emitted with
// /*far*/true for targets beyond the short tb* branch range.
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16219 
// Far variant of cmpI_branch_bit: same single-bit test, emitted with
// /*far*/true for targets beyond the short tb* branch range.
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16235 
16236 // Test bits
16237 
// Fold (AndL op1 op2) compared with 0 into a flag-setting TST with an
// immediate; the predicate requires the mask to be encodable as a 64-bit
// logical immediate.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
16250 
// Fold (AndI op1 op2) compared with 0 into a flag-setting TSTW with an
// immediate; the predicate requires the mask to be encodable as a 32-bit
// logical immediate.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  // Format corrected from "tst" to "tstw" to match the emitted 32-bit
  // instruction and the sibling cmpI_and_reg rule.
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
16263 
// Fold (AndL op1 op2) compared with 0 into a flag-setting register TST.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
16274 
// Fold (AndI op1 op2) compared with 0 into a flag-setting register TSTW.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
16285 
16286 
16287 // Conditional Far Branch
16288 // Conditional Far Branch Unsigned
16289 // TODO: fixme
16290 
16291 // counted loop end branch near
// Conditional back-branch closing a counted loop; shares the conditional
// branch encoding with branchCon.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
16307 
16308 // counted loop end branch far
16309 // TODO: fixme
16310 
16311 // ============================================================================
16312 // inlined locking and unlocking
16313 
// Inline fast path of monitor lock acquisition (lightweight locking);
// leaves the success/failure result in the flags and clobbers three temps.
instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2, TEMP tmp3);

  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2,$tmp3" %}

  ins_encode %{
    __ fast_lock_lightweight($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_serial);
%}
16328 
// Inline fast path of monitor unlock (lightweight locking); leaves the
// success/failure result in the flags and clobbers three temps.
instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2, TEMP tmp3);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2, $tmp3" %}

  ins_encode %{
    __ fast_unlock_lightweight($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_serial);
%}
16343 
16344 // ============================================================================
16345 // Safepoint Instructions
16346 
16347 // TODO
16348 // provide a near and far version of this code
16349 
// Safepoint poll: a load from the polling page in $poll; the runtime arms
// the page to trap threads at safepoints.
instruct safePoint(rFlagsReg cr, iRegP poll)
%{
  match(SafePoint poll);
  effect(KILL cr);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
16363 
16364 
16365 // ============================================================================
16366 // Procedure Call/Return Instructions
16367 
16368 // Call Java Static Instruction
16369 
// Direct call to a statically-bound Java method, followed by the standard
// call epilog encoding.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode(aarch64_enc_java_static_call(meth),
             aarch64_enc_call_epilog);

  ins_pipe(pipe_class_call);
%}
16385 
16386 // TO HERE
16387 
16388 // Call Java Dynamic Instruction
// Dynamically-dispatched Java call (inline-cache based), followed by the
// standard call epilog encoding.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode(aarch64_enc_java_dynamic_call(meth),
             aarch64_enc_call_epilog);

  ins_pipe(pipe_class_call);
%}
16404 
16405 // Call Runtime Instruction
16406 
// Call from compiled Java code into the VM runtime.
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16421 
16422 // Call Runtime Instruction
16423 
// Leaf runtime call (no safepoint); shares the java-to-runtime encoding.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16438 
16439 // Call Runtime Instruction without safepoint and with vector arguments
// Call Runtime Instruction without safepoint and with vector arguments.
instruct CallLeafDirectVector(method meth)
%{
  match(CallLeafVector);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf vector $meth" %}

  ins_encode(aarch64_enc_java_to_runtime(meth));

  ins_pipe(pipe_class_call);
%}
16454 
16455 // Call Runtime Instruction
16456 
// Leaf runtime call that does not use floating-point arguments/results.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16471 
16472 // Tail Call; Jump from runtime stub to Java code.
16473 // Also known as an 'interprocedural jump'.
16474 // Target of jump will eventually return to caller.
16475 // TailJump below removes the return address.
16476 // Don't use rfp for 'jump_target' because a MachEpilogNode has already been
16477 // emitted just above the TailCall which has reset rfp to the caller state.
// Indirect tail call: jump through $jump_target with $method_ptr carrying
// the Method*.  rfp is excluded (see the comment block above).
instruct TailCalljmpInd(iRegPNoSpNoRfp jump_target, inline_cache_RegP method_ptr)
%{
  match(TailCall jump_target method_ptr);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_ptr holds method" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
16490 
// Indirect tail jump used for exception dispatch: jump through $jump_target
// with the exception oop pinned in r0.
instruct TailjmpInd(iRegPNoSpNoRfp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
16503 
16504 // Forward exception.
// Forward a pending exception: unconditional far jump to the shared
// forward-exception stub.
instruct ForwardExceptionjmp()
%{
  match(ForwardException);
  ins_cost(CALL_COST);

  format %{ "b forward_exception_stub" %}
  ins_encode %{
    __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
  %}
  ins_pipe(pipe_class_call);
%}
16516 
16517 // Create exception oop: created by stack-crawling runtime code.
16518 // Created exception is now available to this handler, and is setup
16519 // just prior to jumping to this handler. No code emitted.
16520 // TODO check
16521 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Materialize the incoming exception oop in r0 for the register allocator;
// the oop is set up by the runtime before entry, so no code is emitted.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
16534 
16535 // Rethrow exception: The exception oop will come in the first
16536 // argument position. Then JUMP (not call) to the rethrow stub code.
// Rethrow exception: The exception oop will come in the first
// argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
16547 
16548 
16549 // Return Instruction
16550 // epilog node loads ret address into lr as part of frame pop
// Method return; the epilog has already restored lr, so this is a bare ret.
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
16561 
16562 // Die now.
// Halt node: emits a stop() with the halt reason, but only when the node is
// reachable (unreachable Halts emit nothing).
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    if (is_reachable()) {
      const char* str = __ code_string(_halt_reason);
      __ stop(str);
    }
  %}

  ins_pipe(pipe_class_default);
%}
16578 
16579 // ============================================================================
16580 // Partial Subtype Check
16581 //
// Search the receiver's secondary superklass array for an instance of the
// superklass.  Set a hidden internal cache on a hit (cache is checked with
// exposed code in gen_subtype_check()).  Return NZ for a miss or zero for a
// hit.  The encoding ALSO sets flags.
16586 
// Linear-scan subtype check, used only when the hashed secondary supers
// table is disabled (see predicate).  Result is NZ on miss, zero on hit;
// flags are also set by the encoding.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  predicate(!UseSecondarySupersTable);
  effect(KILL cr, KILL temp);

  ins_cost(20 * INSN_COST);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
16602 
16603 // Two versions of partialSubtypeCheck, both used when we need to
16604 // search for a super class in the secondary supers array. The first
16605 // is used when we don't know _a priori_ the class being searched
16606 // for. The second, far more common, is used when we do know: this is
16607 // used for instanceof, checkcast, and any case where C2 can determine
16608 // it by constant propagation.
16609 
// Secondary-supers-table subtype check for a superklass not known at
// compile time (the "var" case described in the comment block above).
instruct partialSubtypeCheckVarSuper(iRegP_R4 sub, iRegP_R0 super, vRegD_V0 vtemp, iRegP_R5 result,
                                     iRegP_R1 tempR1, iRegP_R2 tempR2, iRegP_R3 tempR3,
                                     rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  predicate(UseSecondarySupersTable);
  effect(KILL cr, TEMP tempR1, TEMP tempR2, TEMP tempR3, TEMP vtemp);

  ins_cost(10 * INSN_COST);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode %{
    // No L_success label: the caller inspects $result/flags instead.
    __ lookup_secondary_supers_table_var($sub$$Register, $super$$Register,
                                         $tempR1$$Register, $tempR2$$Register, $tempR3$$Register,
                                         $vtemp$$FloatRegister,
                                         $result$$Register, /*L_success*/nullptr);
  %}

  ins_pipe(pipe_class_memory);
%}
16630 
// Secondary-supers-table subtype check for a compile-time-constant
// superklass (the common instanceof/checkcast case described above).
// Either inlines the table lookup or calls the per-slot stub, bailing out
// of compilation if the code cache is full.
instruct partialSubtypeCheckConstSuper(iRegP_R4 sub, iRegP_R0 super_reg, immP super_con, vRegD_V0 vtemp, iRegP_R5 result,
                                       iRegP_R1 tempR1, iRegP_R2 tempR2, iRegP_R3 tempR3,
                                       rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub (Binary super_reg super_con)));
  predicate(UseSecondarySupersTable);
  effect(KILL cr, TEMP tempR1, TEMP tempR2, TEMP tempR3, TEMP vtemp);

  ins_cost(5 * INSN_COST);  // smaller than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super_reg, $super_con" %}

  ins_encode %{
    bool success = false;
    // Hash slot of the constant superklass selects the lookup stub/path.
    u1 super_klass_slot = ((Klass*)$super_con$$constant)->hash_slot();
    if (InlineSecondarySupersTest) {
      success =
        __ lookup_secondary_supers_table_const($sub$$Register, $super_reg$$Register,
                                               $tempR1$$Register, $tempR2$$Register, $tempR3$$Register,
                                               $vtemp$$FloatRegister,
                                               $result$$Register,
                                               super_klass_slot);
    } else {
      address call = __ trampoline_call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_stub(super_klass_slot)));
      success = (call != nullptr);
    }
    if (!success) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
16664 
16665 // Intrisics for String.compareTo()
16666 
// String.compareTo intrinsic, UTF-16 vs UTF-16 (UU), non-SVE path.
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, pnoreg, pnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16684 
// String.compareTo intrinsic, Latin-1 vs Latin-1 (LL), non-SVE path.
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // No vector temps are needed for the LL case (fnoreg/pnoreg passed).
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, pnoreg, pnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16701 
// String.compareTo intrinsic, UTF-16 vs Latin-1 (UL), non-SVE path; the
// mixed encodings need three vector temps for inflation.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, pnoreg, pnoreg, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16721 
// String.compareTo intrinsic, Latin-1 vs UTF-16 (LU), non-SVE path; the
// mixed encodings need three vector temps for inflation.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, pnoreg, pnoreg, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
16741 
16742 // Note that Z registers alias the corresponding NEON registers, we declare the vector operands of
16743 // these string_compare variants as NEON register type for convenience so that the prototype of
16744 // string_compare can be shared with all variants.
16745 
// String compare, both strings Latin1 (LL), SVE path (UseSVE > 0).
// Vector temps are declared as NEON D registers even though the stub uses
// Z registers (see the aliasing note above); pgtmp1/pgtmp2 supply the SVE
// governing predicates.
instruct string_compareLL_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    // fnoreg: this variant needs only two vector temps.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16768 
// String compare: Latin1 str1 vs UTF-16 str2 (LU), SVE path (UseSVE > 0).
instruct string_compareLU_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    // fnoreg: this variant needs only two vector temps.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
16791 
// String compare: UTF-16 str1 vs Latin1 str2 (UL), SVE path (UseSVE > 0).
instruct string_compareUL_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    // fnoreg: this variant needs only two vector temps.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16814 
// String compare, both strings UTF-16 (UU), SVE path (UseSVE > 0).
instruct string_compareUU_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    // fnoreg: this variant needs only two vector temps.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16837 
// String indexof: UTF-16 haystack and UTF-16 needle (UU), variable-length
// needle.  Inputs pinned to r1..r4, result in r0; six scratch GPRs, V0/V1
// and the flags are clobbered.
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                          iRegINoSp tmp3, iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
         TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    // -1: needle length is not a compile-time constant (contrast the
    // string_indexof_con* variants below).
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16861 
// String indexof: Latin1 haystack and Latin1 needle (LL), variable-length
// needle.  Same register discipline as string_indexofUU above.
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
         TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    // -1: needle length is not a compile-time constant.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16885 
// String indexof: UTF-16 haystack, Latin1 needle (UL), variable-length
// needle.  Inputs pinned to r1..r4, result in r0; six scratch GPRs, V0/V1
// and the flags are clobbered.
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5,
         TEMP tmp6, TEMP vtmp0, TEMP vtmp1, KILL cr);
  // Note: "cnt1" previously lacked its '$' here, so the operand name was
  // printed literally instead of the assigned register; now consistent
  // with the UU and LL variants above.
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    // -1: needle length is not a compile-time constant.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16909 
// String indexof, UU encoding, with a constant needle length of at most 4
// (immI_le_4).  The constant length lets the stub take a specialized path;
// zr is passed where the runtime-count and extra temp registers would go.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16931 
// String indexof, LL encoding, constant needle length <= 4 (immI_le_4).
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    // zr substitutes for the runtime needle count and the unused temps.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16953 
// String indexof, UL encoding, constant needle length of exactly 1
// (immI_1) -- narrower than the UU/LL constant variants' immI_le_4.
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    // zr substitutes for the runtime needle count and the unused temps.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16975 
// Index of a single char in a UTF-16 string, NEON-only path (UseSVE == 0).
instruct string_indexof_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                             iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                             iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  predicate((UseSVE == 0) && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "StringUTF16 IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16994 
// Index of a single char in a Latin1 string, NEON-only path (UseSVE == 0).
instruct stringL_indexof_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  predicate((UseSVE == 0) && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "StringLatin1 IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ stringL_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                            $result$$Register, $tmp1$$Register, $tmp2$$Register,
                            $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
17013 
// Index of a single char in a Latin1 string, SVE path (UseSVE > 0).
// NOTE(review): unlike the NEON variants above, str1/cnt1/ch carry no
// USE_KILL here -- presumably string_indexof_char_sve preserves its
// inputs; confirm against macroAssembler_aarch64 before changing.
instruct stringL_indexof_char_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                                  iRegI_R0 result, vecA ztmp1, vecA ztmp2,
                                  pRegGov pgtmp, pReg ptmp, rFlagsReg cr) %{
  predicate(UseSVE > 0 && ((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L);
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(TEMP ztmp1, TEMP ztmp2, TEMP pgtmp, TEMP ptmp, KILL cr);
  format %{ "StringLatin1 IndexOf char[] $str1,$cnt1,$ch -> $result # use sve" %}
  ins_encode %{
    __ string_indexof_char_sve($str1$$Register, $cnt1$$Register, $ch$$Register,
                               $result$$Register, $ztmp1$$FloatRegister,
                               $ztmp2$$FloatRegister, $pgtmp$$PRegister,
                               $ptmp$$PRegister, true /* isL */);
  %}
  ins_pipe(pipe_class_memory);
%}
17029 
// Index of a single char in a UTF-16 string, SVE path (UseSVE > 0).
// Mirror of stringL_indexof_char_sve with isL == false.
instruct stringU_indexof_char_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                                  iRegI_R0 result, vecA ztmp1, vecA ztmp2,
                                  pRegGov pgtmp, pReg ptmp, rFlagsReg cr) %{
  predicate(UseSVE > 0 && ((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U);
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(TEMP ztmp1, TEMP ztmp2, TEMP pgtmp, TEMP ptmp, KILL cr);
  format %{ "StringUTF16 IndexOf char[] $str1,$cnt1,$ch -> $result # use sve" %}
  ins_encode %{
    __ string_indexof_char_sve($str1$$Register, $cnt1$$Register, $ch$$Register,
                               $result$$Register, $ztmp1$$FloatRegister,
                               $ztmp2$$FloatRegister, $pgtmp$$PRegister,
                               $ptmp$$PRegister, false /* isL */);
  %}
  ins_pipe(pipe_class_memory);
%}
17045 
// Byte-wise string equality (LL encoding): both operands are Latin1,
// cnt is the length in bytes.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // cnt is in 8-bit bytes (LL compares Latin1 data).
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
17061 
// byte[] equality (AryEq, LL encoding).  arrays_equals is given an element
// size of 1; it may emit a call-out stub, so a null return means the code
// cache is full and compilation must bail out.
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                       vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3,
         TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         TEMP vtmp6, TEMP vtmp7, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result # KILL $ary1 $ary2 $tmp $tmp1 $tmp2 $tmp3 V0-V7 cr" %}
  ins_encode %{
    // Final argument 1 = element size in bytes (byte[]).
    address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register,
                                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                                   $result$$Register, $tmp$$Register, 1);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
17086 
// char[] equality (AryEq, UU encoding) -- same as array_equalsB but with a
// 2-byte element size.
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                       vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3,
         TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         TEMP vtmp6, TEMP vtmp7, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result # KILL $ary1 $ary2 $tmp $tmp1 $tmp2 $tmp3 V0-V7 cr" %}
  ins_encode %{
    // Final argument 2 = element size in bytes (char[]).
    address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register,
                                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                                   $result$$Register, $tmp$$Register, 2);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
17111 
// Vectorized array hash code.  basic_type selects the element type at
// compile time; result is both an input (initial hash) and the output.
instruct arrays_hashcode(iRegP_R1 ary, iRegI_R2 cnt, iRegI_R0 result, immI basic_type,
                         vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                         vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                         vRegD_V12 vtmp8, vRegD_V13 vtmp9, rFlagsReg cr)
%{
  match(Set result (VectorizedHashCode (Binary ary cnt) (Binary result basic_type)));
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5, TEMP vtmp6,
         TEMP vtmp7, TEMP vtmp8, TEMP vtmp9, USE_KILL ary, USE_KILL cnt, USE basic_type, KILL cr);

  format %{ "Array HashCode array[] $ary,$cnt,$result,$basic_type -> $result   // KILL all" %}
  ins_encode %{
    // NOTE(review): the first four vector temps are deliberately passed in
    // reverse order (vtmp3..vtmp0) -- presumably matching the stub's
    // expected register roles; confirm against arrays_hashcode in
    // macroAssembler_aarch64 before reordering.
    address tpc = __ arrays_hashcode($ary$$Register, $cnt$$Register, $result$$Register,
                                     $vtmp3$$FloatRegister, $vtmp2$$FloatRegister,
                                     $vtmp1$$FloatRegister, $vtmp0$$FloatRegister,
                                     $vtmp4$$FloatRegister, $vtmp5$$FloatRegister,
                                     $vtmp6$$FloatRegister, $vtmp7$$FloatRegister,
                                     $vtmp8$$FloatRegister, $vtmp9$$FloatRegister,
                                     (BasicType)$basic_type$$constant);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
17137 
// Count leading non-negative bytes in a byte[] (CountPositives intrinsic).
// May emit a call-out stub; a null return means the code cache is full.
instruct count_positives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (CountPositives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "count positives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    address tpc = __ count_positives($ary1$$Register, $len$$Register, $result$$Register);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe( pipe_slow );
%}
17152 
17153 // fast char[] to byte[] compression
// fast char[] to byte[] compression
// (StrCompressedCopy: UTF-16 src -> Latin1 dst; result reports how much
// was compressible).
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                         vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         USE_KILL src, USE_KILL dst, USE len, KILL cr);

  format %{ "String Compress $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $result$$Register, $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                           $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                           $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
17172 
17173 // fast byte[] to char[] inflation
// fast byte[] to char[] inflation
// (StrInflatedCopy: Latin1 src -> UTF-16 dst; no value result, hence the
// Universe dummy operand).  May emit a call-out stub; a null return means
// the code cache is full.
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len, iRegP_R3 tmp,
                        vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                        vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3,
         TEMP vtmp4, TEMP vtmp5, TEMP vtmp6, TEMP tmp,
         USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst # KILL $tmp $src $dst $len V0-V6 cr" %}
  ins_encode %{
    // NOTE(review): only vtmp0..vtmp2 are passed explicitly; vtmp3..vtmp6
    // are reserved as TEMPs, presumably because byte_array_inflate
    // clobbers V3-V6 internally -- confirm against macroAssembler_aarch64.
    address tpc = __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                                        $vtmp2$$FloatRegister, $tmp$$Register);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
17195 
17196 // encode char[] to byte[] in ISO_8859_1
// encode char[] to byte[] in ISO_8859_1
// (EncodeISOArray with is_ascii() == false; the 'false' flag below selects
// the ISO-8859-1 range check rather than the ASCII one).
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                          vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                          iRegI_R0 result, rFlagsReg cr)
%{
  predicate(!((EncodeISOArrayNode*)n)->is_ascii());
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE len, KILL vtmp0, KILL vtmp1,
         KILL vtmp2, KILL vtmp3, KILL vtmp4, KILL vtmp5, KILL cr);

  format %{ "Encode ISO array $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
                        $result$$Register, false,
                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                        $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                        $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_class_memory);
%}
17217 
// encode char[] to byte[] in ASCII (EncodeISOArray with is_ascii() ==
// true; the 'true' flag below selects the stricter ASCII range check).
instruct encode_ascii_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                            vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                            vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                            iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((EncodeISOArrayNode*)n)->is_ascii());
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE len, KILL vtmp0, KILL vtmp1,
         KILL vtmp2, KILL vtmp3, KILL vtmp4, KILL vtmp5, KILL cr);

  format %{ "Encode ASCII array $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
                        $result$$Register, true,
                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                        $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                        $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_class_memory);
%}
17238 
17239 //----------------------------- CompressBits/ExpandBits ------------------------
17240 
// 32-bit CompressBits via SVE2 BEXT: move src and mask into S lanes,
// bit-extract, and move the result back to a GPR.
// NOTE(review): no predicate here -- SVE2 BITPERM availability is
// presumably gated elsewhere (e.g. Matcher::match_rule_supported);
// confirm before relying on this pattern alone.
instruct compressBitsI_reg(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ S, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ S, 0, $mask$$Register);
    __ sve_bext($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17258 
// 32-bit CompressBits with the source loaded from memory and a constant
// mask materialized from the constant pool, via SVE2 BEXT.
instruct compressBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits (LoadI mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrs   $tsrc, $mem\n\t"
            "ldrs   $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    // Load the 4-byte source straight into an S register, skipping a GPR.
    loadStore(masm, &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ ldrs($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bext($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17277 
// 64-bit CompressBits via SVE2 BEXT, operating on D lanes.
instruct compressBitsL_reg(iRegLNoSp dst, iRegL src, iRegL mask,
                           vRegD tdst, vRegD tsrc, vRegD tmask) %{
  match(Set dst (CompressBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ D, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ D, 0, $mask$$Register);
    __ sve_bext($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17295 
// 64-bit CompressBits with a memory source and constant-pool mask.
// NOTE(review): temps are declared vRegF but used at D size (ldrd/__ D);
// vRegF and vRegD map to the same physical registers, so this is
// presumably harmless -- confirm before "fixing" to vRegD.
instruct compressBitsL_memcon(iRegLNoSp dst, memory8 mem, immL mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits (LoadL mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrd   $tsrc, $mem\n\t"
            "ldrd   $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    loadStore(masm, &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ ldrd($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bext($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17314 
// 32-bit ExpandBits via SVE2 BDEP (bit-deposit) on S lanes -- the inverse
// pairing of compressBitsI_reg above.
instruct expandBitsI_reg(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask,
                         vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ S, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ S, 0, $mask$$Register);
    __ sve_bdep($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17332 
// ExpandBits (Integer.expand) of an int loaded from memory with an
// immediate mask, implemented with the BDEP instruction (emitted via
// sve_bdep, so presumably gated on SVE2 support elsewhere — confirm against
// the match rules/predicates earlier in this file).
// The source is loaded directly into an FP/SIMD register, the constant mask
// is materialized from the constant pool, and the scalar result is moved
// back to a GPR. tdst/tsrc/tmask are FP temporaries clobbered here.
instruct expandBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask,
                         vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits (LoadI mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrs   $tsrc, $mem\n\t"
            "ldrs   $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    // Load the 32-bit source operand straight into the low S lane of tsrc,
    // avoiding an intermediate GPR.
    loadStore(masm, &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    // Materialize the immediate mask from the constant pool.
    __ ldrs($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bdep($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    // Move the deposited bits back to the general purpose destination.
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17351 
// ExpandBits (Long.expand) of a long held in a GPR, implemented with the
// BDEP instruction (emitted via sve_bdep, so presumably gated on SVE2
// support elsewhere — confirm against the match rules/predicates earlier in
// this file). Source and mask are moved into the low D lanes of FP
// temporaries, the deposit is done there, and the scalar result is moved
// back to a GPR.
// NOTE(review): the temporaries here are vRegD while the sibling
// compress/expand rules use vRegF; both name FP/SIMD registers, but confirm
// the difference is intentional.
instruct expandBitsL_reg(iRegLNoSp dst, iRegL src, iRegL mask,
                         vRegD tdst, vRegD tsrc, vRegD tmask) %{
  match(Set dst (ExpandBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    // GPR -> FP lane 0 (64-bit D element) for both operands.
    __ mov($tsrc$$FloatRegister, __ D, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ D, 0, $mask$$Register);
    __ sve_bdep($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    // Move the deposited bits back to the general purpose destination.
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17369 
17370 
// ExpandBits (Long.expand) of a long loaded from memory with an immediate
// mask, implemented with the BDEP instruction (emitted via sve_bdep, so
// presumably gated on SVE2 support elsewhere — confirm against the match
// rules/predicates earlier in this file).
// The source is loaded directly into an FP/SIMD register, the constant mask
// is materialized from the constant pool, and the scalar result is moved
// back to a GPR. tdst/tsrc/tmask are FP temporaries clobbered here.
// Fix: dst must be the long register class (iRegLNoSp, as in
// compressBitsL_memcon above) — the match rule produces a 64-bit value and
// the encode writes a full D-lane (64-bit) result into $dst, so the int
// class iRegINoSp was wrong.
instruct expandBitsL_memcon(iRegLNoSp dst, memory8 mem, immL mask,
                            vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits (LoadL mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrd   $tsrc, $mem\n\t"
            "ldrd   $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    // Load the 64-bit source operand straight into the low D lane of tsrc,
    // avoiding an intermediate GPR.
    loadStore(masm, &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    // Materialize the immediate mask from the constant pool.
    __ ldrd($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bdep($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    // Move the deposited bits back to the general purpose destination.
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17389 
17390 //----------------------------- Reinterpret ----------------------------------
17391 // Reinterpret a half-precision float value in a floating point register to a general purpose register
// Reinterpret a half-precision float value in a floating point register to a general purpose register.
// Uses smov to sign-extend the 16-bit H element in lane 0 of src into dst,
// so the short bit pattern is preserved as a (sign-extended) int.
instruct reinterpretHF2S(iRegINoSp dst, vRegF src) %{
  match(Set dst (ReinterpretHF2S src));
  format %{ "reinterpretHF2S $dst, $src" %}
  ins_encode %{
    // Signed move: GPR receives the raw 16 bits, sign-extended.
    __ smov($dst$$Register, $src$$FloatRegister, __ H, 0);
  %}
  ins_pipe(pipe_slow);
%}
17400 
17401 // Reinterpret a half-precision float value in a general purpose register to a floating point register
// Reinterpret a half-precision float value in a general purpose register to a floating point register.
// Moves the low 16 bits of src into the H element at lane 0 of dst,
// preserving the bit pattern (no conversion).
instruct reinterpretS2HF(vRegF dst, iRegINoSp src) %{
  match(Set dst (ReinterpretS2HF src));
  format %{ "reinterpretS2HF $dst, $src" %}
  ins_encode %{
    __ mov($dst$$FloatRegister, __ H, 0, $src$$Register);
  %}
  ins_pipe(pipe_slow);
%}
17410 
17411 // Without this optimization, ReinterpretS2HF (ConvF2HF src) would result in the following
17412 // instructions (the first two are for ConvF2HF and the last instruction is for ReinterpretS2HF) -
17413 // fcvt $tmp1_fpr, $src_fpr    // Convert float to half-precision float
17414 // mov  $tmp2_gpr, $tmp1_fpr   // Move half-precision float in FPR to a GPR
17415 // mov  $dst_fpr,  $tmp2_gpr   // Move the result from a GPR to an FPR
17416 // The move from FPR to GPR in ConvF2HF and the move from GPR to FPR in ReinterpretS2HF
17417 // can be omitted in this pattern, resulting in -
17418 // fcvt $dst, $src  // Convert float to half-precision float
// Fused pattern: ReinterpretS2HF (ConvF2HF src) collapses to a single
// fcvtsh, eliding the FPR->GPR->FPR round trip described in the comment
// above. Matches the C2 subtree directly so the intermediate GPR moves are
// never emitted.
instruct convF2HFAndS2HF(vRegF dst, vRegF src)
%{
  match(Set dst (ReinterpretS2HF (ConvF2HF src)));
  format %{ "convF2HFAndS2HF $dst, $src" %}
  ins_encode %{
    // Single float -> half-precision convert, result stays in an FPR.
    __ fcvtsh($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
17428 
17429 // Without this optimization, ConvHF2F (ReinterpretHF2S src) would result in the following
17430 // instructions (the first one is for ReinterpretHF2S and the last two are for ConvHF2F) -
17431 // mov  $tmp1_gpr, $src_fpr  // Move the half-precision float from an FPR to a GPR
17432 // mov  $tmp2_fpr, $tmp1_gpr // Move the same value from GPR to an FPR
17433 // fcvt $dst_fpr,  $tmp2_fpr // Convert the half-precision float to 32-bit float
17434 // The move from FPR to GPR in ReinterpretHF2S and the move from GPR to FPR in ConvHF2F
17435 // can be omitted as the input (src) is already in an FPR required for the fcvths instruction
17436 // resulting in -
17437 // fcvt $dst, $src  // Convert half-precision float to a 32-bit float
// Fused pattern: ConvHF2F (ReinterpretHF2S src) collapses to a single
// fcvths, eliding the FPR->GPR->FPR round trip described in the comment
// above. Matches the C2 subtree directly so the intermediate GPR moves are
// never emitted.
instruct convHF2SAndHF2F(vRegF dst, vRegF src)
%{
  match(Set dst (ConvHF2F (ReinterpretHF2S src)));
  format %{ "convHF2SAndHF2F $dst, $src" %}
  ins_encode %{
    // Single half-precision -> float convert, input already in an FPR.
    __ fcvths($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
17447 
17448 // ============================================================================
17449 // This name is KNOWN by the ADLC and cannot be changed.
17450 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
17451 // for this guy.
// Load the current Thread pointer. Emits no code (size(0), empty encoding):
// dst is constrained to thread_RegP, i.e. the dedicated thread register, so
// the value is already in place. ins_cost(0) keeps the matcher from ever
// preferring an alternative.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  // Free: no instruction is emitted.
  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
17466 
17467 //----------PEEPHOLE RULES-----------------------------------------------------
17468 // These must follow all instruction definitions as they use the names
17469 // defined in the instructions definitions.
17470 //
17471 // peepmatch ( root_instr_name [preceding_instruction]* );
17472 //
17473 // peepconstraint %{
17474 // (instruction_number.operand_name relational_op instruction_number.operand_name
17475 //  [, ...] );
17476 // // instruction numbers are zero-based using left to right order in peepmatch
17477 //
17478 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
17479 // // provide an instruction_number.operand_name for each operand that appears
17480 // // in the replacement instruction's match rule
17481 //
17482 // ---------VM FLAGS---------------------------------------------------------
17483 //
17484 // All peephole optimizations can be turned off using -XX:-OptoPeephole
17485 //
17486 // Each peephole rule is given an identifying number starting with zero and
17487 // increasing by one in the order seen by the parser.  An individual peephole
17488 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
17489 // on the command-line.
17490 //
17491 // ---------CURRENT LIMITATIONS----------------------------------------------
17492 //
17493 // Only match adjacent instructions in same basic block
17494 // Only equality constraints
17495 // Only constraints between operands, not (0.dest_reg == RAX_enc)
17496 // Only one replacement instruction
17497 //
17498 // ---------EXAMPLE----------------------------------------------------------
17499 //
17500 // // pertinent parts of existing instructions in architecture description
17501 // instruct movI(iRegINoSp dst, iRegI src)
17502 // %{
17503 //   match(Set dst (CopyI src));
17504 // %}
17505 //
17506 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
17507 // %{
17508 //   match(Set dst (AddI dst src));
17509 //   effect(KILL cr);
17510 // %}
17511 //
17512 // // Change (inc mov) to lea
17513 // peephole %{
17514 //   // increment preceded by register-register move
17515 //   peepmatch ( incI_iReg movI );
17516 //   // require that the destination register of the increment
17517 //   // match the destination register of the move
17518 //   peepconstraint ( 0.dst == 1.dst );
17519 //   // construct a replacement instruction that sets
17520 //   // the destination to ( move's source register + one )
17521 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
17522 // %}
17523 //
17524 
17525 // Implementation no longer uses movX instructions since
17526 // machine-independent system no longer uses CopyX nodes.
17527 //
17528 // peephole
17529 // %{
17530 //   peepmatch (incI_iReg movI);
17531 //   peepconstraint (0.dst == 1.dst);
17532 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17533 // %}
17534 
17535 // peephole
17536 // %{
17537 //   peepmatch (decI_iReg movI);
17538 //   peepconstraint (0.dst == 1.dst);
17539 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17540 // %}
17541 
17542 // peephole
17543 // %{
17544 //   peepmatch (addI_iReg_imm movI);
17545 //   peepconstraint (0.dst == 1.dst);
17546 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17547 // %}
17548 
17549 // peephole
17550 // %{
17551 //   peepmatch (incL_iReg movL);
17552 //   peepconstraint (0.dst == 1.dst);
17553 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17554 // %}
17555 
17556 // peephole
17557 // %{
17558 //   peepmatch (decL_iReg movL);
17559 //   peepconstraint (0.dst == 1.dst);
17560 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17561 // %}
17562 
17563 // peephole
17564 // %{
17565 //   peepmatch (addL_iReg_imm movL);
17566 //   peepconstraint (0.dst == 1.dst);
17567 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17568 // %}
17569 
17570 // peephole
17571 // %{
17572 //   peepmatch (addP_iReg_imm movP);
17573 //   peepconstraint (0.dst == 1.dst);
17574 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
17575 // %}
17576 
17577 // // Change load of spilled value to only a spill
17578 // instruct storeI(memory mem, iRegI src)
17579 // %{
17580 //   match(Set mem (StoreI mem src));
17581 // %}
17582 //
17583 // instruct loadI(iRegINoSp dst, memory mem)
17584 // %{
17585 //   match(Set dst (LoadI mem));
17586 // %}
17587 //
17588 
17589 //----------SMARTSPILL RULES---------------------------------------------------
17590 // These must follow all instruction definitions as they use the names
17591 // defined in the instructions definitions.
17592 
17593 // Local Variables:
17594 // mode: c++
17595 // End: