1 //
    2 // Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
    3 // Copyright (c) 2014, 2024, Red Hat, Inc. All rights reserved.
    4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    5 //
    6 // This code is free software; you can redistribute it and/or modify it
    7 // under the terms of the GNU General Public License version 2 only, as
    8 // published by the Free Software Foundation.
    9 //
   10 // This code is distributed in the hope that it will be useful, but WITHOUT
   11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   13 // version 2 for more details (a copy is included in the LICENSE file that
   14 // accompanied this code).
   15 //
   16 // You should have received a copy of the GNU General Public License version
   17 // 2 along with this work; if not, write to the Free Software Foundation,
   18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   19 //
   20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   21 // or visit www.oracle.com if you need additional information or have any
   22 // questions.
   23 //
   24 //
   25 
   26 // AArch64 Architecture Description File
   27 
   28 //----------REGISTER DEFINITION BLOCK------------------------------------------
   29 // This information is used by the matcher and the register allocator to
   30 // describe individual registers and classes of registers within the target
   31 // architecture.
   32 
   33 register %{
   34 //----------Architecture Description Register Definitions----------------------
   35 // General Registers
   36 // "reg_def"  name ( register save type, C convention save type,
   37 //                   ideal register type, encoding );
   38 // Register Save Types:
   39 //
   40 // NS  = No-Save:       The register allocator assumes that these registers
   41 //                      can be used without saving upon entry to the method, &
   42 //                      that they do not need to be saved at call sites.
   43 //
   44 // SOC = Save-On-Call:  The register allocator assumes that these registers
   45 //                      can be used without saving upon entry to the method,
   46 //                      but that they must be saved at call sites.
   47 //
   48 // SOE = Save-On-Entry: The register allocator assumes that these registers
   49 //                      must be saved before using them upon entry to the
   50 //                      method, but they do not need to be saved at call
   51 //                      sites.
   52 //
   53 // AS  = Always-Save:   The register allocator assumes that these registers
   54 //                      must be saved before using them upon entry to the
   55 //                      method, & that they must be saved at call sites.
   56 //
   57 // Ideal Register Type is used to determine how to save & restore a
   58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
   59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
   60 //
   61 // The encoding number is the actual bit-pattern placed into the opcodes.
   62 
   63 // We must define the 64 bit int registers in two 32 bit halves, the
   64 // real lower register and a virtual upper half register. upper halves
   65 // are used by the register allocator but are not actually supplied as
   66 // operands to memory ops.
   67 //
   68 // follow the C1 compiler in making registers
   69 //
   70 //   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
   72 //   r8-r9 non-allocatable (so we can use them as scratch regs)
   73 //
   74 // as regards Java usage. we don't use any callee save registers
   75 // because this makes it difficult to de-optimise a frame (see comment
   76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
   77 //
   78 
   79 // General Registers
   80 
// 64-bit integer registers.  Each register Rn is described to the
// allocator as a real 32-bit lower half (Rn) plus a virtual 32-bit
// upper half (Rn_H); the upper halves are bookkeeping only and are
// never supplied as memory operands (see comment above).
// Columns: (register save type, C save type, ideal type, encoding, VMReg).
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
// r8/r9 are NS for Java code: kept free as macro-assembler scratch
// registers (rscratch1/rscratch2), never handed out by the allocator.
reg_def R8      ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()         ); // rscratch1, non-allocatable
reg_def R8_H    ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()->next() );
reg_def R9      ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()         ); // rscratch2, non-allocatable
reg_def R9_H    ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()->next() );
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
// r18 is named r18_tls: it is the platform register on some operating
// systems and is conditionally excluded from allocation via the
// R18_RESERVED classes further down.
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18_tls->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18_tls->as_VMReg()->next());
// r19-r28 are callee-saved in the C ABI (C save type SOE) but SOC for
// Java code — see the "no callee save registers" comment above.
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
reg_def R27     ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()->next());
// r28-r31 have fixed VM roles (thread, fp, lr, sp) and are NS for Java.
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
  145 
  146 // ----------------------------
  147 // Float/Double/Vector Registers
  148 // ----------------------------
  149 
  150 // Double Registers
  151 
  152 // The rules of ADL require that double registers be defined in pairs.
  153 // Each pair must be two 32-bit values, but not necessarily a pair of
  154 // single float registers. In each pair, ADLC-assigned register numbers
  155 // must be adjacent, with the lower number even. Finally, when the
  156 // CPU stores such a register pair to memory, the word associated with
  157 // the lower ADLC-assigned number must be stored to the lower address.
  158 
  159 // AArch64 has 32 floating-point registers. Each can store a vector of
  160 // single or double precision floating-point values up to 8 * 32
  161 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
  162 // use the first float or double element of the vector.
  163 
// for Java use, float registers v0-v15 are always save-on-call
// (whereas the platform ABI treats v8-v15 as callee save). float
// registers v16-v31 are SOC as per the platform spec
  167 
  168 // For SVE vector registers, we simply extend vector register size to 8
  169 // 'logical' slots. This is nominally 256 bits but it actually covers
  170 // all possible 'physical' SVE vector register lengths from 128 ~ 2048
  171 // bits. The 'physical' SVE vector register length is detected during
  172 // startup, so the register allocator is able to identify the correct
  173 // number of bytes needed for an SVE spill/unspill.
  174 // Note that a vector register with 4 slots denotes a 128-bit NEON
  175 // register allowing it to be distinguished from the corresponding SVE
  176 // vector register when the SVE vector length is 128 bits.
  177 
  // Each FP/SIMD register Vn is described as four 32-bit logical
  // slots: Vn (bits 0-31), Vn_H (32-63), Vn_J (64-95), Vn_K (96-127).
  // Scalar float/double use only the first one or two slots.
  reg_def V0   ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(3) );

  // v8-v15: the C save type is SOE only for the first two slots
  // (the low 64 bits) — per the AArch64 procedure call standard only
  // the bottom 64 bits of v8-v15 are callee-saved, so the _J/_K
  // slots remain SOC.
  reg_def V8   ( SOC, SOE, Op_RegF, 8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOE, Op_RegF, 8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOE, Op_RegF, 9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOE, Op_RegF, 9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(3) );

  reg_def V10   ( SOC, SOE, Op_RegF, 10, v10->as_VMReg()          );
  reg_def V10_H ( SOC, SOE, Op_RegF, 10, v10->as_VMReg()->next()  );
  reg_def V10_J ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2) );
  reg_def V10_K ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3) );

  reg_def V11   ( SOC, SOE, Op_RegF, 11, v11->as_VMReg()          );
  reg_def V11_H ( SOC, SOE, Op_RegF, 11, v11->as_VMReg()->next()  );
  reg_def V11_J ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2) );
  reg_def V11_K ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3) );

  reg_def V12   ( SOC, SOE, Op_RegF, 12, v12->as_VMReg()          );
  reg_def V12_H ( SOC, SOE, Op_RegF, 12, v12->as_VMReg()->next()  );
  reg_def V12_J ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2) );
  reg_def V12_K ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3) );

  reg_def V13   ( SOC, SOE, Op_RegF, 13, v13->as_VMReg()          );
  reg_def V13_H ( SOC, SOE, Op_RegF, 13, v13->as_VMReg()->next()  );
  reg_def V13_J ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2) );
  reg_def V13_K ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3) );

  reg_def V14   ( SOC, SOE, Op_RegF, 14, v14->as_VMReg()          );
  reg_def V14_H ( SOC, SOE, Op_RegF, 14, v14->as_VMReg()->next()  );
  reg_def V14_J ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2) );
  reg_def V14_K ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3) );

  reg_def V15   ( SOC, SOE, Op_RegF, 15, v15->as_VMReg()          );
  reg_def V15_H ( SOC, SOE, Op_RegF, 15, v15->as_VMReg()->next()  );
  reg_def V15_J ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2) );
  reg_def V15_K ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3) );

  // v16-v31 are caller-saved (SOC) in both conventions.
  reg_def V16   ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()          );
  reg_def V16_H ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next()  );
  reg_def V16_J ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2) );
  reg_def V16_K ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3) );

  reg_def V17   ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()          );
  reg_def V17_H ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next()  );
  reg_def V17_J ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2) );
  reg_def V17_K ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3) );

  reg_def V18   ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()          );
  reg_def V18_H ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next()  );
  reg_def V18_J ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2) );
  reg_def V18_K ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3) );

  reg_def V19   ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()          );
  reg_def V19_H ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next()  );
  reg_def V19_J ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2) );
  reg_def V19_K ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3) );

  reg_def V20   ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()          );
  reg_def V20_H ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next()  );
  reg_def V20_J ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2) );
  reg_def V20_K ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3) );

  reg_def V21   ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()          );
  reg_def V21_H ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next()  );
  reg_def V21_J ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2) );
  reg_def V21_K ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3) );

  reg_def V22   ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()          );
  reg_def V22_H ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next()  );
  reg_def V22_J ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2) );
  reg_def V22_K ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3) );

  reg_def V23   ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()          );
  reg_def V23_H ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next()  );
  reg_def V23_J ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2) );
  reg_def V23_K ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3) );

  reg_def V24   ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()          );
  reg_def V24_H ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next()  );
  reg_def V24_J ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2) );
  reg_def V24_K ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3) );

  reg_def V25   ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()          );
  reg_def V25_H ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next()  );
  reg_def V25_J ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2) );
  reg_def V25_K ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3) );

  reg_def V26   ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()          );
  reg_def V26_H ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next()  );
  reg_def V26_J ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2) );
  reg_def V26_K ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3) );

  reg_def V27   ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()          );
  reg_def V27_H ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next()  );
  reg_def V27_J ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2) );
  reg_def V27_K ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3) );

  reg_def V28   ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()          );
  reg_def V28_H ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next()  );
  reg_def V28_J ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2) );
  reg_def V28_K ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3) );

  reg_def V29   ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()          );
  reg_def V29_H ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next()  );
  reg_def V29_J ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2) );
  reg_def V29_K ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3) );

  reg_def V30   ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()          );
  reg_def V30_H ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next()  );
  reg_def V30_J ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2) );
  reg_def V30_K ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3) );

  reg_def V31   ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()          );
  reg_def V31_H ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next()  );
  reg_def V31_J ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2) );
  reg_def V31_K ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3) );
  337 
  338 // ----------------------------
  339 // SVE Predicate Registers
  340 // ----------------------------
  // SVE predicate registers p0-p15, one logical slot each, all
  // caller-saved (SOC/SOC).  NOTE(review): the allocation class below
  // orders p7 last and reserves it for the all-true predicate;
  // presumably only the low predicates can act as governing
  // predicates for most SVE instructions — see chunk2.
  reg_def P0 (SOC, SOC, Op_RegVectMask, 0, p0->as_VMReg());
  reg_def P1 (SOC, SOC, Op_RegVectMask, 1, p1->as_VMReg());
  reg_def P2 (SOC, SOC, Op_RegVectMask, 2, p2->as_VMReg());
  reg_def P3 (SOC, SOC, Op_RegVectMask, 3, p3->as_VMReg());
  reg_def P4 (SOC, SOC, Op_RegVectMask, 4, p4->as_VMReg());
  reg_def P5 (SOC, SOC, Op_RegVectMask, 5, p5->as_VMReg());
  reg_def P6 (SOC, SOC, Op_RegVectMask, 6, p6->as_VMReg());
  reg_def P7 (SOC, SOC, Op_RegVectMask, 7, p7->as_VMReg());
  reg_def P8 (SOC, SOC, Op_RegVectMask, 8, p8->as_VMReg());
  reg_def P9 (SOC, SOC, Op_RegVectMask, 9, p9->as_VMReg());
  reg_def P10 (SOC, SOC, Op_RegVectMask, 10, p10->as_VMReg());
  reg_def P11 (SOC, SOC, Op_RegVectMask, 11, p11->as_VMReg());
  reg_def P12 (SOC, SOC, Op_RegVectMask, 12, p12->as_VMReg());
  reg_def P13 (SOC, SOC, Op_RegVectMask, 13, p13->as_VMReg());
  reg_def P14 (SOC, SOC, Op_RegVectMask, 14, p14->as_VMReg());
  reg_def P15 (SOC, SOC, Op_RegVectMask, 15, p15->as_VMReg());
  357 
  358 // ----------------------------
  359 // Special Registers
  360 // ----------------------------
  361 
// the AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. the FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
  367 
  368 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
  369 
  370 // Specify priority of register selection within phases of register
  371 // allocation.  Highest priority is first.  A useful heuristic is to
  372 // give registers a low priority when they are required by machine
  373 // instructions, like EAX and EDX on I486, and choose no-save registers
  374 // before save-on-call, & save-on-call before save-on-entry.  Registers
  375 // which participate in fixed calling sequences should come last.
  376 // Registers which are used as pairs must fall on an even boundary.
  377 
// Integer registers.  Listing order within an alloc_class is the
// allocation priority (see the priority comment above): temporaries
// first, then argument registers, then C-callee-saved registers,
// with the fixed-role/non-allocatable registers last.
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
    R8, R8_H,   // rscratch1
    R9, R9_H,   // rscratch2
);

// FP/SIMD registers: caller-saved v16-v31 first, then the FP argument
// registers v0-v7, then v8-v15 (partially callee-saved in the C ABI).
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);

// SVE predicate registers.  p7 is deliberately placed last so it is
// only allocated as a last resort — it is kept as the all-true
// predicate.
alloc_class chunk2 (
    // Governing predicates for load/store and arithmetic
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,

    // Extra predicates
    P8,
    P9,
    P10,
    P11,
    P12,
    P13,
    P14,
    P15,

    // Preserved for all-true predicate
    P7,
);

// Flags register is in its own chunk.
alloc_class chunk3(RFLAGS);
  487 
  488 //----------Architecture Description Register Classes--------------------------
  489 // Several register classes are automatically defined based upon information in
  490 // this architecture description.
  491 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
  492 // 2) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
  493 //
  494 
// Class for all 32 bit general purpose registers
// (lower halves R0-R31; upper halves excluded).
reg_class all_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30,
    R31
);
  528 
  529 
// Class for all 32 bit integer registers (excluding SP which
// will never be used as an integer register).
// The %{ ... %} form supplies the register mask at runtime instead
// of a static member list.
reg_class any_reg32 %{
  return _ANY_REG32_mask;
%}

// Singleton classes pin an operand to one fixed register.

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);

// Singleton class for R31 int register
reg_class int_r31_reg(R31);
  550 
// Class for all 64 bit general purpose registers
// (each member is a lower/upper half pair).
reg_class all_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
  584 
  585 // Class for all long integer registers (including SP)
  586 reg_class any_reg %{
  587   return _ANY_REG_mask;
  588 %}
  589 
  590 // Class for non-allocatable 32 bit registers
  591 reg_class non_allocatable_reg32(
  592 #ifdef R18_RESERVED
  593     // See comment in register_aarch64.hpp
  594     R18,                        // tls on Windows
  595 #endif
  596     R28,                        // thread
  597     R30,                        // lr
  598     R31                         // sp
  599 );
  600 
  601 // Class for non-allocatable 64 bit registers
  602 reg_class non_allocatable_reg(
  603 #ifdef R18_RESERVED
  604     // See comment in register_aarch64.hpp
  605     R18, R18_H,                 // tls on Windows, platform register on macOS
  606 #endif
  607     R28, R28_H,                 // thread
  608     R30, R30_H,                 // lr
  609     R31, R31_H                  // sp
  610 );
  611 
  612 // Class for all non-special integer registers
  613 reg_class no_special_reg32 %{
  614   return _NO_SPECIAL_REG32_mask;
  615 %}
  616 
  617 // Class for all non-special long integer registers
  618 reg_class no_special_reg %{
  619   return _NO_SPECIAL_REG_mask;
  620 %}
  621 
// Singleton 64-bit classes: each pins an operand to a single
// lower/upper half pair.

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register
reg_class method_reg(
    R12, R12_H
);

// Class for thread register
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register
reg_class fp_reg(
    R29, R29_H
);

// Class for link register
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register
reg_class sp_reg(
  R31, R31_H
);
  686 
// Class for all pointer registers.
// These pointer classes compute their masks at runtime (%{ ... %}).
reg_class ptr_reg %{
  return _PTR_REG_mask;
%}

// Class for all non_special pointer registers
reg_class no_special_ptr_reg %{
  return _NO_SPECIAL_PTR_REG_mask;
%}

// Class for all non_special pointer registers (excluding rfp)
reg_class no_special_no_rfp_ptr_reg %{
  return _NO_SPECIAL_NO_RFP_PTR_REG_mask;
%}
  701 
// Class for all float registers
// (first 32-bit slot of each V register only).
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);

// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers (first two 32-bit slots per V reg).
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
  775 
// Class for all SVE vector registers.
// Each register contributes four 32-bit allocator slots (V<n>, _H,
// _J, _K), the same layout as the 128-bit vectorx_reg class below.
// NOTE(review): SVE registers may be wider than 128 bits; slot
// tracking beyond these four slots is presumably handled elsewhere --
// confirm against the SVE matcher support code.
reg_class vectora_reg (
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,
);
  811 
// Class for all 64bit vector registers (two 32-bit slots per register)
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
  847 
// Class for all 128bit vector registers (four 32-bit slots per register)
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
  883 
// Singleton register classes for the individual vector registers
// v0..v31, for use by match rules that pin an operand to a specific
// vector register. NOTE(review): only the first two 32-bit slots
// (V<n>, V<n>_H) are listed although the comments say `128 bit';
// confirm this matches how these classes are consumed by the matcher.
// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Class for 128 bit register v4
reg_class v4_reg(
    V4, V4_H
);

// Class for 128 bit register v5
reg_class v5_reg(
    V5, V5_H
);

// Class for 128 bit register v6
reg_class v6_reg(
    V6, V6_H
);

// Class for 128 bit register v7
reg_class v7_reg(
    V7, V7_H
);

// Class for 128 bit register v8
reg_class v8_reg(
    V8, V8_H
);

// Class for 128 bit register v9
reg_class v9_reg(
    V9, V9_H
);

// Class for 128 bit register v10
reg_class v10_reg(
    V10, V10_H
);

// Class for 128 bit register v11
reg_class v11_reg(
    V11, V11_H
);

// Class for 128 bit register v12
reg_class v12_reg(
    V12, V12_H
);

// Class for 128 bit register v13
reg_class v13_reg(
    V13, V13_H
);

// Class for 128 bit register v14
reg_class v14_reg(
    V14, V14_H
);

// Class for 128 bit register v15
reg_class v15_reg(
    V15, V15_H
);

// Class for 128 bit register v16
reg_class v16_reg(
    V16, V16_H
);

// Class for 128 bit register v17
reg_class v17_reg(
    V17, V17_H
);

// Class for 128 bit register v18
reg_class v18_reg(
    V18, V18_H
);

// Class for 128 bit register v19
reg_class v19_reg(
    V19, V19_H
);

// Class for 128 bit register v20
reg_class v20_reg(
    V20, V20_H
);

// Class for 128 bit register v21
reg_class v21_reg(
    V21, V21_H
);

// Class for 128 bit register v22
reg_class v22_reg(
    V22, V22_H
);

// Class for 128 bit register v23
reg_class v23_reg(
    V23, V23_H
);

// Class for 128 bit register v24
reg_class v24_reg(
    V24, V24_H
);

// Class for 128 bit register v25
reg_class v25_reg(
    V25, V25_H
);

// Class for 128 bit register v26
reg_class v26_reg(
    V26, V26_H
);

// Class for 128 bit register v27
reg_class v27_reg(
    V27, V27_H
);

// Class for 128 bit register v28
reg_class v28_reg(
    V28, V28_H
);

// Class for 128 bit register v29
reg_class v29_reg(
    V29, V29_H
);

// Class for 128 bit register v30
reg_class v30_reg(
    V30, V30_H
);

// Class for 128 bit register v31
reg_class v31_reg(
    V31, V31_H
);
 1043 
// Class for all SVE predicate registers.
// p7 is excluded: it is kept with all elements preset to TRUE (see
// the inline comment below) and must never be clobbered by the
// allocator.
reg_class pr_reg (
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    // P7, non-allocatable, preserved with all elements preset to TRUE.
    P8,
    P9,
    P10,
    P11,
    P12,
    P13,
    P14,
    P15
);
 1063 
// Class for SVE governing predicate registers, which are used
// to determine the active elements of a predicated instruction.
// Only p0-p6 are listed -- presumably because governing predicates
// must be encodable in the restricted low predicate range; confirm
// against the SVE instruction encodings.
reg_class gov_pr (
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    // P7, non-allocatable, preserved with all elements preset to TRUE.
);
 1076 
// Singleton classes for predicate registers p0 and p1, for match
// rules that require one specific predicate register.
reg_class p0_reg(P0);
reg_class p1_reg(P1);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);
 1082 
 1083 %}
 1084 
 1085 //----------DEFINITION BLOCK---------------------------------------------------
 1086 // Define name --> value mappings to inform the ADLC of an integer valued name
 1087 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 1088 // Format:
 1089 //        int_def  <name>         ( <int_value>, <expression>);
 1090 // Generated Code in ad_<arch>.hpp
 1091 //        #define  <name>   (<expression>)
 1092 //        // value == <int_value>
 1093 // Generated code in ad_<arch>.cpp adlc_verification()
 1094 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 1095 //
 1096 
 1097 // we follow the ppc-aix port in using a simple cost model which ranks
 1098 // register operations as cheap, memory ops as more expensive and
 1099 // branches as most expensive. the first two have a low as well as a
 1100 // normal cost. huge cost appears to be a way of saying don't do
 1101 // something
 1102 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls are ranked twice as expensive as register ops.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile references are ranked much more expensive still, to
  // discourage the matcher from duplicating them.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 1110 
 1111 
 1112 //----------SOURCE BLOCK-------------------------------------------------------
 1113 // This is a block of C++ code which provides values, functions, and
 1114 // definitions necessary in the rest of the architecture description
 1115 
 1116 source_hpp %{
 1117 
 1118 #include "asm/macroAssembler.hpp"
 1119 #include "gc/shared/barrierSetAssembler.hpp"
 1120 #include "gc/shared/cardTable.hpp"
 1121 #include "gc/shared/cardTableBarrierSet.hpp"
 1122 #include "gc/shared/collectedHeap.hpp"
 1123 #include "opto/addnode.hpp"
 1124 #include "opto/convertnode.hpp"
 1125 #include "runtime/objectMonitor.hpp"
 1126 
 1127 extern RegMask _ANY_REG32_mask;
 1128 extern RegMask _ANY_REG_mask;
 1129 extern RegMask _PTR_REG_mask;
 1130 extern RegMask _NO_SPECIAL_REG32_mask;
 1131 extern RegMask _NO_SPECIAL_REG_mask;
 1132 extern RegMask _NO_SPECIAL_PTR_REG_mask;
 1133 extern RegMask _NO_SPECIAL_NO_RFP_PTR_REG_mask;
 1134 
// Platform hooks describing call trampoline stubs; AArch64 emits none,
// so both queries report zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
 1152 
// Platform hooks for the exception and deopt handler stubs appended to
// each compiled method. The emit_* functions are defined elsewhere in
// this file; the size_* functions report how much code space to reserve.
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  static uint size_exception_handler() {
    // the exception handler is a single far branch to the code stub
    return MacroAssembler::far_codestub_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    return NativeInstruction::instruction_size + MacroAssembler::far_codestub_branch_size();
  }
};
 1169 
// Platform-dependent node flags; AArch64 defines no extra flags beyond
// the shared ones, so _last_flag simply aliases Node::_last_flag.
class Node::PD {
public:
  enum NodeFlags {
    _last_flag = Node::_last_flag
  };
};
 1176 
  // Declarations of the volatile-access predicates; the definitions
  // (with full commentary) live in the source block below.
  bool is_CAS(int opcode, bool maybe_volatile);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);

  // Convert BoolTest condition to Assembler condition.
  // Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
  Assembler::Condition to_assembler_cond(BoolTest::mask cond);
 1199 %}
 1200 
 1201 source %{
 1202 
 1203   // Derived RegMask with conditionally allocatable registers
 1204 
  // No platform-specific mach node analysis is needed on AArch64.
  void PhaseOutput::pd_perform_mach_node_analysis() {
  }

  // Returning 1 means no special alignment is required for any node.
  int MachNode::pd_alignment_required() const {
    return 1;
  }

  // No padding is ever inserted before a node on this platform.
  int MachNode::compute_padding(int current_offset) const {
    return 0;
  }

  // Definitions of the RegMasks declared extern in the source_hpp
  // block; they are populated at startup by reg_mask_init() below.
  RegMask _ANY_REG32_mask;
  RegMask _ANY_REG_mask;
  RegMask _PTR_REG_mask;
  RegMask _NO_SPECIAL_REG32_mask;
  RegMask _NO_SPECIAL_REG_mask;
  RegMask _NO_SPECIAL_PTR_REG_mask;
  RegMask _NO_SPECIAL_NO_RFP_PTR_REG_mask;
 1223 
  // Populate the derived RegMasks defined above. Called once at
  // startup, after the adlc-generated masks (_ALL_REG_mask etc.) have
  // been built.
  void reg_mask_init() {
    // We derive below RegMask(s) from the ones which are auto-generated from
    // adlc register classes to make AArch64 rheapbase (r27) and rfp (r29)
    // registers conditionally reserved.

    // exclude r31 (sp) from the 32-bit register set
    _ANY_REG32_mask = _ALL_REG32_mask;
    _ANY_REG32_mask.Remove(OptoReg::as_OptoReg(r31_sp->as_VMReg()));

    _ANY_REG_mask = _ALL_REG_mask;

    _PTR_REG_mask = _ALL_REG_mask;

    _NO_SPECIAL_REG32_mask = _ALL_REG32_mask;
    _NO_SPECIAL_REG32_mask.SUBTRACT(_NON_ALLOCATABLE_REG32_mask);

    _NO_SPECIAL_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    _NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    // r27 is not allocatable when compressed oops is on and heapbase is not
    // zero, compressed klass pointers doesn't use r27 after JDK-8234794
    if (UseCompressedOops && (CompressedOops::ptrs_base() != NULL)) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
      _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
      _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
    }

    // r29 is not allocatable when PreserveFramePointer is on
    if (PreserveFramePointer) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
      _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
      _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
    }

    // n.b. derived from _NO_SPECIAL_PTR_REG_mask so r27 exclusion (if
    // any) carries over; r29 is unconditionally removed here.
    _NO_SPECIAL_NO_RFP_PTR_REG_mask = _NO_SPECIAL_PTR_REG_mask;
    _NO_SPECIAL_NO_RFP_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
  }
 1263 
  // Optimization of volatile gets and puts
  // --------------------------------------
 1266   //
 1267   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
 1268   // use to implement volatile reads and writes. For a volatile read
 1269   // we simply need
 1270   //
 1271   //   ldar<x>
 1272   //
 1273   // and for a volatile write we need
 1274   //
 1275   //   stlr<x>
 1276   //
 1277   // Alternatively, we can implement them by pairing a normal
 1278   // load/store with a memory barrier. For a volatile read we need
 1279   //
 1280   //   ldr<x>
 1281   //   dmb ishld
 1282   //
 1283   // for a volatile write
 1284   //
 1285   //   dmb ish
 1286   //   str<x>
 1287   //   dmb ish
 1288   //
 1289   // We can also use ldaxr and stlxr to implement compare and swap CAS
 1290   // sequences. These are normally translated to an instruction
 1291   // sequence like the following
 1292   //
 1293   //   dmb      ish
 1294   // retry:
 1295   //   ldxr<x>   rval raddr
 1296   //   cmp       rval rold
 1297   //   b.ne done
 1298   //   stlxr<x>  rval, rnew, rold
 1299   //   cbnz      rval retry
 1300   // done:
 1301   //   cset      r0, eq
 1302   //   dmb ishld
 1303   //
 1304   // Note that the exclusive store is already using an stlxr
 1305   // instruction. That is required to ensure visibility to other
 1306   // threads of the exclusive write (assuming it succeeds) before that
 1307   // of any subsequent writes.
 1308   //
 1309   // The following instruction sequence is an improvement on the above
 1310   //
 1311   // retry:
 1312   //   ldaxr<x>  rval raddr
 1313   //   cmp       rval rold
 1314   //   b.ne done
 1315   //   stlxr<x>  rval, rnew, rold
 1316   //   cbnz      rval retry
 1317   // done:
 1318   //   cset      r0, eq
 1319   //
 1320   // We don't need the leading dmb ish since the stlxr guarantees
 1321   // visibility of prior writes in the case that the swap is
 1322   // successful. Crucially we don't have to worry about the case where
 1323   // the swap is not successful since no valid program should be
 1324   // relying on visibility of prior changes by the attempting thread
 1325   // in the case where the CAS fails.
 1326   //
 1327   // Similarly, we don't need the trailing dmb ishld if we substitute
 1328   // an ldaxr instruction since that will provide all the guarantees we
 1329   // require regarding observation of changes made by other threads
 1330   // before any change to the CAS address observed by the load.
 1331   //
 1332   // In order to generate the desired instruction sequence we need to
 1333   // be able to identify specific 'signature' ideal graph node
 1334   // sequences which i) occur as a translation of a volatile reads or
 1335   // writes or CAS operations and ii) do not occur through any other
 1336   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
 1338   // sequences to the desired machine code sequences. Selection of the
 1339   // alternative rules can be implemented by predicates which identify
 1340   // the relevant node sequences.
 1341   //
 1342   // The ideal graph generator translates a volatile read to the node
 1343   // sequence
 1344   //
 1345   //   LoadX[mo_acquire]
 1346   //   MemBarAcquire
 1347   //
 1348   // As a special case when using the compressed oops optimization we
 1349   // may also see this variant
 1350   //
 1351   //   LoadN[mo_acquire]
 1352   //   DecodeN
 1353   //   MemBarAcquire
 1354   //
 1355   // A volatile write is translated to the node sequence
 1356   //
 1357   //   MemBarRelease
 1358   //   StoreX[mo_release] {CardMark}-optional
 1359   //   MemBarVolatile
 1360   //
 1361   // n.b. the above node patterns are generated with a strict
 1362   // 'signature' configuration of input and output dependencies (see
 1363   // the predicates below for exact details). The card mark may be as
 1364   // simple as a few extra nodes or, in a few GC configurations, may
 1365   // include more complex control flow between the leading and
 1366   // trailing memory barriers. However, whatever the card mark
 1367   // configuration these signatures are unique to translated volatile
 1368   // reads/stores -- they will not appear as a result of any other
 1369   // bytecode translation or inlining nor as a consequence of
 1370   // optimizing transforms.
 1371   //
 1372   // We also want to catch inlined unsafe volatile gets and puts and
 1373   // be able to implement them using either ldar<x>/stlr<x> or some
 1374   // combination of ldr<x>/stlr<x> and dmb instructions.
 1375   //
 1376   // Inlined unsafe volatiles puts manifest as a minor variant of the
 1377   // normal volatile put node sequence containing an extra cpuorder
 1378   // membar
 1379   //
 1380   //   MemBarRelease
 1381   //   MemBarCPUOrder
 1382   //   StoreX[mo_release] {CardMark}-optional
 1383   //   MemBarCPUOrder
 1384   //   MemBarVolatile
 1385   //
 1386   // n.b. as an aside, a cpuorder membar is not itself subject to
 1387   // matching and translation by adlc rules.  However, the rule
 1388   // predicates need to detect its presence in order to correctly
 1389   // select the desired adlc rules.
 1390   //
 1391   // Inlined unsafe volatile gets manifest as a slightly different
 1392   // node sequence to a normal volatile get because of the
 1393   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
 1395   // MemBarAcquire, possibly through an optional DecodeN, is still
 1396   // present
 1397   //
 1398   //   MemBarCPUOrder
 1399   //        ||       \\
 1400   //   MemBarCPUOrder LoadX[mo_acquire]
 1401   //        ||            |
 1402   //        ||       {DecodeN} optional
 1403   //        ||       /
 1404   //     MemBarAcquire
 1405   //
 1406   // In this case the acquire membar does not directly depend on the
 1407   // load. However, we can be sure that the load is generated from an
 1408   // inlined unsafe volatile get if we see it dependent on this unique
 1409   // sequence of membar nodes. Similarly, given an acquire membar we
 1410   // can know that it was added because of an inlined unsafe volatile
 1411   // get if it is fed and feeds a cpuorder membar and if its feed
 1412   // membar also feeds an acquiring load.
 1413   //
 1414   // Finally an inlined (Unsafe) CAS operation is translated to the
 1415   // following ideal graph
 1416   //
 1417   //   MemBarRelease
 1418   //   MemBarCPUOrder
 1419   //   CompareAndSwapX {CardMark}-optional
 1420   //   MemBarCPUOrder
 1421   //   MemBarAcquire
 1422   //
 1423   // So, where we can identify these volatile read and write
 1424   // signatures we can choose to plant either of the above two code
 1425   // sequences. For a volatile read we can simply plant a normal
 1426   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
 1427   // also choose to inhibit translation of the MemBarAcquire and
 1428   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
 1429   //
 1430   // When we recognise a volatile store signature we can choose to
  // plant a dmb ish as a translation for the MemBarRelease, a
 1432   // normal str<x> and then a dmb ish for the MemBarVolatile.
 1433   // Alternatively, we can inhibit translation of the MemBarRelease
 1434   // and MemBarVolatile and instead plant a simple stlr<x>
 1435   // instruction.
 1436   //
 1437   // when we recognise a CAS signature we can choose to plant a dmb
 1438   // ish as a translation for the MemBarRelease, the conventional
 1439   // macro-instruction sequence for the CompareAndSwap node (which
 1440   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
 1441   // Alternatively, we can elide generation of the dmb instructions
 1442   // and plant the alternative CompareAndSwap macro-instruction
 1443   // sequence (which uses ldaxr<x>).
 1444   //
 1445   // Of course, the above only applies when we see these signature
 1446   // configurations. We still want to plant dmb instructions in any
 1447   // other cases where we may see a MemBarAcquire, MemBarRelease or
 1448   // MemBarVolatile. For example, at the end of a constructor which
 1449   // writes final/volatile fields we will see a MemBarRelease
 1450   // instruction and this needs a 'dmb ish' lest we risk the
 1451   // constructed object being visible without making the
 1452   // final/volatile field writes visible.
 1453   //
 1454   // n.b. the translation rules below which rely on detection of the
 1455   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
 1456   // If we see anything other than the signature configurations we
 1457   // always just translate the loads and stores to ldr<x> and str<x>
 1458   // and translate acquire, release and volatile membars to the
 1459   // relevant dmb instructions.
 1460   //
 1461 
 1462   // is_CAS(int opcode, bool maybe_volatile)
 1463   //
 1464   // return true if opcode is one of the possible CompareAndSwapX
 1465   // values otherwise false.
 1466 
  // Returns true if opcode denotes an atomic read-modify-write that is
  // planted as a ldxr/stlxr-style macro sequence. The first group of
  // opcodes (strong CAS plus get-and-set/get-and-add) always counts;
  // the second group (weak CAS and compare-and-exchange forms) counts
  // only when the caller passes maybe_volatile == true.
  bool is_CAS(int opcode, bool maybe_volatile)
  {
    switch(opcode) {
      // We handle these
    case Op_CompareAndSwapI:
    case Op_CompareAndSwapL:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_CompareAndSwapB:
    case Op_CompareAndSwapS:
    case Op_GetAndSetI:
    case Op_GetAndSetL:
    case Op_GetAndSetP:
    case Op_GetAndSetN:
    case Op_GetAndAddI:
    case Op_GetAndAddL:
      return true;
      // These only count as CAS when the access may be volatile
    case Op_CompareAndExchangeI:
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeB:
    case Op_CompareAndExchangeS:
    case Op_CompareAndExchangeL:
    case Op_CompareAndExchangeP:
    case Op_WeakCompareAndSwapB:
    case Op_WeakCompareAndSwapS:
    case Op_WeakCompareAndSwapI:
    case Op_WeakCompareAndSwapL:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      return maybe_volatile;
    default:
      return false;
    }
  }
 1507 
 1508   // helper to determine the maximum number of Phi nodes we may need to
 1509   // traverse when searching from a card mark membar for the merge mem
 1510   // feeding a trailing membar or vice versa
 1511 
 1512 // predicates controlling emit of ldr<x>/ldar<x>
 1513 
// Returns true when a MemBarAcquire can be elided because the access
// it trails will be planted in its acquiring form (ldar<x>/ldaxr<x>),
// making an explicit dmb redundant (see the commentary above).
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  MemBarNode* mb = barrier->as_MemBar();

  // Trailing membar of a volatile load: the load itself is emitted as
  // an acquiring load (cf. needs_acquiring_load), so no dmb is needed.
  if (mb->trailing_load()) {
    return true;
  }

  // Trailing membar of a CAS: elidable when the preceding LoadStore
  // will use the acquiring exclusive-load form.
  if (mb->trailing_load_store()) {
    Node* load_store = mb->in(MemBarNode::Precedent);
    assert(load_store->is_LoadStore(), "unexpected graph shape");
    return is_CAS(load_store->Opcode(), true);
  }

  return false;
}
 1532 
 1533 bool needs_acquiring_load(const Node *n)
 1534 {
 1535   assert(n->is_Load(), "expecting a load");
 1536   LoadNode *ld = n->as_Load();
 1537   return ld->is_acquire();
 1538 }
 1539 
 1540 bool unnecessary_release(const Node *n)
 1541 {
 1542   assert((n->is_MemBar() &&
 1543           n->Opcode() == Op_MemBarRelease),
 1544          "expecting a release membar");
 1545 
 1546   MemBarNode *barrier = n->as_MemBar();
 1547   if (!barrier->leading()) {
 1548     return false;
 1549   } else {
 1550     Node* trailing = barrier->trailing_membar();
 1551     MemBarNode* trailing_mb = trailing->as_MemBar();
 1552     assert(trailing_mb->trailing(), "Not a trailing membar?");
 1553     assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
 1554 
 1555     Node* mem = trailing_mb->in(MemBarNode::Precedent);
 1556     if (mem->is_Store()) {
 1557       assert(mem->as_Store()->is_release(), "");
 1558       assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
 1559       return true;
 1560     } else {
 1561       assert(mem->is_LoadStore(), "");
 1562       assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
 1563       return is_CAS(mem->Opcode(), true);
 1564     }
 1565   }
 1566   return false;
 1567 }
 1568 
// Returns true when a MemBarVolatile can be elided because it is the
// trailing membar of a volatile store whose StoreX will be planted in
// its releasing form (stlr<x>) -- see the commentary above.
bool unnecessary_volatile(const Node *n)
{
  // assert n->is_MemBar();
  MemBarNode *mbvol = n->as_MemBar();

  bool release = mbvol->trailing_store();
  assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
#ifdef ASSERT
  // sanity check the leading/trailing membar pairing
  if (release) {
    Node* leading = mbvol->leading_membar();
    assert(leading->Opcode() == Op_MemBarRelease, "");
    assert(leading->as_MemBar()->leading_store(), "");
    assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
  }
#endif

  return release;
}
 1587 
 1588 // predicates controlling emit of str<x>/stlr<x>
 1589 
 1590 bool needs_releasing_store(const Node *n)
 1591 {
 1592   // assert n->is_Store();
 1593   StoreNode *st = n->as_Store();
 1594   return st->trailing_membar() != NULL;
 1595 }
 1596 
 1597 // predicate controlling translation of CAS
 1598 //
 1599 // returns true if CAS needs to use an acquiring load otherwise false
 1600 
// Returns true if the CAS-style node should use the acquiring
// exclusive-load (ldaxr<x>) macro sequence, otherwise false.
bool needs_acquiring_load_exclusive(const Node *n)
{
  assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
  LoadStoreNode* ldst = n->as_LoadStore();
  if (is_CAS(n->Opcode(), false)) {
    // strong CAS / get-and-set / get-and-add: always paired with a
    // trailing membar and always uses the acquiring form
    assert(ldst->trailing_membar() != NULL, "expected trailing membar");
  } else {
    // weak CAS / compare-and-exchange: acquiring form only when it was
    // generated for a volatile access, indicated by a trailing membar
    return ldst->trailing_membar() != NULL;
  }

  // so we can just return true here
  return true;
}
 1614 
 1615 #define __ _masm.
 1616 
 1617 // advance declarations for helper functions to convert register
 1618 // indices to register objects
 1619 
 1620 // the ad file has to provide implementations of certain methods
 1621 // expected by the generic code
 1622 //
 1623 // REQUIRED FUNCTIONALITY
 1624 
 1625 //=============================================================================
 1626 
 1627 // !!!!! Special hack to get all types of calls to specify the byte offset
 1628 //       from the start of the call to the point where the return address
 1629 //       will point.
 1630 
 1631 int MachCallStaticJavaNode::ret_addr_offset()
 1632 {
 1633   // call should be a simple bl
 1634   int off = 4;
 1635   return off;
 1636 }
 1637 
// A dynamic Java call is a four-instruction sequence, so the return
// address lies 16 bytes past the start of the call.
int MachCallDynamicJavaNode::ret_addr_offset()
{
  return 16; // movz, movk, movk, bl
}
 1642 
 1643 int MachCallRuntimeNode::ret_addr_offset() {
 1644   // for generated stubs the call will be
 1645   //   bl(addr)
 1646   // or with far branches
 1647   //   bl(trampoline_stub)
 1648   // for real runtime callouts it will be six instructions
 1649   // see aarch64_enc_java_to_runtime
 1650   //   adr(rscratch2, retaddr)
 1651   //   str(rscratch2, Address(rthread, JavaThread::last_Java_pc_offset()));
 1652   //   lea(rscratch1, RuntimeAddress(addr)
 1653   //   blr(rscratch1)
 1654   CodeBlob *cb = CodeCache::find_blob(_entry_point);
 1655   if (cb) {
 1656     return 1 * NativeInstruction::instruction_size;
 1657   } else {
 1658     return 6 * NativeInstruction::instruction_size;
 1659   }
 1660 }
 1661 
 1662 //=============================================================================
 1663 
 1664 #ifndef PRODUCT
// Debug-only: print the breakpoint pseudo-instruction.
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
 1668 #endif
 1669 
// Emit a brk 0 (software breakpoint) instruction.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  C2_MacroAssembler _masm(&cbuf);
  __ brk(0);
}
 1674 
// Defer to the generic size computation for this node.
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 1678 
 1679 //=============================================================================
 1680 
 1681 #ifndef PRODUCT
  // Debug-only: print the nop padding pseudo-op.
  // NOTE(review): _count is the number of nop instructions (see
  // size()/emit() below), though the format string labels it `bytes'
  // -- confirm the intended units.
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
 1685 #endif
 1686 
 1687   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
 1688     C2_MacroAssembler _masm(&cbuf);
 1689     for (int i = 0; i < _count; i++) {
 1690       __ nop();
 1691     }
 1692   }
 1693 
  // Size in bytes: _count nop instructions.
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
 1697 
 1698 //=============================================================================
 1699 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
 1700 
 1701 int ConstantTable::calculate_table_base_offset() const {
 1702   return 0;  // absolute addressing, no offset
 1703 }
 1704 
 1705 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
 1706 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
 1707   ShouldNotReachHere();
 1708 }
 1709 
 1710 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
 1711   // Empty encoding
 1712 }
 1713 
// Matches the empty encoding in emit() above: zero bytes.
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}
 1717 
#ifndef PRODUCT
// Debug-only listing text; notes that the node emits no code.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
 1723 
#ifndef PRODUCT
// Debug-only pseudo-assembly listing of the method prolog. The text here
// is a human-readable approximation of what emit() below produces; it is
// never assembled, so mnemonics are descriptive rather than exact.
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  if (C->output()->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // ROP protection: probe the return address then sign it (paciaz).
  if (VM_Version::use_rop_protection()) {
    st->print("ldr  zr, [lr]\n\t");
    st->print("paciaz\n\t");
  }
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    // Small frame: single sub plus a stp of rfp/lr at the top of the frame.
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    // Large frame: push rfp/lr first, then drop sp via a scratch register.
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
  // nmethod entry barrier pseudo-code (GCs with concurrent class unloading).
  if (C->stub_function() == NULL && BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) {
    st->print("\n\t");
    st->print("ldr  rscratch1, [guard]\n\t");
    st->print("dmb ishld\n\t");
    st->print("ldr  rscratch2, [rthread, #thread_disarmed_guard_value_offset]\n\t");
    st->print("cmp  rscratch1, rscratch2\n\t");
    st->print("b.eq skip");
    st->print("\n\t");
    st->print("blr #nmethod_entry_barrier_stub\n\t");
    st->print("b skip\n\t");
    st->print("guard: int\n\t");
    st->print("\n\t");
    st->print("skip:\n\t");
  }
}
#endif
 1763 
// Emit the method prolog: patchable nop, optional clinit barrier, optional
// stack bang, frame construction, and optional nmethod entry barrier.
// The emission order here is load-bearing; do not reorder.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const int framesize = C->output()->frame_size_in_bytes();

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  // Class-initialization barrier: jump to the wrong-method stub unless the
  // holder class is initialized (or being initialized by this thread).
  if (C->clinit_barrier_on_entry()) {
    assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");

    Label L_skip_barrier;

    __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
    __ bind(L_skip_barrier);
  }

  // NOTE(review): presumably re-establishes the all-true SVE predicate
  // register for vectorized code -- confirm against reinitialize_ptrue().
  if (C->max_vector_size() > 0) {
    __ reinitialize_ptrue();
  }

  int bangsize = C->output()->bang_size_in_bytes();
  if (C->output()->need_stack_bang(bangsize))
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  // nmethod entry barrier; skipped for runtime stubs.
  if (C->stub_function() == NULL) {
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
    if (BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) {
      // Dummy labels for just measuring the code size
      Label dummy_slow_path;
      Label dummy_continuation;
      Label dummy_guard;
      Label* slow_path = &dummy_slow_path;
      Label* continuation = &dummy_continuation;
      Label* guard = &dummy_guard;
      if (!Compile::current()->output()->in_scratch_emit_size()) {
        // Use real labels from actual stub when not emitting code for the purpose of measuring its size
        C2EntryBarrierStub* stub = new (Compile::current()->comp_arena()) C2EntryBarrierStub();
        Compile::current()->output()->add_stub(stub);
        slow_path = &stub->entry();
        continuation = &stub->continuation();
        guard = &stub->guard();
      }
      // In the C2 code, we move the non-hot part of nmethod entry barriers out-of-line to a stub.
      bs->nmethod_entry_barrier(&_masm, slow_path, continuation, guard);
    }
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->output()->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    ConstantTable& constant_table = C->output()->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
 1832 
// Prolog size varies with barriers/bang/frame shape, so measure it by
// re-emitting into a scratch buffer instead of computing it analytically.
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}
 1838 
// Number of relocation entries the prolog contributes: none.
int MachPrologNode::reloc() const
{
  return 0;
}
 1843 
 1844 //=============================================================================
 1845 
#ifndef PRODUCT
// Debug-only pseudo-assembly listing of the method epilog: pop the frame
// (three shapes depending on frame size), undo ROP protection, and test
// the polling word for a safepoint on method return.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    // Large frame: offset does not fit an immediate, go through rscratch1.
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }
  // ROP protection: authenticate the return address, then probe it.
  if (VM_Version::use_rop_protection()) {
    st->print("autiaz\n\t");
    st->print("ldr  zr, [lr]\n\t");
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# test polling word\n\t");
    st->print("ldr  rscratch1, [rthread],#%d\n\t", in_bytes(JavaThread::polling_word_offset()));
    st->print("cmp  sp, rscratch1\n\t");
    st->print("bhi #slow_path");
  }
}
#endif
 1876 
// Emit the method epilog: tear down the frame, optionally check reserved
// stack pages, and emit the return-time safepoint poll.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    // Use a dummy label while measuring code size in a scratch buffer;
    // the real emission registers a C2SafepointPollStub and branches to it.
    Label dummy_label;
    Label* code_stub = &dummy_label;
    if (!C->output()->in_scratch_emit_size()) {
      C2SafepointPollStub* stub = new (C->comp_arena()) C2SafepointPollStub(__ offset());
      C->output()->add_stub(stub);
      code_stub = &stub->entry();
    }
    __ relocate(relocInfo::poll_return_type);
    __ safepoint_poll(*code_stub, true /* at_return */, false /* acquire */, true /* in_nmethod */);
  }
}
 1900 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}
 1905 
int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}
 1910 
// Use the generic pipeline class for scheduling the epilog.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
 1914 
 1915 //=============================================================================
 1916 
 1917 // Figure out which register class each belongs in: rc_int, rc_float or
 1918 // rc_stack.
// Spill-copy register classes, listed in the order the corresponding
// register slots are laid out in the allocator's numbering (see rc_class()).
enum RC { rc_bad, rc_int, rc_float, rc_predicate, rc_stack };
 1920 
 1921 static enum RC rc_class(OptoReg::Name reg) {
 1922 
 1923   if (reg == OptoReg::Bad) {
 1924     return rc_bad;
 1925   }
 1926 
 1927   // we have 32 int registers * 2 halves
 1928   int slots_of_int_registers = Register::number_of_registers * Register::max_slots_per_register;
 1929 
 1930   if (reg < slots_of_int_registers) {
 1931     return rc_int;
 1932   }
 1933 
 1934   // we have 32 float register * 8 halves
 1935   int slots_of_float_registers = FloatRegister::number_of_registers * FloatRegister::max_slots_per_register;
 1936   if (reg < slots_of_int_registers + slots_of_float_registers) {
 1937     return rc_float;
 1938   }
 1939 
 1940   int slots_of_predicate_registers = PRegister::number_of_registers * PRegister::max_slots_per_register;
 1941   if (reg < slots_of_int_registers + slots_of_float_registers + slots_of_predicate_registers) {
 1942     return rc_predicate;
 1943   }
 1944 
 1945   // Between predicate regs & stack is the flags.
 1946   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
 1947 
 1948   return rc_stack;
 1949 }
 1950 
 1951 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
 1952   Compile* C = ra_->C;
 1953 
 1954   // Get registers to move.
 1955   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
 1956   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
 1957   OptoReg::Name dst_hi = ra_->get_reg_second(this);
 1958   OptoReg::Name dst_lo = ra_->get_reg_first(this);
 1959 
 1960   enum RC src_hi_rc = rc_class(src_hi);
 1961   enum RC src_lo_rc = rc_class(src_lo);
 1962   enum RC dst_hi_rc = rc_class(dst_hi);
 1963   enum RC dst_lo_rc = rc_class(dst_lo);
 1964 
 1965   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
 1966 
 1967   if (src_hi != OptoReg::Bad && !bottom_type()->isa_vectmask()) {
 1968     assert((src_lo&1)==0 && src_lo+1==src_hi &&
 1969            (dst_lo&1)==0 && dst_lo+1==dst_hi,
 1970            "expected aligned-adjacent pairs");
 1971   }
 1972 
 1973   if (src_lo == dst_lo && src_hi == dst_hi) {
 1974     return 0;            // Self copy, no move.
 1975   }
 1976 
 1977   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
 1978               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
 1979   int src_offset = ra_->reg2offset(src_lo);
 1980   int dst_offset = ra_->reg2offset(dst_lo);
 1981 
 1982   if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
 1983     uint ireg = ideal_reg();
 1984     if (ireg == Op_VecA && cbuf) {
 1985       C2_MacroAssembler _masm(cbuf);
 1986       int sve_vector_reg_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
 1987       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
 1988         // stack->stack
 1989         __ spill_copy_sve_vector_stack_to_stack(src_offset, dst_offset,
 1990                                                 sve_vector_reg_size_in_bytes);
 1991       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
 1992         __ spill_sve_vector(as_FloatRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo),
 1993                             sve_vector_reg_size_in_bytes);
 1994       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
 1995         __ unspill_sve_vector(as_FloatRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo),
 1996                               sve_vector_reg_size_in_bytes);
 1997       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
 1998         __ sve_orr(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 1999                    as_FloatRegister(Matcher::_regEncode[src_lo]),
 2000                    as_FloatRegister(Matcher::_regEncode[src_lo]));
 2001       } else {
 2002         ShouldNotReachHere();
 2003       }
 2004     } else if (cbuf) {
 2005       assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
 2006       C2_MacroAssembler _masm(cbuf);
 2007       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
 2008       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
 2009         // stack->stack
 2010         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
 2011         if (ireg == Op_VecD) {
 2012           __ unspill(rscratch1, true, src_offset);
 2013           __ spill(rscratch1, true, dst_offset);
 2014         } else {
 2015           __ spill_copy128(src_offset, dst_offset);
 2016         }
 2017       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
 2018         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2019                ireg == Op_VecD ? __ T8B : __ T16B,
 2020                as_FloatRegister(Matcher::_regEncode[src_lo]));
 2021       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
 2022         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
 2023                  ireg == Op_VecD ? __ D : __ Q,
 2024                  ra_->reg2offset(dst_lo));
 2025       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
 2026         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2027                    ireg == Op_VecD ? __ D : __ Q,
 2028                    ra_->reg2offset(src_lo));
 2029       } else {
 2030         ShouldNotReachHere();
 2031       }
 2032     }
 2033   } else if (cbuf) {
 2034     C2_MacroAssembler _masm(cbuf);
 2035     switch (src_lo_rc) {
 2036     case rc_int:
 2037       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
 2038         if (is64) {
 2039             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
 2040                    as_Register(Matcher::_regEncode[src_lo]));
 2041         } else {
 2042             C2_MacroAssembler _masm(cbuf);
 2043             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
 2044                     as_Register(Matcher::_regEncode[src_lo]));
 2045         }
 2046       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
 2047         if (is64) {
 2048             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2049                      as_Register(Matcher::_regEncode[src_lo]));
 2050         } else {
 2051             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2052                      as_Register(Matcher::_regEncode[src_lo]));
 2053         }
 2054       } else {                    // gpr --> stack spill
 2055         assert(dst_lo_rc == rc_stack, "spill to bad register class");
 2056         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
 2057       }
 2058       break;
 2059     case rc_float:
 2060       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
 2061         if (is64) {
 2062             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
 2063                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2064         } else {
 2065             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
 2066                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2067         }
 2068       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
 2069         if (is64) {
 2070             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2071                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2072         } else {
 2073             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2074                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2075         }
 2076       } else {                    // fpr --> stack spill
 2077         assert(dst_lo_rc == rc_stack, "spill to bad register class");
 2078         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
 2079                  is64 ? __ D : __ S, dst_offset);
 2080       }
 2081       break;
 2082     case rc_stack:
 2083       if (dst_lo_rc == rc_int) {  // stack --> gpr load
 2084         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
 2085       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
 2086         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2087                    is64 ? __ D : __ S, src_offset);
 2088       } else if (dst_lo_rc == rc_predicate) {
 2089         __ unspill_sve_predicate(as_PRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo),
 2090                                  Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
 2091       } else {                    // stack --> stack copy
 2092         assert(dst_lo_rc == rc_stack, "spill to bad register class");
 2093         if (ideal_reg() == Op_RegVectMask) {
 2094           __ spill_copy_sve_predicate_stack_to_stack(src_offset, dst_offset,
 2095                                                      Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
 2096         } else {
 2097           __ unspill(rscratch1, is64, src_offset);
 2098           __ spill(rscratch1, is64, dst_offset);
 2099         }
 2100       }
 2101       break;
 2102     case rc_predicate:
 2103       if (dst_lo_rc == rc_predicate) {
 2104         __ sve_mov(as_PRegister(Matcher::_regEncode[dst_lo]), as_PRegister(Matcher::_regEncode[src_lo]));
 2105       } else if (dst_lo_rc == rc_stack) {
 2106         __ spill_sve_predicate(as_PRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo),
 2107                                Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
 2108       } else {
 2109         assert(false, "bad src and dst rc_class combination.");
 2110         ShouldNotReachHere();
 2111       }
 2112       break;
 2113     default:
 2114       assert(false, "bad rc_class for spill");
 2115       ShouldNotReachHere();
 2116     }
 2117   }
 2118 
 2119   if (st) {
 2120     st->print("spill ");
 2121     if (src_lo_rc == rc_stack) {
 2122       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
 2123     } else {
 2124       st->print("%s -> ", Matcher::regName[src_lo]);
 2125     }
 2126     if (dst_lo_rc == rc_stack) {
 2127       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
 2128     } else {
 2129       st->print("%s", Matcher::regName[dst_lo]);
 2130     }
 2131     if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
 2132       int vsize = 0;
 2133       switch (ideal_reg()) {
 2134       case Op_VecD:
 2135         vsize = 64;
 2136         break;
 2137       case Op_VecX:
 2138         vsize = 128;
 2139         break;
 2140       case Op_VecA:
 2141         vsize = Matcher::scalable_vector_reg_size(T_BYTE) * 8;
 2142         break;
 2143       default:
 2144         assert(false, "bad register type for spill");
 2145         ShouldNotReachHere();
 2146       }
 2147       st->print("\t# vector spill size = %d", vsize);
 2148     } else if (ideal_reg() == Op_RegVectMask) {
 2149       assert(Matcher::supports_scalable_vector(), "bad register type for spill");
 2150       int vsize = Matcher::scalable_predicate_reg_slots() * 32;
 2151       st->print("\t# predicate spill size = %d", vsize);
 2152     } else {
 2153       st->print("\t# spill size = %d", is64 ? 64 : 32);
 2154     }
 2155   }
 2156 
 2157   return 0;
 2158 
 2159 }
 2160 
#ifndef PRODUCT
// Debug-only listing: delegate to implementation() with a null CodeBuffer
// so it only prints; fall back to a generic form if no allocator yet.
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    implementation(NULL, ra_, false, st);
}
#endif
 2169 
// Emit the spill copy; all the work is in implementation() above.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}
 2173 
// Size varies with register classes involved; measure via scratch emit.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 2177 
 2178 //=============================================================================
 2179 
 2180 #ifndef PRODUCT
 2181 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 2182   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 2183   int reg = ra_->get_reg_first(this);
 2184   st->print("add %s, rsp, #%d]\t# box lock",
 2185             Matcher::regName[reg], offset);
 2186 }
 2187 #endif
 2188 
// Emit the box-lock: compute the lock slot's address as sp + offset.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  C2_MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  // This add will handle any 24-bit signed offset. 24 bits allows an
  // 8 megabyte stack frame.
  __ add(as_Register(reg), sp, offset);
}
 2199 
// One instruction when the offset fits an add/sub immediate, otherwise
// two (the macro assembler goes through a scratch register).
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());

  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    return NativeInstruction::instruction_size;
  } else {
    return 2 * NativeInstruction::instruction_size;
  }
}
 2210 
 2211 //=============================================================================
 2212 
 2213 #ifndef PRODUCT
 2214 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 2215 {
 2216   st->print_cr("# MachUEPNode");
 2217   if (UseCompressedClassPointers) {
 2218     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 2219     if (CompressedKlassPointers::shift() != 0) {
 2220       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
 2221     }
 2222   } else {
 2223    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 2224   }
 2225   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
 2226   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
 2227 }
 2228 #endif
 2229 
// Emit the unverified entry point: inline-cache check that jumps to the
// IC miss stub when the receiver's klass does not match.
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  C2_MacroAssembler _masm(&cbuf);

  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
 2243 
// Size varies with compressed-klass configuration; measure generically.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
 2248 
 2249 // REQUIRED EMIT CODE
 2250 
 2251 //=============================================================================
 2252 
 2253 // Emit exception handler code.
 2254 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
 2255 {
 2256   // mov rscratch1 #exception_blob_entry_point
 2257   // br rscratch1
 2258   // Note that the code buffer's insts_mark is always relative to insts.
 2259   // That's why we must use the macroassembler to generate a handler.
 2260   C2_MacroAssembler _masm(&cbuf);
 2261   address base = __ start_a_stub(size_exception_handler());
 2262   if (base == NULL) {
 2263     ciEnv::current()->record_failure("CodeCache is full");
 2264     return 0;  // CodeBuffer::expand failed
 2265   }
 2266   int offset = __ offset();
 2267   __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
 2268   assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
 2269   __ end_a_stub();
 2270   return offset;
 2271 }
 2272 
 2273 // Emit deopt handler code.
 2274 int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
 2275 {
 2276   // Note that the code buffer's insts_mark is always relative to insts.
 2277   // That's why we must use the macroassembler to generate a handler.
 2278   C2_MacroAssembler _masm(&cbuf);
 2279   address base = __ start_a_stub(size_deopt_handler());
 2280   if (base == NULL) {
 2281     ciEnv::current()->record_failure("CodeCache is full");
 2282     return 0;  // CodeBuffer::expand failed
 2283   }
 2284   int offset = __ offset();
 2285 
 2286   __ adr(lr, __ pc());
 2287   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 2288 
 2289   assert(__ offset() - offset == (int) size_deopt_handler(), "overflow");
 2290   __ end_a_stub();
 2291   return offset;
 2292 }
 2293 
 2294 // REQUIRED MATCHER CODE
 2295 
 2296 //=============================================================================
 2297 
 2298 const bool Matcher::match_rule_supported(int opcode) {
 2299   if (!has_match_rule(opcode))
 2300     return false;
 2301 
 2302   bool ret_value = true;
 2303   switch (opcode) {
 2304     case Op_OnSpinWait:
 2305       return VM_Version::supports_on_spin_wait();
 2306     case Op_CacheWB:
 2307     case Op_CacheWBPreSync:
 2308     case Op_CacheWBPostSync:
 2309       if (!VM_Version::supports_data_cache_line_flush()) {
 2310         ret_value = false;
 2311       }
 2312       break;
 2313     case Op_ExpandBits:
 2314     case Op_CompressBits:
 2315       if (!(UseSVE > 1 && VM_Version::supports_svebitperm())) {
 2316         ret_value = false;
 2317       }
 2318       break;
 2319   }
 2320 
 2321   return ret_value; // Per default match rules are supported.
 2322 }
 2323 
// Register mask covering the SVE predicate registers.
const RegMask* Matcher::predicate_reg_mask(void) {
  return &_PR_REG_mask;
}
 2327 
// Ideal type for a vector mask of 'length' elements of 'elemTy'.
const TypeVectMask* Matcher::predicate_reg_type(const Type* elemTy, int length) {
  return new TypeVectMask(elemTy, length);
}
 2331 
// Vector calling convention not yet implemented.
const bool Matcher::supports_vector_calling_convention(void) {
  return false;
}
 2336 
// Unreachable while supports_vector_calling_convention() is false.
OptoRegPair Matcher::vector_return_value(uint ideal_reg) {
  Unimplemented();
  return OptoRegPair(0, 0);
}
 2341 
 2342 // Is this branch offset short enough that a short branch can be used?
 2343 //
 2344 // NOTE: If the platform does not provide any short branch variants, then
 2345 //       this method should return false for offset 0.
 2346 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
 2347   // The passed offset is relative to address of the branch.
 2348 
 2349   return (-32768 <= offset && offset < 32768);
 2350 }
 2351 
 2352 // Vector width in bytes.
 2353 const int Matcher::vector_width_in_bytes(BasicType bt) {
 2354   // The MaxVectorSize should have been set by detecting SVE max vector register size.
 2355   int size = MIN2((UseSVE > 0) ? 256 : 16, (int)MaxVectorSize);
 2356   // Minimum 2 values in vector
 2357   if (size < 2*type2aelembytes(bt)) size = 0;
 2358   // But never < 4
 2359   if (size < 4) size = 0;
 2360   return size;
 2361 }
 2362 
 2363 // Limits on vector size (number of elements) loaded into vector.
 2364 const int Matcher::max_vector_size(const BasicType bt) {
 2365   return vector_width_in_bytes(bt)/type2aelembytes(bt);
 2366 }
 2367 
 2368 const int Matcher::min_vector_size(const BasicType bt) {
 2369   int max_size = max_vector_size(bt);
 2370   // Limit the min vector size to 8 bytes.
 2371   int size = 8 / type2aelembytes(bt);
 2372   if (bt == T_BYTE) {
 2373     // To support vector api shuffle/rearrange.
 2374     size = 4;
 2375   } else if (bt == T_BOOLEAN) {
 2376     // To support vector api load/store mask.
 2377     size = 2;
 2378   }
 2379   if (size < 2) size = 2;
 2380   return MIN2(size, max_size);
 2381 }
 2382 
// SuperWord vectorization uses the same upper bound as the Vector API.
const int Matcher::superword_max_vector_size(const BasicType bt) {
  return Matcher::max_vector_size(bt);
}
 2386 
 2387 // Actual max scalable vector register length.
 2388 const int Matcher::scalable_vector_reg_size(const BasicType bt) {
 2389   return Matcher::max_vector_size(bt);
 2390 }
 2391 
 2392 // Vector ideal reg.
 2393 const uint Matcher::vector_ideal_reg(int len) {
 2394   if (UseSVE > 0 && 16 < len && len <= 256) {
 2395     return Op_VecA;
 2396   }
 2397   switch(len) {
 2398     // For 16-bit/32-bit mask vector, reuse VecD.
 2399     case  2:
 2400     case  4:
 2401     case  8: return Op_VecD;
 2402     case 16: return Op_VecX;
 2403   }
 2404   ShouldNotReachHere();
 2405   return 0;
 2406 }
 2407 
// Replace a generic vector operand with the concrete operand class for
// the given ideal register kind; 'is_temp' is unused on AArch64.
MachOper* Matcher::pd_specialize_generic_vector_operand(MachOper* generic_opnd, uint ideal_reg, bool is_temp) {
  assert(Matcher::is_generic_vector(generic_opnd), "not generic");
  switch (ideal_reg) {
    case Op_VecA: return new vecAOper();
    case Op_VecD: return new vecDOper();
    case Op_VecX: return new vecXOper();
  }
  ShouldNotReachHere();
  return NULL;
}
 2418 
// AArch64 has no mach nodes treated as pure register-to-register moves.
bool Matcher::is_reg2reg_move(MachNode* m) {
  return false;
}
 2422 
// A generic vector operand is one with the VREG opcode.
bool Matcher::is_generic_vector(MachOper* opnd)  {
  return opnd->opcode() == VREG;
}
 2426 
 2427 // Return whether or not this register is ever used as an argument.
 2428 // This function is used on startup to build the trampoline stubs in
 2429 // generateOptoStub.  Registers not mentioned will be killed by the VM
 2430 // call in the trampoline, and arguments in those registers not be
 2431 // available to the callee.
 2432 bool Matcher::can_be_java_arg(int reg)
 2433 {
 2434   return
 2435     reg ==  R0_num || reg == R0_H_num ||
 2436     reg ==  R1_num || reg == R1_H_num ||
 2437     reg ==  R2_num || reg == R2_H_num ||
 2438     reg ==  R3_num || reg == R3_H_num ||
 2439     reg ==  R4_num || reg == R4_H_num ||
 2440     reg ==  R5_num || reg == R5_H_num ||
 2441     reg ==  R6_num || reg == R6_H_num ||
 2442     reg ==  R7_num || reg == R7_H_num ||
 2443     reg ==  V0_num || reg == V0_H_num ||
 2444     reg ==  V1_num || reg == V1_H_num ||
 2445     reg ==  V2_num || reg == V2_H_num ||
 2446     reg ==  V3_num || reg == V3_H_num ||
 2447     reg ==  V4_num || reg == V4_H_num ||
 2448     reg ==  V5_num || reg == V5_H_num ||
 2449     reg ==  V6_num || reg == V6_H_num ||
 2450     reg ==  V7_num || reg == V7_H_num;
 2451 }
 2452 
// Any Java argument register may be spilled.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
 2457 
// Integer register pressure threshold for the register allocator.
// Deliberately set below the full register count; see the JDK-8183543
// explanation below. Overridable via the INTPRESSURE flag.
uint Matcher::int_pressure_limit()
{
  // JDK-8183543: When taking the number of available registers as int
  // register pressure threshold, the jtreg test:
  // test/hotspot/jtreg/compiler/regalloc/TestC2IntPressure.java
  // failed due to C2 compilation failure with
  // "COMPILE SKIPPED: failed spill-split-recycle sanity check".
  //
  // A derived pointer is live at CallNode and then is flagged by RA
  // as a spilled LRG. Spilling heuristics(Spill-USE) explicitly skip
  // derived pointers and lastly fail to spill after reaching maximum
  // number of iterations. Lowering the default pressure threshold to
  // (_NO_SPECIAL_REG32_mask.Size() minus 1) forces CallNode to become
  // a high register pressure area of the code so that split_DEF can
  // generate DefinitionSpillCopy for the derived pointer.
  uint default_int_pressure_threshold = _NO_SPECIAL_REG32_mask.Size() - 1;
  if (!PreserveFramePointer) {
    // When PreserveFramePointer is off, frame pointer is allocatable,
    // but different from other SOC registers, it is excluded from
    // fatproj's mask because its save type is No-Save. Decrease 1 to
    // ensure high pressure at fatproj when PreserveFramePointer is off.
    // See check_pressure_at_fatproj().
    default_int_pressure_threshold--;
  }
  return (INTPRESSURE == -1) ? default_int_pressure_threshold : INTPRESSURE;
}
 2484 
// Float register pressure threshold; overridable via FLOATPRESSURE.
uint Matcher::float_pressure_limit()
{
  // _FLOAT_REG_mask is generated by adlc from the float_reg register class.
  return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.Size() : FLOATPRESSURE;
}
 2490 
// No hand-written assembly path for long division by constant on AArch64.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
 2494 
// No fused divmodI on AArch64, so this projection mask is never requested.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
 2499 
// Register for MODI projection of divmodI.
// Never used on AArch64 (ShouldNotReachHere); present to satisfy the
// Matcher interface.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
 2505 
// Register for DIVL projection of divmodL.
// Never used on AArch64 (ShouldNotReachHere); present to satisfy the
// Matcher interface.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
 2511 
// Register for MODL projection of divmodL.
// Never used on AArch64 (ShouldNotReachHere); present to satisfy the
// Matcher interface.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
 2517 
// The register mask used to save SP across a method handle invoke: the
// frame pointer register.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
 2521 
 2522 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
 2523   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
 2524     Node* u = addp->fast_out(i);
 2525     if (u->is_LoadStore()) {
 2526       // On AArch64, LoadStoreNodes (i.e. compare and swap
 2527       // instructions) only take register indirect as an operand, so
 2528       // any attempt to use an AddPNode as an input to a LoadStoreNode
 2529       // must fail.
 2530       return false;
 2531     }
 2532     if (u->is_Mem()) {
 2533       int opsize = u->as_Mem()->memory_size();
 2534       assert(opsize > 0, "unexpected memory operand size");
 2535       if (u->as_Mem()->memory_size() != (1<<shift)) {
 2536         return false;
 2537       }
 2538     }
 2539   }
 2540   return true;
 2541 }
 2542 
 2543 // Convert BootTest condition to Assembler condition.
 2544 // Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
 2545 Assembler::Condition to_assembler_cond(BoolTest::mask cond) {
 2546   Assembler::Condition result;
 2547   switch(cond) {
 2548     case BoolTest::eq:
 2549       result = Assembler::EQ; break;
 2550     case BoolTest::ne:
 2551       result = Assembler::NE; break;
 2552     case BoolTest::le:
 2553       result = Assembler::LE; break;
 2554     case BoolTest::ge:
 2555       result = Assembler::GE; break;
 2556     case BoolTest::lt:
 2557       result = Assembler::LT; break;
 2558     case BoolTest::gt:
 2559       result = Assembler::GT; break;
 2560     case BoolTest::ule:
 2561       result = Assembler::LS; break;
 2562     case BoolTest::uge:
 2563       result = Assembler::HS; break;
 2564     case BoolTest::ult:
 2565       result = Assembler::LO; break;
 2566     case BoolTest::ugt:
 2567       result = Assembler::HI; break;
 2568     case BoolTest::overflow:
 2569       result = Assembler::VS; break;
 2570     case BoolTest::no_overflow:
 2571       result = Assembler::VC; break;
 2572     default:
 2573       ShouldNotReachHere();
 2574       return Assembler::Condition(-1);
 2575   }
 2576 
 2577   // Check conversion
 2578   if (cond & BoolTest::unsigned_compare) {
 2579     assert(cmpOpUOper((BoolTest::mask)((int)cond & ~(BoolTest::unsigned_compare))).ccode() == result, "Invalid conversion");
 2580   } else {
 2581     assert(cmpOpOper(cond).ccode() == result, "Invalid conversion");
 2582   }
 2583 
 2584   return result;
 2585 }
 2586 
 2587 // Binary src (Replicate con)
 2588 bool is_valid_sve_arith_imm_pattern(Node* n, Node* m) {
 2589   if (n == NULL || m == NULL) {
 2590     return false;
 2591   }
 2592 
 2593   if (UseSVE == 0 || !VectorNode::is_invariant_vector(m)) {
 2594     return false;
 2595   }
 2596 
 2597   Node* imm_node = m->in(1);
 2598   if (!imm_node->is_Con()) {
 2599     return false;
 2600   }
 2601 
 2602   const Type* t = imm_node->bottom_type();
 2603   if (!(t->isa_int() || t->isa_long())) {
 2604     return false;
 2605   }
 2606 
 2607   switch (n->Opcode()) {
 2608   case Op_AndV:
 2609   case Op_OrV:
 2610   case Op_XorV: {
 2611     Assembler::SIMD_RegVariant T = Assembler::elemType_to_regVariant(Matcher::vector_element_basic_type(n));
 2612     uint64_t value = t->isa_long() ? (uint64_t)imm_node->get_long() : (uint64_t)imm_node->get_int();
 2613     return Assembler::operand_valid_for_sve_logical_immediate(Assembler::regVariant_to_elemBits(T), value);
 2614   }
 2615   case Op_AddVB:
 2616     return (imm_node->get_int() <= 255 && imm_node->get_int() >= -255);
 2617   case Op_AddVS:
 2618   case Op_AddVI:
 2619     return Assembler::operand_valid_for_sve_add_sub_immediate((int64_t)imm_node->get_int());
 2620   case Op_AddVL:
 2621     return Assembler::operand_valid_for_sve_add_sub_immediate(imm_node->get_long());
 2622   default:
 2623     return false;
 2624   }
 2625 }
 2626 
 2627 // (XorV src (Replicate m1))
 2628 // (XorVMask src (MaskAll m1))
 2629 bool is_vector_bitwise_not_pattern(Node* n, Node* m) {
 2630   if (n != NULL && m != NULL) {
 2631     return (n->Opcode() == Op_XorV || n->Opcode() == Op_XorVMask) &&
 2632            VectorNode::is_all_ones_vector(m);
 2633   }
 2634   return false;
 2635 }
 2636 
 2637 // Should the matcher clone input 'm' of node 'n'?
 2638 bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
 2639   if (is_vshift_con_pattern(n, m) ||
 2640       is_vector_bitwise_not_pattern(n, m) ||
 2641       is_valid_sve_arith_imm_pattern(n, m)) {
 2642     mstack.push(m, Visit);
 2643     return true;
 2644   }
 2645   return false;
 2646 }
 2647 
 2648 // Should the Matcher clone shifts on addressing modes, expecting them
 2649 // to be subsumed into complex addressing expressions or compute them
 2650 // into registers?
bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  // Case 1: offset is (LShiftL y con) where every memory user of the AddP
  // accesses exactly (1 << con) bytes, so a scaled-register addressing mode
  // is legal for all uses.
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    // If the shifted value is itself a ConvI2L used only for addressing,
    // subsume the conversion too and push its int input instead.
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  // Case 2: offset is a bare (ConvI2L x) — a sign-extended index register.
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit)
;
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
 2688 
// Emit a volatile memory access: INSN applied to REG through a plain
// register-indirect [BASE] address. The guarantees enforce that the matcher
// only hands us base-register addressing — no index, scale or displacement
// is permitted for volatile accesses.
// NOTE: expands to a _masm declaration followed by a block; it is intended
// for use inside enc_class bodies, not as a general-purpose statement.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  C2_MacroAssembler _masm(&cbuf);                                       \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
 2697 
 2698 
 2699 static Address mem2address(int opcode, Register base, int index, int size, int disp)
 2700   {
 2701     Address::extend scale;
 2702 
 2703     // Hooboy, this is fugly.  We need a way to communicate to the
 2704     // encoder that the index needs to be sign extended, so we have to
 2705     // enumerate all the cases.
 2706     switch (opcode) {
 2707     case INDINDEXSCALEDI2L:
 2708     case INDINDEXSCALEDI2LN:
 2709     case INDINDEXI2L:
 2710     case INDINDEXI2LN:
 2711       scale = Address::sxtw(size);
 2712       break;
 2713     default:
 2714       scale = Address::lsl(size);
 2715     }
 2716 
 2717     if (index == -1) {
 2718       return Address(base, disp);
 2719     } else {
 2720       assert(disp == 0, "unsupported address mode: disp = %d", disp);
 2721       return Address(base, as_Register(index), scale);
 2722     }
 2723   }
 2724 
 2725 
// Pointer-to-member-function signatures for the MacroAssembler load/store
// helpers dispatched to by the loadStore() wrappers below.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_insn2)(Register Rt, Register adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
 2731 
 2732   // Used for all non-volatile memory accesses.  The use of
 2733   // $mem->opcode() to discover whether this pattern uses sign-extended
 2734   // offsets is something of a kludge.
 2735   static void loadStore(C2_MacroAssembler masm, mem_insn insn,
 2736                         Register reg, int opcode,
 2737                         Register base, int index, int scale, int disp,
 2738                         int size_in_memory)
 2739   {
 2740     Address addr = mem2address(opcode, base, index, scale, disp);
 2741     if (addr.getMode() == Address::base_plus_offset) {
 2742       /* Fix up any out-of-range offsets. */
 2743       assert_different_registers(rscratch1, base);
 2744       assert_different_registers(rscratch1, reg);
 2745       addr = masm.legitimize_address(addr, size_in_memory, rscratch1);
 2746     }
 2747     (masm.*insn)(reg, addr);
 2748   }
 2749 
 2750   static void loadStore(C2_MacroAssembler masm, mem_float_insn insn,
 2751                         FloatRegister reg, int opcode,
 2752                         Register base, int index, int size, int disp,
 2753                         int size_in_memory)
 2754   {
 2755     Address::extend scale;
 2756 
 2757     switch (opcode) {
 2758     case INDINDEXSCALEDI2L:
 2759     case INDINDEXSCALEDI2LN:
 2760       scale = Address::sxtw(size);
 2761       break;
 2762     default:
 2763       scale = Address::lsl(size);
 2764     }
 2765 
 2766     if (index == -1) {
 2767       /* If we get an out-of-range offset it is a bug in the compiler,
 2768          so we assert here. */
 2769       assert(Address::offset_ok_for_immed(disp, exact_log2(size_in_memory)), "c2 compiler bug");
 2770       /* Fix up any out-of-range offsets. */
 2771       assert_different_registers(rscratch1, base);
 2772       Address addr = Address(base, disp);
 2773       addr = masm.legitimize_address(addr, size_in_memory, rscratch1);
 2774       (masm.*insn)(reg, addr);
 2775     } else {
 2776       assert(disp == 0, "unsupported address mode: disp = %d", disp);
 2777       (masm.*insn)(reg, Address(base, as_Register(index), scale));
 2778     }
 2779   }
 2780 
 2781   static void loadStore(C2_MacroAssembler masm, mem_vector_insn insn,
 2782                         FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
 2783                         int opcode, Register base, int index, int size, int disp)
 2784   {
 2785     if (index == -1) {
 2786       (masm.*insn)(reg, T, Address(base, disp));
 2787     } else {
 2788       assert(disp == 0, "unsupported address mode");
 2789       (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
 2790     }
 2791   }
 2792 
 2793 %}
 2794 
 2795 
 2796 
 2797 //----------ENCODING BLOCK-----------------------------------------------------
 2798 // This block specifies the encoding classes used by the compiler to
 2799 // output byte streams.  Encoding classes are parameterized macros
 2800 // used by Machine Instruction Nodes in order to generate the bit
 2801 // encoding of the instruction.  Operands specify their base encoding
 2802 // interface with the interface keyword.  There are currently
 2803 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
 2804 // COND_INTER.  REG_INTER causes an operand to generate a function
 2805 // which returns its register number when queried.  CONST_INTER causes
 2806 // an operand to generate a function which returns the value of the
 2807 // constant when queried.  MEMORY_INTER causes an operand to generate
 2808 // four functions which return the Base Register, the Index Register,
 2809 // the Scale Value, and the Offset Value of the operand when queried.
 2810 // COND_INTER causes an operand to generate six functions which return
 2811 // the encoding code (ie - encoding bits for the instruction)
 2812 // associated with each basic boolean condition for a conditional
 2813 // instruction.
 2814 //
 2815 // Instructions specify two basic values for encoding.  Again, a
 2816 // function is available to check if the constant displacement is an
 2817 // oop. They use the ins_encode keyword to specify their encoding
 2818 // classes (which must be a sequence of enc_class names, and their
 2819 // parameters, specified in the encoding block), and they use the
 2820 // opcode keyword to specify, in order, their primary, secondary, and
 2821 // tertiary opcode.  Only the opcode sections which a particular
 2822 // instruction needs for encoding need to be specified.
 2823 encode %{
 2824   // Build emit functions for each basic byte or larger field in the
 2825   // intel encoding scheme (opcode, rm, sib, immediate), and call them
 2826   // from C++ code in the enc_class source block.  Emit functions will
 2827   // live in the main source block for now.  In future, we can
 2828   // generalize this by adding a syntax that specifies the sizes of
 2829   // fields in an order, so that the adlc can build the emit functions
 2830   // automagically
 2831 
  // catch all for unimplemented encodings: emits
  // MacroAssembler::unimplemented("C2 catch all") so that a missing
  // encoding is reported rather than silently generating nothing
  enc_class enc_unimplemented %{
    C2_MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
 2837 
 2838   // BEGIN Non-volatile memory access
 2839 
 2840   // This encoding class is generated automatically from ad_encode.m4.
 2841   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2842   enc_class aarch64_enc_ldrsbw(iRegI dst, memory1 mem) %{
 2843     Register dst_reg = as_Register($dst$$reg);
 2844     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
 2845                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2846   %}
 2847 
 2848   // This encoding class is generated automatically from ad_encode.m4.
 2849   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2850   enc_class aarch64_enc_ldrsb(iRegI dst, memory1 mem) %{
 2851     Register dst_reg = as_Register($dst$$reg);
 2852     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
 2853                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2854   %}
 2855 
 2856   // This encoding class is generated automatically from ad_encode.m4.
 2857   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2858   enc_class aarch64_enc_ldrb(iRegI dst, memory1 mem) %{
 2859     Register dst_reg = as_Register($dst$$reg);
 2860     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
 2861                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2862   %}
 2863 
 2864   // This encoding class is generated automatically from ad_encode.m4.
 2865   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2866   enc_class aarch64_enc_ldrb(iRegL dst, memory1 mem) %{
 2867     Register dst_reg = as_Register($dst$$reg);
 2868     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
 2869                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2870   %}
 2871 
 2872   // This encoding class is generated automatically from ad_encode.m4.
 2873   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2874   enc_class aarch64_enc_ldrshw(iRegI dst, memory2 mem) %{
 2875     Register dst_reg = as_Register($dst$$reg);
 2876     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
 2877                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2878   %}
 2879 
 2880   // This encoding class is generated automatically from ad_encode.m4.
 2881   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2882   enc_class aarch64_enc_ldrsh(iRegI dst, memory2 mem) %{
 2883     Register dst_reg = as_Register($dst$$reg);
 2884     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
 2885                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2886   %}
 2887 
 2888   // This encoding class is generated automatically from ad_encode.m4.
 2889   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2890   enc_class aarch64_enc_ldrh(iRegI dst, memory2 mem) %{
 2891     Register dst_reg = as_Register($dst$$reg);
 2892     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
 2893                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2894   %}
 2895 
 2896   // This encoding class is generated automatically from ad_encode.m4.
 2897   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2898   enc_class aarch64_enc_ldrh(iRegL dst, memory2 mem) %{
 2899     Register dst_reg = as_Register($dst$$reg);
 2900     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
 2901                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2902   %}
 2903 
 2904   // This encoding class is generated automatically from ad_encode.m4.
 2905   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2906   enc_class aarch64_enc_ldrw(iRegI dst, memory4 mem) %{
 2907     Register dst_reg = as_Register($dst$$reg);
 2908     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
 2909                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2910   %}
 2911 
 2912   // This encoding class is generated automatically from ad_encode.m4.
 2913   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2914   enc_class aarch64_enc_ldrw(iRegL dst, memory4 mem) %{
 2915     Register dst_reg = as_Register($dst$$reg);
 2916     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
 2917                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2918   %}
 2919 
 2920   // This encoding class is generated automatically from ad_encode.m4.
 2921   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2922   enc_class aarch64_enc_ldrsw(iRegL dst, memory4 mem) %{
 2923     Register dst_reg = as_Register($dst$$reg);
 2924     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
 2925                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2926   %}
 2927 
 2928   // This encoding class is generated automatically from ad_encode.m4.
 2929   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2930   enc_class aarch64_enc_ldr(iRegL dst, memory8 mem) %{
 2931     Register dst_reg = as_Register($dst$$reg);
 2932     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
 2933                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 2934   %}
 2935 
 2936   // This encoding class is generated automatically from ad_encode.m4.
 2937   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2938   enc_class aarch64_enc_ldrs(vRegF dst, memory4 mem) %{
 2939     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
 2940     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
 2941                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2942   %}
 2943 
 2944   // This encoding class is generated automatically from ad_encode.m4.
 2945   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2946   enc_class aarch64_enc_ldrd(vRegD dst, memory8 mem) %{
 2947     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
 2948     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
 2949                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 2950   %}
 2951 
 2952   // This encoding class is generated automatically from ad_encode.m4.
 2953   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2954   enc_class aarch64_enc_strb(iRegI src, memory1 mem) %{
 2955     Register src_reg = as_Register($src$$reg);
 2956     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
 2957                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2958   %}
 2959 
 2960   // This encoding class is generated automatically from ad_encode.m4.
 2961   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2962   enc_class aarch64_enc_strb0(memory1 mem) %{
 2963     C2_MacroAssembler _masm(&cbuf);
 2964     loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
 2965                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2966   %}
 2967 
 2968   // This encoding class is generated automatically from ad_encode.m4.
 2969   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2970   enc_class aarch64_enc_strh(iRegI src, memory2 mem) %{
 2971     Register src_reg = as_Register($src$$reg);
 2972     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
 2973                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2974   %}
 2975 
 2976   // This encoding class is generated automatically from ad_encode.m4.
 2977   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2978   enc_class aarch64_enc_strh0(memory2 mem) %{
 2979     C2_MacroAssembler _masm(&cbuf);
 2980     loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
 2981                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2982   %}
 2983 
 2984   // This encoding class is generated automatically from ad_encode.m4.
 2985   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2986   enc_class aarch64_enc_strw(iRegI src, memory4 mem) %{
 2987     Register src_reg = as_Register($src$$reg);
 2988     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
 2989                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2990   %}
 2991 
 2992   // This encoding class is generated automatically from ad_encode.m4.
 2993   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2994   enc_class aarch64_enc_strw0(memory4 mem) %{
 2995     C2_MacroAssembler _masm(&cbuf);
 2996     loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
 2997                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2998   %}
 2999 
 3000   // This encoding class is generated automatically from ad_encode.m4.
 3001   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3002   enc_class aarch64_enc_str(iRegL src, memory8 mem) %{
 3003     Register src_reg = as_Register($src$$reg);
 3004     // we sometimes get asked to store the stack pointer into the
 3005     // current thread -- we cannot do that directly on AArch64
 3006     if (src_reg == r31_sp) {
 3007       C2_MacroAssembler _masm(&cbuf);
 3008       assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
 3009       __ mov(rscratch2, sp);
 3010       src_reg = rscratch2;
 3011     }
 3012     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
 3013                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 3014   %}
 3015 
 3016   // This encoding class is generated automatically from ad_encode.m4.
 3017   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3018   enc_class aarch64_enc_str0(memory8 mem) %{
 3019     C2_MacroAssembler _masm(&cbuf);
 3020     loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
 3021                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 3022   %}
 3023 
 3024   // This encoding class is generated automatically from ad_encode.m4.
 3025   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3026   enc_class aarch64_enc_strs(vRegF src, memory4 mem) %{
 3027     FloatRegister src_reg = as_FloatRegister($src$$reg);
 3028     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
 3029                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 3030   %}
 3031 
 3032   // This encoding class is generated automatically from ad_encode.m4.
 3033   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3034   enc_class aarch64_enc_strd(vRegD src, memory8 mem) %{
 3035     FloatRegister src_reg = as_FloatRegister($src$$reg);
 3036     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
 3037                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 3038   %}
 3039 
 3040   // This encoding class is generated automatically from ad_encode.m4.
 3041   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3042   enc_class aarch64_enc_strb0_ordered(memory4 mem) %{
 3043       C2_MacroAssembler _masm(&cbuf);
 3044       __ membar(Assembler::StoreStore);
 3045       loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
 3046                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 3047   %}
 3048 
 3049   // END Non-volatile memory access
 3050 
  // Vector loads and stores
  //
  // Each enc_class below forwards to the mem_vector_insn overload of
  // loadStore(); the MacroAssembler::H/S/D/Q variant selects the size of
  // the vector-register chunk that is transferred.
  enc_class aarch64_enc_ldrvH(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvS(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Stores mirror the loads above, dispatching to MacroAssembler::str.
  enc_class aarch64_enc_strvH(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvS(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
 3099 
 3100   // volatile loads and stores
 3101 
 3102   enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
 3103     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3104                  rscratch1, stlrb);
 3105   %}
 3106 
 3107   enc_class aarch64_enc_stlrb0(memory mem) %{
 3108     MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3109                  rscratch1, stlrb);
 3110   %}
 3111 
 3112   enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
 3113     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3114                  rscratch1, stlrh);
 3115   %}
 3116 
 3117   enc_class aarch64_enc_stlrh0(memory mem) %{
 3118     MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3119                  rscratch1, stlrh);
 3120   %}
 3121 
 3122   enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
 3123     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3124                  rscratch1, stlrw);
 3125   %}
 3126 
 3127   enc_class aarch64_enc_stlrw0(memory mem) %{
 3128     MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3129                  rscratch1, stlrw);
 3130   %}
 3131 
 3132   enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
 3133     Register dst_reg = as_Register($dst$$reg);
 3134     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3135              rscratch1, ldarb);
 3136     __ sxtbw(dst_reg, dst_reg);
 3137   %}
 3138 
  // Volatile (acquiring) load encodings.  Each encoding expands the
  // MOV_VOLATILE helper macro, which emits an AArch64 load-acquire
  // instruction (ldarb/ldarh/ldarw/ldar) for the address described by
  // $mem's base/index/scale/disp, using rscratch1 as an address temp
  // when the address cannot be used directly.

  // load-acquire byte, then sign-extend to 64 bits (long result)
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  // load-acquire byte, zero-extended by ldarb (int result)
  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // load-acquire byte, zero-extended by ldarb (long result)
  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // load-acquire halfword, then sign-extend to 32 bits (int result)
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // load-acquire halfword, then sign-extend to 64 bits (long result)
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  // load-acquire halfword, zero-extended by ldarh (int result)
  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // load-acquire halfword, zero-extended by ldarh (long result)
  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // load-acquire 32-bit word (int result)
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // load-acquire 32-bit word into a long register.  NOTE(review): this
  // reuses the name aarch64_enc_ldarw with a different operand signature;
  // the body emitted is identical to the iRegI variant above.
  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // load-acquire 64-bit doubleword (long result)
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}
 3194 
  // load-acquire float: acquire the raw 32 bits into rscratch1, then
  // move them into the destination FP register.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // load-acquire double: acquire the raw 64 bits into rscratch1, then
  // move them into the destination FP register.
  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
 3206 
  // Volatile (releasing) store encodings: MOV_VOLATILE emits a
  // store-release instruction (stlr/stlrw) for the $mem address,
  // with rscratch1 as an address temp.

  // store-release 64-bit register
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      C2_MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      // copy sp through rscratch2 since sp cannot be a store source here
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // store-release of zero (uses the zero register as source)
  enc_class aarch64_enc_stlr0(memory mem) %{
    MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // store-release float: move the raw 32 bits into rscratch2 first,
  // then store-release them with stlrw.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      C2_MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // store-release double: move the raw 64 bits into rscratch2 first,
  // then store-release them with stlr.
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      C2_MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
 3245 
 3246   // synchronized read/update encodings
 3247 
  // Exclusive load-acquire: forms the effective address from
  // base/index/scale/disp (materializing it in rscratch1 when it is not
  // a plain base register) and emits ldaxr.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        // base + displacement: compute address into rscratch1
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        // base + (index << scale)
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // base + disp + (index << scale): two-step address computation
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}

  // Exclusive store-release: same address formation as ldaxr above
  // (address temp is rscratch2 here), emits stlxr with the status
  // result in rscratch1, then compares the status against zero so
  // following code can branch on success/failure via the flags.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    // set flags from the stlxr status word (0 == success)
    __ cmpw(rscratch1, zr);
  %}
 3306 
  // Compare-and-exchange encodings (release-only ordering: acquire ==
  // false, release == true).  All variants require a plain base-register
  // address (no index, no displacement) and delegate to the macro
  // assembler's cmpxchg with the operand size as the only difference.

  // 64-bit (long/pointer) CAS
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit (int) CAS
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 16-bit (short) CAS
  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 8-bit (byte) CAS
  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3338 
 3339 
  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.

  // 64-bit (long/pointer) CAS with acquire semantics
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit (int) CAS with acquire semantics
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 16-bit (short) CAS with acquire semantics
  enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 8-bit (byte) CAS with acquire semantics
  enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3375 
 3376   // auxiliary used for CompareAndSwapX to set result register
 3377   enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
 3378     C2_MacroAssembler _masm(&cbuf);
 3379     Register res_reg = as_Register($res$$reg);
 3380     __ cset(res_reg, Assembler::EQ);
 3381   %}
 3382 
 3383   // prefetch encodings
 3384 
 3385   enc_class aarch64_enc_prefetchw(memory mem) %{
 3386     C2_MacroAssembler _masm(&cbuf);
 3387     Register base = as_Register($mem$$base);
 3388     int index = $mem$$index;
 3389     int scale = $mem$$scale;
 3390     int disp = $mem$$disp;
 3391     if (index == -1) {
 3392       __ prfm(Address(base, disp), PSTL1KEEP);
 3393     } else {
 3394       Register index_reg = as_Register(index);
 3395       if (disp == 0) {
 3396         __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
 3397       } else {
 3398         __ lea(rscratch1, Address(base, disp));
 3399 	__ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
 3400       }
 3401     }
 3402   %}
 3403 
  // mov encodings
 3405 
  // 32-bit immediate move: use the zero register for 0, otherwise let
  // the macro assembler pick the movz/movn/orr sequence for the constant.
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    C2_MacroAssembler _masm(&cbuf);
    uint32_t con = (uint32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // 64-bit immediate move: same zero-register shortcut as above.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    uint64_t con = (uint64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}
 3427 
  // Pointer-constant move.  Dispatches on the constant's relocation type:
  // oops and metadata go through the relocation-aware movers; plain
  // addresses use either a full immediate move or an adrp+add pair.
  // NULL and (address)1 are handled by other encodings (mov_p0/mov_p1),
  // so reaching them here is a matcher bug.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        // Low addresses (below one page) and addresses the adrp scheme
        // cannot reach are materialized as a full immediate.
        if (! __ is_valid_AArch64_address(con) ||
            con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          // page-relative adrp plus the in-page offset
          uint64_t offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
 3453 
  // null-pointer constant: just copy the zero register
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // pointer constant 1
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (uint64_t)1);
  %}

  // card-table byte-map base constant, loaded via the GC helper
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    C2_MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}
 3470 
  // Narrow-oop constant move: must carry an oop relocation; NULL is
  // handled by mov_n0 below, so it is unreachable here.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // narrow-oop null constant: copy the zero register
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Narrow-klass constant move: must carry a metadata relocation.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
 3502 
  // arithmetic encodings

  // Add/subtract 32-bit immediate.  $primary distinguishes the two
  // matched forms; a negative effective immediate is flipped to the
  // opposite operation so the encoded immediate is always non-negative.
  // NOTE(review): con = -con overflows for INT_MIN; presumably the
  // immIAddSub operand's range excludes it -- confirm against the
  // operand definition.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // Add/subtract 64-bit register with immediate.  Same scheme as the
  // 32-bit variant; the constant is narrowed to int32_t, so immLAddSub
  // is presumably restricted to that range -- confirm against the
  // operand definition.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
 3532 
 3533   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
 3534     C2_MacroAssembler _masm(&cbuf);
 3535    Register dst_reg = as_Register($dst$$reg);
 3536    Register src1_reg = as_Register($src1$$reg);
 3537    Register src2_reg = as_Register($src2$$reg);
 3538     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
 3539   %}
 3540 
 3541   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
 3542     C2_MacroAssembler _masm(&cbuf);
 3543    Register dst_reg = as_Register($dst$$reg);
 3544    Register src1_reg = as_Register($src1$$reg);
 3545    Register src2_reg = as_Register($src2$$reg);
 3546     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
 3547   %}
 3548 
 3549   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
 3550     C2_MacroAssembler _masm(&cbuf);
 3551    Register dst_reg = as_Register($dst$$reg);
 3552    Register src1_reg = as_Register($src1$$reg);
 3553    Register src2_reg = as_Register($src2$$reg);
 3554     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
 3555   %}
 3556 
 3557   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
 3558     C2_MacroAssembler _masm(&cbuf);
 3559    Register dst_reg = as_Register($dst$$reg);
 3560    Register src1_reg = as_Register($src1$$reg);
 3561    Register src2_reg = as_Register($src2$$reg);
 3562     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
 3563   %}
 3564 
  // compare instruction encodings

  // 32-bit register-register compare (sets flags only)
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32-bit compare with an add/sub-range immediate: encode as a
  // flag-setting subtract (or add, for negative immediates) into zr.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // 32-bit compare with an arbitrary immediate: materialize the
  // constant in rscratch1 first.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    uint32_t val = (uint32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}
 3592 
  // 64-bit register-register compare (sets flags only)
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare with a 12-bit-range immediate, encoded as a
  // flag-setting subtract/add into zr.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      // (its negation is itself, so materialize it and compare)
      __ orr(rscratch1, zr, (uint64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // 64-bit compare with an arbitrary immediate: materialize the
  // constant in rscratch1 first.
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    uint64_t val = (uint64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}
 3622 
 3623   enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
 3624     C2_MacroAssembler _masm(&cbuf);
 3625     Register reg1 = as_Register($src1$$reg);
 3626     Register reg2 = as_Register($src2$$reg);
 3627     __ cmp(reg1, reg2);
 3628   %}
 3629 
 3630   enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
 3631     C2_MacroAssembler _masm(&cbuf);
 3632     Register reg1 = as_Register($src1$$reg);
 3633     Register reg2 = as_Register($src2$$reg);
 3634     __ cmpw(reg1, reg2);
 3635   %}
 3636 
 3637   enc_class aarch64_enc_testp(iRegP src) %{
 3638     C2_MacroAssembler _masm(&cbuf);
 3639     Register reg = as_Register($src$$reg);
 3640     __ cmp(reg, zr);
 3641   %}
 3642 
 3643   enc_class aarch64_enc_testn(iRegN src) %{
 3644     C2_MacroAssembler _masm(&cbuf);
 3645     Register reg = as_Register($src$$reg);
 3646     __ cmpw(reg, zr);
 3647   %}
 3648 
 3649   enc_class aarch64_enc_b(label lbl) %{
 3650     C2_MacroAssembler _masm(&cbuf);
 3651     Label *L = $lbl$$label;
 3652     __ b(*L);
 3653   %}
 3654 
 3655   enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
 3656     C2_MacroAssembler _masm(&cbuf);
 3657     Label *L = $lbl$$label;
 3658     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
 3659   %}
 3660 
 3661   enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
 3662     C2_MacroAssembler _masm(&cbuf);
 3663     Label *L = $lbl$$label;
 3664     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
 3665   %}
 3666 
  // Partial subtype check: runs the slow-path klass subtype test and,
  // when $primary is set, zeroes the result register on the hit path.
  // The miss label is bound at the end so a miss falls through with the
  // condition codes set by the helper.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     C2_MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
 3684 
  // Java static call.  Three cases:
  //  - no resolved _method: a call to a runtime wrapper, emitted as a
  //    trampoline call with a runtime-call relocation;
  //  - the _ensureMaterializedForStackWalk intrinsic: the call is elided
  //    and replaced by a nop to keep the code size stable;
  //  - a normal static (or optimized virtual) call: trampoline call plus
  //    a to-interpreter stub, shared between call sites when the callee
  //    is statically bound and sharing is supported.
  // A NULL return from trampoline_call means the code cache is full, in
  // which case the compilation is bailed out.
  enc_class aarch64_enc_java_static_call(method meth) %{
    C2_MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else if (_method->intrinsic_id() == vmIntrinsicID::_ensureMaterializedForStackWalk) {
      // The NOP here is purely to ensure that eliding a call to
      // JVM_EnsureMaterializedForStackWalk doesn't change the code size.
      __ nop();
      __ block_comment("call JVM_EnsureMaterializedForStackWalk (elided)");
    } else {
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
      if (CodeBuffer::supports_shared_stubs() && _method->can_be_statically_bound()) {
        // Calls of the same statically bound method can share
        // a stub to the interpreter.
        cbuf.shared_stub_to_interp_for(_method, call - cbuf.insts_begin());
      } else {
        // Emit stub for static call
        address stub = CompiledStaticCall::emit_to_interp_stub(cbuf, call);
        if (stub == NULL) {
          ciEnv::current()->record_failure("CodeCache is full");
          return;
        }
      }
    }

    __ post_call_nop();

    // Only non uncommon_trap calls need to reinitialize ptrue.
    if (Compile::current()->max_vector_size() > 0 && uncommon_trap_request() == 0) {
      __ reinitialize_ptrue();
    }
  %}
 3732 
  // Java dynamic (inline-cache) call: ic_call emits the call with the
  // resolved method index; a NULL return means the code cache is full
  // and the compilation bails out.  The ptrue predicate register is
  // reinitialized after the call when SVE vectors are in use.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    C2_MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
    __ post_call_nop();
    if (Compile::current()->max_vector_size() > 0) {
      __ reinitialize_ptrue();
    }
  %}

  // Post-call epilog; under VerifyStackAtCalls the stack-depth check is
  // not implemented on this port and traps via call_Unimplemented.
  enc_class aarch64_enc_call_epilog() %{
    C2_MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
 3754 
  // Call from compiled Java into the runtime.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    C2_MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blr
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // target lives in the code cache: reachable trampoline call
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
      __ post_call_nop();
    } else {
      Label retaddr;
      // Make the anchor frame walkable
      __ adr(rscratch2, retaddr);
      __ str(rscratch2, Address(rthread, JavaThread::last_Java_pc_offset()));
      // absolute call: load the entry address and branch-and-link
      __ lea(rscratch1, RuntimeAddress(entry));
      __ blr(rscratch1);
      __ bind(retaddr);
      __ post_call_nop();
    }
    if (Compile::current()->max_vector_size() > 0) {
      __ reinitialize_ptrue();
    }
  %}
 3785 
  // Rethrow an exception: jump to the shared rethrow stub.
  enc_class aarch64_enc_rethrow() %{
    C2_MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}

  // Method return; in debug builds first verify the SVE ptrue predicate
  // when vectors are in use.
  enc_class aarch64_enc_ret() %{
    C2_MacroAssembler _masm(&cbuf);
#ifdef ASSERT
    if (Compile::current()->max_vector_size() > 0) {
      __ verify_ptrue();
    }
#endif
    __ ret(lr);
  %}
 3800 
  // Tail call: indirect jump to the target method's entry.
  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    C2_MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}

  // Tail jump used for exception forwarding.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    C2_MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
 3816 
 3817 %}
 3818 
 3819 //----------FRAME--------------------------------------------------------------
 3820 // Definition of frame structure and management information.
 3821 //
 3822 //  S T A C K   L A Y O U T    Allocators stack-slot number
 3823 //                             |   (to get allocators register number
 3824 //  G  Owned by    |        |  v    add OptoReg::stack0())
 3825 //  r   CALLER     |        |
 3826 //  o     |        +--------+      pad to even-align allocators stack-slot
 3827 //  w     V        |  pad0  |        numbers; owned by CALLER
 3828 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
 3829 //  h     ^        |   in   |  5
 3830 //        |        |  args  |  4   Holes in incoming args owned by SELF
 3831 //  |     |        |        |  3
 3832 //  |     |        +--------+
 3833 //  V     |        | old out|      Empty on Intel, window on Sparc
 3834 //        |    old |preserve|      Must be even aligned.
 3835 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
 3836 //        |        |   in   |  3   area for Intel ret address
 3837 //     Owned by    |preserve|      Empty on Sparc.
 3838 //       SELF      +--------+
 3839 //        |        |  pad2  |  2   pad to align old SP
 3840 //        |        +--------+  1
 3841 //        |        | locks  |  0
 3842 //        |        +--------+----> OptoReg::stack0(), even aligned
 3843 //        |        |  pad1  | 11   pad to align new SP
 3844 //        |        +--------+
 3845 //        |        |        | 10
 3846 //        |        | spills |  9   spills
 3847 //        V        |        |  8   (pad0 slot for callee)
 3848 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
 3849 //        ^        |  out   |  7
 3850 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
 3851 //     Owned by    +--------+
 3852 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
 3853 //        |    new |preserve|      Must be even-aligned.
 3854 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
 3855 //        |        |        |
 3856 //
 3857 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
 3858 //         known from SELF's arguments and the Java calling convention.
 3859 //         Region 6-7 is determined per call site.
 3860 // Note 2: If the calling convention leaves holes in the incoming argument
 3861 //         area, those holes are owned by SELF.  Holes in the outgoing area
 3862 //         are owned by the CALLEE.  Holes should not be necessary in the
 3863 //         incoming area, as the Java calling convention is completely under
 3864 //         the control of the AD file.  Doubles can be sorted and packed to
 3865 //         avoid holes.  Holes in the outgoing arguments may be necessary for
 3866 //         varargs C calling conventions.
 3867 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
 3868 //         even aligned with pad0 as needed.
 3869 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
 3870 //           (the latter is true on Intel but is it false on AArch64?)
 3871 //         region 6-11 is even aligned; it may be padded out more so that
 3872 //         the region from SP to FP meets the minimum stack alignment.
 3873 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
 3874 //         alignment.  Region 11, pad1, may be dynamically extended so that
 3875 //         SP meets the minimum alignment.
 3876 
frame %{
  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or Method for I2C.
  inline_cache_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  // NOTE(review): R31 names the stack-pointer entry in this file's
  // register definitions -- confirm against the register block above.
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // lo/hi give the first/second allocator register for each ideal
    // register type; OptoReg::Bad marks types that occupy one slot.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                      // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
 3948 
//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits);
                                // AArch64 instructions are fixed-width
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
 3966 
//----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct parsing
// in the ADLC because operands constitute user defined types which are used in
// instruction definitions.

//----------Simple Operands----------------------------------------------------

// Integer operands 32 bit
// 32 bit immediate (any value)
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift values for add/sub extension shift (0..4 inclusive)
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer strictly greater than one
operand immI_gt_1()
%{
  predicate(n->get_int() > 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer less than or equal to four
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4048 
// 32 bit integer constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 63
operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 255 (0xff)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 65535 (0xffff)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer strictly greater than zero
operand immI_positive()
%{
  predicate(n->get_int() > 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4148 
// BoolTest condition for signed compare
operand immI_cmp_cond()
%{
  predicate(!Matcher::is_unsigned_booltest_pred(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// BoolTest condition for unsigned compare
operand immI_cmpU_cond()
%{
  predicate(Matcher::is_unsigned_booltest_pred(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer constant 255 (0xff)
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer constant 65535 (0xffff)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer constant 4294967295 (0xffffffff)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4200 
// 64 bit contiguous low-order bit mask: a non-zero value of the form
// 2^k - 1 whose top two bits are clear (so k <= 62)
operand immL_bitmask()
%{
  predicate((n->get_long() != 0)
            && ((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit contiguous low-order bit mask: a non-zero value of the form
// 2^k - 1 whose top two bits are clear (so k <= 30)
operand immI_bitmask()
%{
  predicate((n->get_int() != 0)
            && ((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit contiguous low-order bit mask (2^k - 1) that fits in the
// positive range of a 32-bit int (value < 0x80000000)
operand immL_positive_bitmaskI()
%{
  predicate((n->get_long() != 0)
            && ((julong)n->get_long() < 0x80000000ULL)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4236 
// Scale values for scaled offset addressing modes (up to long but not quad),
// i.e. 0..3 inclusive
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 5 bit signed integer
operand immI5()
%{
  predicate(Assembler::is_simm(n->get_int(), 5));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 7 bit unsigned integer
operand immIU7()
%{
  predicate(Assembler::is_uimm(n->get_int(), 7));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset, long variant of immIU12
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4312 
// Offset for scaled or unscaled immediate loads and stores.
// The numeric suffix of each operand below is the memory access size
// in bytes; the second argument to offset_ok_for_immed is log2 of
// that size.
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for a 1 byte access
operand immIOffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for a 2 byte access
operand immIOffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for a 4 byte access
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for an 8 byte access
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for a 16 byte access
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long variants of the offset operands above
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for a 1 byte access (long)
operand immLoffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for a 2 byte access (long)
operand immLoffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for a 4 byte access (long)
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for an 8 byte access (long)
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for a 16 byte access (long)
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4433 
// 5 bit signed long integer
operand immL5()
%{
  predicate(Assembler::is_simm(n->get_long(), 5));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 7 bit unsigned long integer
operand immLU7()
%{
  predicate(Assembler::is_uimm(n->get_long(), 7));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 8 bit signed value.
operand immI8()
%{
  predicate(n->get_int() <= 127 && n->get_int() >= -128);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 8 bit signed value (simm8), or #simm8 LSL 8 (i.e. a multiple of 256
// in the range -32768..32512).
operand immI8_shift8()
%{
  predicate((n->get_int() <= 127 && n->get_int() >= -128) ||
            (n->get_int() <= 32512 && n->get_int() >= -32768 && (n->get_int() & 0xff) == 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 8 bit signed value (simm8), or #simm8 LSL 8 -- long variant.
operand immL8_shift8()
%{
  predicate((n->get_long() <= 127 && n->get_long() >= -128) ||
            (n->get_long() <= 32512 && n->get_long() >= -32768 && (n->get_long() & 0xff) == 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 8 bit integer valid for vector add sub immediate (-255..255)
operand immBAddSubV()
%{
  predicate(n->get_int() <= 255 && n->get_int() >= -255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((int64_t)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for vector (SVE) add sub immediate
operand immIAddSubV()
%{
  predicate(Assembler::operand_valid_for_sve_add_sub_immediate((int64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Logical (bitwise) immediates, validated per lane width

// valid as an SVE logical immediate with 8 bit lanes
operand immBLog()
%{
  predicate(Assembler::operand_valid_for_sve_logical_immediate(BitsPerByte, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// valid as an SVE logical immediate with 16 bit lanes
operand immSLog()
%{
  predicate(Assembler::operand_valid_for_sve_logical_immediate(BitsPerShort, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit value valid as a logical immediate
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4554 
// Integer operands 64 bit
// 64 bit immediate (any value)
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit offset of pc in thread anchor
// (byte offset of last_Java_pc within JavaThread)

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for addv subv immediate
operand immLAddSubV()
%{
  predicate(Assembler::operand_valid_for_sve_add_sub_immediate(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (uint64_t)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4652 
// Pointer operands
// Pointer Immediate (any value)
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base: matches only the pointer constant that
// equals the card table's byte_map_base (and only when a card-table
// barrier set is in use; Shenandoah is explicitly excluded)
operand immByteMapBase()
%{
  // Get base of card map
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            SHENANDOAHGC_ONLY(!BarrierSet::barrier_set()->is_a(BarrierSet::ShenandoahBarrierSet) &&)
            (CardTable::CardValue*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4724 
// Float and Double operands
// Double Immediate (any value)
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d (bit pattern exactly zero; excludes -0.0d)
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: value encodable as a packed FP immediate
// (Assembler::operand_valid_for_float_immediate)
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate (any value)
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f (bit pattern exactly zero; excludes -0.0f).
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: value encodable as a packed FP immediate
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4785 
// Narrow pointer operands
// Narrow Pointer Immediate (any value)
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4816 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  format %{ %}
  interface(REG_INTER);
%}
 4860 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// This operand is not allowed to use rfp even if
// rfp is not used to hold the frame pointer.
operand iRegPNoSpNoRfp()
%{
  constraint(ALLOC_IN_RC(no_special_no_rfp_ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4989 
// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R2 only
operand iRegL_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R3 only
operand iRegL_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5044 
// Integer 32 bit Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Integer 32 bit Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5089 
 5090 
// Narrow Pointer Register Operands
// Narrow Pointer Register (compressed oop in a 32 bit register)
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R0 only
operand iRegN_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R2 only
operand iRegN_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R3 only
operand iRegN_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5139 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Generic vector class. This will be used for
// all vector operands, including NEON and SVE.
// The register class is chosen dynamically.
operand vReg()
%{
  constraint(ALLOC_IN_RC(dynamic));
  match(VecA);
  match(VecD);
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Scalable (SVE) vector register
operand vecA()
%{
  constraint(ALLOC_IN_RC(vectora_reg));
  match(VecA);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64 bit (D) vector register
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128 bit (X) vector register
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5207 
// Double registers pinned to a specific FP/SIMD register (v0..v19).
// NOTE(review): presumably used by patterns and runtime call stubs
// that require values in fixed V registers -- confirm against the
// instruct definitions that reference them.
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V4()
%{
  constraint(ALLOC_IN_RC(v4_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V5()
%{
  constraint(ALLOC_IN_RC(v5_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V6()
%{
  constraint(ALLOC_IN_RC(v6_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V7()
%{
  constraint(ALLOC_IN_RC(v7_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V8()
%{
  constraint(ALLOC_IN_RC(v8_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V9()
%{
  constraint(ALLOC_IN_RC(v9_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V10()
%{
  constraint(ALLOC_IN_RC(v10_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V11()
%{
  constraint(ALLOC_IN_RC(v11_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V12()
%{
  constraint(ALLOC_IN_RC(v12_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V13()
%{
  constraint(ALLOC_IN_RC(v13_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V14()
%{
  constraint(ALLOC_IN_RC(v14_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V15()
%{
  constraint(ALLOC_IN_RC(v15_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V16()
%{
  constraint(ALLOC_IN_RC(v16_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V17()
%{
  constraint(ALLOC_IN_RC(v17_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V18()
%{
  constraint(ALLOC_IN_RC(v18_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V19()
%{
  constraint(ALLOC_IN_RC(v19_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5387 
 5388 operand vRegD_V20()
 5389 %{
 5390   constraint(ALLOC_IN_RC(v20_reg));
 5391   match(RegD);
 5392   op_cost(0);
 5393   format %{ %}
 5394   interface(REG_INTER);
 5395 %}
 5396 
 5397 operand vRegD_V21()
 5398 %{
 5399   constraint(ALLOC_IN_RC(v21_reg));
 5400   match(RegD);
 5401   op_cost(0);
 5402   format %{ %}
 5403   interface(REG_INTER);
 5404 %}
 5405 
 5406 operand vRegD_V22()
 5407 %{
 5408   constraint(ALLOC_IN_RC(v22_reg));
 5409   match(RegD);
 5410   op_cost(0);
 5411   format %{ %}
 5412   interface(REG_INTER);
 5413 %}
 5414 
 5415 operand vRegD_V23()
 5416 %{
 5417   constraint(ALLOC_IN_RC(v23_reg));
 5418   match(RegD);
 5419   op_cost(0);
 5420   format %{ %}
 5421   interface(REG_INTER);
 5422 %}
 5423 
 5424 operand vRegD_V24()
 5425 %{
 5426   constraint(ALLOC_IN_RC(v24_reg));
 5427   match(RegD);
 5428   op_cost(0);
 5429   format %{ %}
 5430   interface(REG_INTER);
 5431 %}
 5432 
 5433 operand vRegD_V25()
 5434 %{
 5435   constraint(ALLOC_IN_RC(v25_reg));
 5436   match(RegD);
 5437   op_cost(0);
 5438   format %{ %}
 5439   interface(REG_INTER);
 5440 %}
 5441 
 5442 operand vRegD_V26()
 5443 %{
 5444   constraint(ALLOC_IN_RC(v26_reg));
 5445   match(RegD);
 5446   op_cost(0);
 5447   format %{ %}
 5448   interface(REG_INTER);
 5449 %}
 5450 
 5451 operand vRegD_V27()
 5452 %{
 5453   constraint(ALLOC_IN_RC(v27_reg));
 5454   match(RegD);
 5455   op_cost(0);
 5456   format %{ %}
 5457   interface(REG_INTER);
 5458 %}
 5459 
 5460 operand vRegD_V28()
 5461 %{
 5462   constraint(ALLOC_IN_RC(v28_reg));
 5463   match(RegD);
 5464   op_cost(0);
 5465   format %{ %}
 5466   interface(REG_INTER);
 5467 %}
 5468 
 5469 operand vRegD_V29()
 5470 %{
 5471   constraint(ALLOC_IN_RC(v29_reg));
 5472   match(RegD);
 5473   op_cost(0);
 5474   format %{ %}
 5475   interface(REG_INTER);
 5476 %}
 5477 
 5478 operand vRegD_V30()
 5479 %{
 5480   constraint(ALLOC_IN_RC(v30_reg));
 5481   match(RegD);
 5482   op_cost(0);
 5483   format %{ %}
 5484   interface(REG_INTER);
 5485 %}
 5486 
 5487 operand vRegD_V31()
 5488 %{
 5489   constraint(ALLOC_IN_RC(v31_reg));
 5490   match(RegD);
 5491   op_cost(0);
 5492   format %{ %}
 5493   interface(REG_INTER);
 5494 %}
 5495 
// SVE predicate-register operands (ideal type RegVectMask).

// Any predicate register (class pr_reg).
operand pReg()
%{
  constraint(ALLOC_IN_RC(pr_reg));
  match(RegVectMask);
  match(pRegGov);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// A governing predicate register (class gov_pr) — usable where the
// instruction requires a governing predicate.
operand pRegGov()
%{
  constraint(ALLOC_IN_RC(gov_pr));
  match(RegVectMask);
  match(pReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Governing predicate pinned to p0 (class p0_reg).
operand pRegGov_P0()
%{
  constraint(ALLOC_IN_RC(p0_reg));
  match(RegVectMask);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Governing predicate pinned to p1 (class p1_reg).
operand pRegGov_P1()
%{
  constraint(ALLOC_IN_RC(p1_reg));
  match(RegVectMask);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5533 
 5534 // Flags register, used as output of signed compare instructions
 5535 
// note that on AArch64 we also use this register as the output for
// floating point compare instructions (CmpF CmpD). this ensures
 5538 // that ordered inequality tests use GT, GE, LT or LE none of which
 5539 // pass through cases where the result is unordered i.e. one or both
 5540 // inputs to the compare is a NaN. this means that the ideal code can
 5541 // replace e.g. a GT with an LE and not end up capturing the NaN case
 5542 // (where the comparison should always fail). EQ and NE tests are
 5543 // always generated in ideal code so that unordered folds into the NE
 5544 // case, matching the behaviour of AArch64 NE.
 5545 //
 5546 // This differs from x86 where the outputs of FP compares use a
 5547 // special FP flags registers and where compares based on this
 5548 // register are distinguished into ordered inequalities (cmpOpUCF) and
 5549 // EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
 5550 // to explicitly handle the unordered case in branches. x86 also has
 5551 // to include extra CMoveX rules to accept a cmpOpUCF input.
 5552 
// Condition flags (NZCV); see the note above for why FP compares
// also target this operand.
operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}
 5562 
 5563 // Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags)); // same physical flags as rFlagsReg
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
 5573 
 5574 // Special Registers
 5575 
 5576 // Method Register
// Pointer operand pinned to the inline-cache register (class method_reg).
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5586 
 5587 // Thread Register
// Pointer operand pinned to the JavaThread register (class thread_reg).
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg (old "link_reg" comment here was a copy/paste slip)
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5596 
// Pointer operand pinned to the link register (class lr_reg).
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5605 
 5606 //----------Memory Operands----------------------------------------------------
 5607 
// Register-indirect and base+index addressing modes.
// index(0xffffffff) is the ADLC sentinel for "no index register".

// [reg] -- plain register-indirect.
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// [reg, ireg sxtw scale] -- base plus sign-extended int index, shifted.
// The predicate verifies every memory use of this AddP can encode the
// scaled-index addressing form.
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// [reg, lreg lsl scale] -- base plus long index, shifted.
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// [reg, ireg] -- base plus sign-extended int index, unshifted.
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [reg, lreg] -- base plus long index, unshifted.
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
 5679 
// [reg, #off] -- base plus int immediate offset. The numeric suffix
// names the access size in bytes the offset operand (immIOffsetN) is
// valid for; the unsuffixed form takes a generic immIOffset.
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI1(iRegP reg, immIOffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI2(iRegP reg, immIOffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5763 
// [reg, #off] -- base plus long immediate offset; same size-suffix
// scheme as the indOffI* family above.
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL1(iRegP reg, immLoffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL2(iRegP reg, immLoffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5847 
// Narrow-oop (compressed pointer) variants of the addressing modes
// above. All require CompressedOops::shift() == 0, i.e. DecodeN is a
// no-op, so the narrow register can serve directly as the base.
operand indirectN(iRegN reg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5952 
 5953 
 5954 
 5955 // AArch64 opto stubs need to write to the pc slot in the thread anchor
// [rthread, #off] address of the pc slot in the thread's frame anchor.
// NOTE(review): immL_pc_off presumably encodes that fixed offset —
// confirm against its definition earlier in the file.
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5969 
 5970 //----------Special Memory Operands--------------------------------------------
 5971 // Stack Slot Operand - This operand is used for loading and storing temporary
 5972 //                      values on the stack where a match requires a value to
 5973 //                      flow through memory.
// Stack slot holding a pointer.
// NOTE(review): this is the only stackSlot* operand with an explicit
// op_cost(100); the siblings below have none — confirm intentional.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding an int.
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a float.
operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a double.
operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a long.
operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
 6044 
 6045 // Operands for expressing Control Flow
 6046 // NOTE: Label is a predefined operand which should not be redefined in
 6047 //       the AD file. It is generically handled within the ADLC.
 6048 
 6049 //----------Conditional Branch Operands----------------------------------------
 6050 // Comparison Op  - This is the operation of the comparison, and is limited to
 6051 //                  the following set of codes:
 6052 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
 6053 //
 6054 // Other attributes of the comparison, such as unsignedness, are specified
 6055 // by the comparison instruction that sets a condition code flags register.
 6056 // That result is represented by a flags operand whose subtype is appropriate
 6057 // to the unsignedness (etc.) of the comparison.
 6058 //
 6059 // Later, the instruction which matches both the Comparison Op (a Bool) and
 6060 // the flags (produced by the Cmp) specifies the coding of the comparison op
 6061 // by matching a specific subtype of Bool operand below, such as cmpOpU.
 6062 
 6063 // used for signed integral comparisons and fp comparisons
 6064 
operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  // The hex values are the A64 condition-code encodings for the
  // signed mnemonics (lt/ge/le/gt), plus vs/vc for overflow tests.
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 6081 
 6082 // used for unsigned integral comparisons
 6083 
operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  // Same as cmpOp but maps the inequalities to the unsigned A64
  // condition codes lo/hs/ls/hi.
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 6100 
 6101 // used for certain integral comparisons which can be
 6102 // converted to cbxx or tbxx instructions
 6103 
// cmpOp restricted by predicate to eq/ne tests only (see the comment
// above: such tests can be matched to cbz/cbnz/tbz/tbnz forms).
operand cmpOpEqNe()
%{
  match(Bool);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 6123 
 6124 // used for certain integral comparisons which can be
 6125 // converted to cbxx or tbxx instructions
 6126 
// cmpOp restricted by predicate to lt/ge tests only (see the comment
// above: candidates for cbxx/tbxx conversion).
operand cmpOpLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 6147 
 6148 // used for certain unsigned integral comparisons which can be
 6149 // converted to cbxx or tbxx instructions
 6150 
// cmpOp restricted by predicate to eq/ne/lt/ge tests (see the comment
// above: unsigned candidates for cbxx/tbxx conversion).
operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 6173 
 6174 // Special operand allowing long args to int ops to be truncated for free
 6175 
// Matches (ConvL2I reg) so that 32-bit int rules can consume a long
// operand directly; the truncation is free because 32-bit instructions
// read only the low half of the register.
operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}
 6186 
// Memory opclasses for vector loads/stores: register-indirect,
// register+register, or an immediate offset valid for the given
// access size in bytes (the numeric suffix).
opclass vmem2(indirect, indIndex, indOffI2, indOffL2);
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
 6191 
 6192 //----------OPERAND CLASSES----------------------------------------------------
 6193 // Operand Classes are groups of operands that are used as to simplify
 6194 // instruction definitions by not requiring the AD writer to specify
 6195 // separate instructions for every form of operand when the
 6196 // instruction accepts multiple operand types with the same basic
 6197 // encoding and format. The classic case of this is memory operands.
 6198 
 6199 // memory is used to define read/write location for load/store
 6200 // instruction defs. we can turn a memory op into an Address
 6201 
// memoryN groups the addressing modes usable for an N-byte access.
// Note memory1 and memory2 do not include the narrow-base immediate
// offset forms (indOffIN/indOffLN) that memory4 and memory8 accept.
opclass memory1(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI1, indOffL1,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN);

opclass memory2(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI2, indOffL2,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN);

opclass memory4(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI4, indOffL4,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

opclass memory8(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

// All of the memory operands. For the pipeline description.
opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex,
               indOffI1, indOffL1, indOffI2, indOffL2, indOffI4, indOffL4, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);
 6218 
 6219 
 6220 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
 6221 // operations. it allows the src to be either an iRegI or a (ConvL2I
 6222 // iRegL). in the latter case the l2i normally planted for a ConvL2I
 6223 // can be elided because the 32-bit instruction will just employ the
 6224 // lower 32 bits anyway.
 6225 //
 6226 // n.b. this does not elide all L2I conversions. if the truncated
 6227 // value is consumed by more than one operation then the ConvL2I
 6228 // cannot be bundled into the consuming nodes so an l2i gets planted
 6229 // (actually a movw $dst $src) and the downstream instructions consume
 6230 // the result of the l2i as an iRegI input. That's a shame since the
 6231 // movw is actually redundant but its not too costly.
 6232 
// See the block comment above: int src as iRegI, or an elided
// (ConvL2I iRegL).
opclass iRegIorL2I(iRegI, iRegL2I);
 6234 
 6235 //----------PIPELINE-----------------------------------------------------------
 6236 // Rules which define the behavior of the target architectures pipeline.
 6237 
 6238 // For specific pipelines, eg A53, define the stages of that pipeline
 6239 //pipe_desc(ISS, EX1, EX2, WR);
// Map the A53-style stage names (issue, execute 1/2, writeback) onto
// the generic S0..S5 stages declared by pipe_desc below.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
 6244 
 6245 // Integer ALU reg operation
 6246 pipeline %{
 6247 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // TODO: confirm this attribute is still needed
  max_instructions_per_bundle = 4;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
 6260 
 6261 // We don't use an actual pipeline model so don't care about resources
 6262 // or description. we do use pipeline classes to introduce fixed
 6263 // latencies
 6264 
 6265 //----------RESOURCES----------------------------------------------------------
 6266 // Resources are the functional units available to the machine
 6267 
// Functional units: INS0/INS1 are the two issue slots (INS01 = either,
// cf. "Dual issue" in ialu classes below); ALU0/ALU1 integer ALUs;
// MAC multiply-accumulate; DIV divider; BRANCH; LDST load/store;
// NEON_FP the SIMD/FP unit.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);
 6275 
 6276 //----------PIPELINE DESCRIPTION-----------------------------------------------
 6277 // Pipeline Description specifies the stages in the machine's pipeline
 6278 
 6279 // Define the pipeline as a generic 6 stage pipeline
pipe_desc(S0, S1, S2, S3, S4, S5); // stages also aliased as ISS/EX1/EX2/WR above
 6281 
 6282 //----------PIPELINE CLASSES---------------------------------------------------
 6283 // Pipeline Classes describe the stages in which input and output are
 6284 // referenced by the hardware pipeline.
 6285 
// Scalar FP pipeline classes. These do not model a real pipeline in
// detail (see the comment above); they only impose representative
// read/write stages and claim an issue slot (INS01, or INS0 alone
// for divides) plus the NEON_FP unit.

// FP dyadic op, single precision.
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP dyadic op, double precision.
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP monadic op, single precision.
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP monadic op, double precision.
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Conversions between FP widths and between FP and integer registers.
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP divide: claims INS0 only (no dual issue).
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP conditional select: also reads the flags register.
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP immediate moves and constant loads.
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
 6487 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
// NOTE(review): dst is written at EX2 but the ALU resource is held at
// EX1 -- confirm the resource stage is intentional.
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
 6585 
//------- Compare operation -------------------------------

// Compare reg-reg
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-imm
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
 6612 
//------- Conditional instructions ------------------------

// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// EG.  CSEL    X0, X1, X1, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
 6650 
//------- Multiply pipeline operations --------------------

// Multiply reg-reg (32 bit)
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (32 bit)
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply reg-reg (64 bit)
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (64 bit)
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
 6703 
//------- Divide pipeline operations --------------------

// 32 bit divide
// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// 64 bit divide
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
 6729 
//------- Load pipeline operations ------------------------

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
 6763 
//------- Store pipeline operations -----------------------

// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
// Note: despite its name, 'dst' is the address/index register and is
// only read (at ISS); 'src' supplies the data being stored.
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
 6797 
//------- Branch pipeline operations ----------------------

// Unconditional branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch; reads the flags register in EX1
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
 6826 
//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Empty pipeline class; used for MachNop (see the define block below)
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}
 6864 
// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}

// Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty;
%}
 6890 
 6891 %}
 6892 //----------INSTRUCTIONS-------------------------------------------------------
 6893 //
 6894 // match      -- States which machine-independent subtree may be replaced
 6895 //               by this instruction.
 6896 // ins_cost   -- The estimated cost of this instruction is used by instruction
 6897 //               selection to identify a minimum cost tree of machine
 6898 //               instructions that matches a tree of machine-independent
 6899 //               instructions.
 6900 // format     -- A string providing the disassembly for this instruction.
 6901 //               The value of an instruction's operand may be inserted
 6902 //               by referring to it with a '$' prefix.
 6903 // opcode     -- Three instruction opcodes may be provided.  These are referred
 6904 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
 6906 //               indicate the type of machine instruction, while secondary
 6907 //               and tertiary are often used for prefix options or addressing
 6908 //               modes.
 6909 // ins_encode -- A list of encode classes with parameters. The encode class
 6910 //               name must have been defined in an 'enc_class' specification
 6911 //               in the encode section of the architecture description.
 6912 
 6913 // ============================================================================
 6914 // Memory (Load/Store) Instructions
 6915 
 6916 // Load Instructions
 6917 
// Load Byte (8 bit signed)
// Plain loads are predicated with !needs_acquiring_load so that loads
// requiring acquire semantics match the ldar forms in the volatile
// loads section further down.
instruct loadB(iRegINoSp dst, memory1 mem)
%{
  match(Set dst (LoadB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit signed) into long
// The ConvI2L is folded into the sign-extending load.
instruct loadB2L(iRegLNoSp dst, memory1 mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory1 mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L(iRegLNoSp dst, memory1 mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory2 mem)
%{
  match(Set dst (LoadS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory2 mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7001 
// Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory2 mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory2 mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory4 mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long
// Uses the sign-extending ldrsw, folding the ConvI2L.
instruct loadI2L(iRegLNoSp dst, memory4 mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
// The AndL with the 32-bit mask is subsumed by the zero-extending ldrw.
instruct loadUI2L(iRegLNoSp dst, memory4 mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Long (64 bit signed)
// NOTE(review): the "# int" in the format text is display-only and
// arguably should read "# long".
instruct loadL(iRegLNoSp dst, memory8 mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7085 
// Load Range
// No acquiring predicate: array lengths are immutable once written.
instruct loadRange(iRegINoSp dst, memory4 mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
// Only matches when no GC barrier data is attached to the load.
instruct loadP(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7154 
// Load Float
// Note: FP loads use pipe_class_memory rather than iload_reg_mem.
instruct loadF(vRegF dst, memory4 mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory8 mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
 7182 
 7183 
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant
// Costed at 4 instructions: a full pointer may take several mov/movk.
instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant
instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
 7239 
 7240 // Load Pointer Constant One
 7241 
 7242 instruct loadConP1(iRegPNoSp dst, immP_1 con)
 7243 %{
 7244   match(Set dst con);
 7245 
 7246   ins_cost(INSN_COST);
 7247   format %{ "mov  $dst, $con\t# NULL ptr" %}
 7248 
 7249   ins_encode(aarch64_enc_mov_p1(dst, con));
 7250 
 7251   ins_pipe(ialu_imm);
 7252 %}
 7253 
// Load Byte Map Base Constant
// Materialised with adr rather than a mov sequence.
instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Pointer Constant
instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant
instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant
instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
 7309 
// Load Packed Float Constant
// "Packed" constants fit the 8-bit fmov immediate encoding.
instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}

// Load Float Constant
// General case: the constant is fetched from the constant table.
instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}

// Load Packed Double Constant
instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
 7353 
 7354 // Load Double Constant
 7355 
 7356 instruct loadConD(vRegD dst, immD con) %{
 7357   match(Set dst con);
 7358 
 7359   ins_cost(INSN_COST * 5);
 7360   format %{
 7361     "ldrd $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
 7362   %}
 7363 
 7364   ins_encode %{
 7365     __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
 7366   %}
 7367 
 7368   ins_pipe(fp_load_constant_d);
 7369 %}
 7370 
// Store Instructions

// Store CMS card-mark Immediate
// The leading storestore barrier is elided in this variant.
instruct storeimmCM0(immI0 zero, memory1 mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST);
  format %{ "storestore (elided)\n\t"
            "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory1 mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "storestore\n\t"
            "dmb ishst"
            "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}

// Store Byte
// Plain stores require !needs_releasing_store(n); releasing stores are
// handled by the stlr forms in the volatile section.
instruct storeB(iRegIorL2I src, memory1 mem)
%{
  match(Set mem (StoreB mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7416 
 7417 
 7418 instruct storeimmB0(immI0 zero, memory1 mem)
 7419 %{
 7420   match(Set mem (StoreB mem zero));
 7421   predicate(!needs_releasing_store(n));
 7422 
 7423   ins_cost(INSN_COST);
 7424   format %{ "strb rscractch2, $mem\t# byte" %}
 7425 
 7426   ins_encode(aarch64_enc_strb0(mem));
 7427 
 7428   ins_pipe(istore_mem);
 7429 %}
 7430 
// Store Char/Short
instruct storeC(iRegIorL2I src, memory2 mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Char/Short zero
instruct storeimmC0(immI0 zero, memory2 mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}
 7457 
// Store Integer

instruct storeI(iRegIorL2I src, memory4 mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Integer zero
instruct storeimmI0(immI0 zero, memory4 mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}

// Store Long (64 bit signed)
// NOTE(review): the "# int" in the format text is display-only and
// arguably should read "# long".
instruct storeL(iRegL src, memory8 mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Long (64 bit signed) zero
instruct storeimmL0(immL0 zero, memory8 mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
 7513 
// Store Pointer
// Only matches when no GC barrier data is attached to the store.
instruct storeP(iRegP src, memory8 mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Null Pointer
instruct storeimmP0(immP0 zero, memory8 mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}

// Store Compressed Pointer
instruct storeN(iRegN src, memory4 mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Compressed Null Pointer
instruct storeImmN0(immN0 zero, memory4 mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
 7568 
// Store Float
// Single-precision FP store from an FP/SIMD register.
instruct storeF(vRegF src, memory4 mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
// Double-precision FP store from an FP/SIMD register.
instruct storeD(vRegD src, memory8 mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 7599 
// Store Compressed Klass Pointer
// Same encoding as storeN; narrow klass pointers are also 32 bits.
// Note: predicate is stated before match here — order of clauses is not
// significant to ADLC.
instruct storeNKlass(iRegN src, memory4 mem)
%{
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7613 
 7614 // TODO
 7615 // implement storeImmD0 and storeDImmPacked
 7616 
// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

// Prefetch-for-allocation: hints the line will be written soon
// (PSTL1KEEP = prefetch for store, L1, temporal).
instruct prefetchalloc( memory8 mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
 7630 
//  ---------------- volatile loads and stores ----------------
//
// These rules use acquire loads (ldar*) / release stores (stlr*), which on
// AArch64 only accept a plain base-register address — hence the `indirect`
// memory operand rather than the general memory operands used above.

// Load Byte (8 bit signed)
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
// ldarb zero-extends to the full register, so no extra conversion is needed.
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}
 7684 
// Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Char (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned) into long
// ldarh zero-extends, so the ConvI2L comes for free.
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
 7722 
 7723 // Load Short/Char (16 bit signed) into long
 7724 instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7725 %{
 7726   match(Set dst (ConvI2L (LoadS mem)));
 7727 
 7728   ins_cost(VOLATILE_REF_COST);
 7729   format %{ "ldarh  $dst, $mem\t# short" %}
 7730 
 7731   ins_encode(aarch64_enc_ldarsh(dst, mem));
 7732 
 7733   ins_pipe(pipe_serial);
 7734 %}
 7735 
// Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
// Matches the (AndL (ConvI2L (LoadI ..)) 0xFFFFFFFF) idiom; ldarw already
// zero-extends into the 64-bit register, so the mask is free.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
 7761 
 7762 // Load Long (64 bit signed)
 7763 instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7764 %{
 7765   match(Set dst (LoadL mem));
 7766 
 7767   ins_cost(VOLATILE_REF_COST);
 7768   format %{ "ldar  $dst, $mem\t# int" %}
 7769 
 7770   ins_encode(aarch64_enc_ldar(dst, mem));
 7771 
 7772   ins_pipe(pipe_serial);
 7773 %}
 7774 
// Load Pointer
// Only matches when no GC barrier is attached (barrier_data() == 0);
// barrier-carrying loads are handled by GC-specific rules elsewhere.
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));
  predicate(n->as_Load()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float
// Acquire-load into an FP register (see enc block for the exact sequence).
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
 7827 
// Store Byte
// Release store (stlrb).
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Release store of zero byte via wzr.
instruct storeimmB0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb0(mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Release store of zero short via wzr.
instruct storeimmC0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh0(mem));

  ins_pipe(pipe_class_memory);
%}
 7877 
// Store Integer

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Release store of zero int via wzr.
instruct storeimmI0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw0(mem));

  ins_pipe(pipe_class_memory);
%}
 7903 
 7904 // Store Long (64 bit signed)
 7905 instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
 7906 %{
 7907   match(Set mem (StoreL mem src));
 7908 
 7909   ins_cost(VOLATILE_REF_COST);
 7910   format %{ "stlr  $src, $mem\t# int" %}
 7911 
 7912   ins_encode(aarch64_enc_stlr(src, mem));
 7913 
 7914   ins_pipe(pipe_class_memory);
 7915 %}
 7916 
 7917 instruct storeimmL0_volatile(immL0 zero, /* sync_memory*/indirect mem)
 7918 %{
 7919   match(Set mem (StoreL mem zero));
 7920 
 7921   ins_cost(VOLATILE_REF_COST);
 7922   format %{ "stlr  zr, $mem\t# int" %}
 7923 
 7924   ins_encode(aarch64_enc_stlr0(mem));
 7925 
 7926   ins_pipe(pipe_class_memory);
 7927 %}
 7928 
// Store Pointer
// Only when no GC barrier is attached; barrier-carrying stores are
// handled by GC-specific rules.
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Release store of null pointer via xzr.
instruct storeimmP0_volatile(immP0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr0(mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Release store of compressed-null via wzr.
instruct storeimmN0_volatile(immN0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  zr, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw0(mem));

  ins_pipe(pipe_class_memory);
%}
 7980 
// Store Float
// Release store from an FP register (see enc block for the exact sequence).
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 8009 
 8010 //  ---------------- end of volatile loads and stores ----------------
 8011 
// Cache line write-back (used by e.g. persistent-memory flush intrinsics).
// Only available when the CPU supports a data cache line flush instruction.
instruct cacheWB(indirect addr)
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWB addr);

  ins_cost(100);
  format %{"cache wb $addr" %}
  ins_encode %{
    // cache_wb takes a plain base address: assert no index and no displacement.
    assert($addr->index_position() < 0, "should be");
    assert($addr$$disp == 0, "should be");
    __ cache_wb(Address($addr$$base$$Register, 0));
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Ordering fence emitted before a sequence of cache write-backs.
instruct cacheWBPreSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPreSync);

  ins_cost(100);
  format %{"cache wb presync" %}
  ins_encode %{
    __ cache_wbsync(true);
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Ordering fence emitted after a sequence of cache write-backs.
instruct cacheWBPostSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPostSync);

  ins_cost(100);
  format %{"cache wb postsync" %}
  ins_encode %{
    __ cache_wbsync(false);
  %}
  ins_pipe(pipe_slow); // XXX
%}
 8052 
 8053 // ============================================================================
 8054 // BSWAP Instructions
 8055 
 8056 instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
 8057   match(Set dst (ReverseBytesI src));
 8058 
 8059   ins_cost(INSN_COST);
 8060   format %{ "revw  $dst, $src" %}
 8061 
 8062   ins_encode %{
 8063     __ revw(as_Register($dst$$reg), as_Register($src$$reg));
 8064   %}
 8065 
 8066   ins_pipe(ialu_reg);
 8067 %}
 8068 
 8069 instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
 8070   match(Set dst (ReverseBytesL src));
 8071 
 8072   ins_cost(INSN_COST);
 8073   format %{ "rev  $dst, $src" %}
 8074 
 8075   ins_encode %{
 8076     __ rev(as_Register($dst$$reg), as_Register($src$$reg));
 8077   %}
 8078 
 8079   ins_pipe(ialu_reg);
 8080 %}
 8081 
 8082 instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
 8083   match(Set dst (ReverseBytesUS src));
 8084 
 8085   ins_cost(INSN_COST);
 8086   format %{ "rev16w  $dst, $src" %}
 8087 
 8088   ins_encode %{
 8089     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
 8090   %}
 8091 
 8092   ins_pipe(ialu_reg);
 8093 %}
 8094 
 8095 instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
 8096   match(Set dst (ReverseBytesS src));
 8097 
 8098   ins_cost(INSN_COST);
 8099   format %{ "rev16w  $dst, $src\n\t"
 8100             "sbfmw $dst, $dst, #0, #15" %}
 8101 
 8102   ins_encode %{
 8103     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
 8104     __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
 8105   %}
 8106 
 8107   ins_pipe(ialu_reg);
 8108 %}
 8109 
 8110 // ============================================================================
 8111 // Zero Count Instructions
 8112 
 8113 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
 8114   match(Set dst (CountLeadingZerosI src));
 8115 
 8116   ins_cost(INSN_COST);
 8117   format %{ "clzw  $dst, $src" %}
 8118   ins_encode %{
 8119     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
 8120   %}
 8121 
 8122   ins_pipe(ialu_reg);
 8123 %}
 8124 
 8125 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
 8126   match(Set dst (CountLeadingZerosL src));
 8127 
 8128   ins_cost(INSN_COST);
 8129   format %{ "clz   $dst, $src" %}
 8130   ins_encode %{
 8131     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
 8132   %}
 8133 
 8134   ins_pipe(ialu_reg);
 8135 %}
 8136 
 8137 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
 8138   match(Set dst (CountTrailingZerosI src));
 8139 
 8140   ins_cost(INSN_COST * 2);
 8141   format %{ "rbitw  $dst, $src\n\t"
 8142             "clzw   $dst, $dst" %}
 8143   ins_encode %{
 8144     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
 8145     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
 8146   %}
 8147 
 8148   ins_pipe(ialu_reg);
 8149 %}
 8150 
 8151 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
 8152   match(Set dst (CountTrailingZerosL src));
 8153 
 8154   ins_cost(INSN_COST * 2);
 8155   format %{ "rbit   $dst, $src\n\t"
 8156             "clz    $dst, $dst" %}
 8157   ins_encode %{
 8158     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
 8159     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
 8160   %}
 8161 
 8162   ins_pipe(ialu_reg);
 8163 %}
 8164 
//---------- Population Count Instructions -------------------------------------
//
// Population count is done in the vector unit: move the value to an FP/SIMD
// register, CNT counts bits per byte, ADDV sums the byte counts, then the
// scalar result is moved back.

instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // NOTE(review): this writes $src (movw zero-extends, clearing the top
    // 32 bits) even though $src is an input operand — presumably safe for
    // iRegIorL2I since the int value itself is unchanged; confirm no
    // USE_KILL effect is required.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Memory form: load the int straight into the FP register (ldrs), skipping
// the GPR round trip.
instruct popCountI_mem(iRegINoSp dst, memory4 mem, vRegF tmp) %{
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Memory form: load the long straight into the FP register (ldrd).
instruct popCountL_mem(iRegINoSp dst, memory8 mem, vRegD tmp) %{
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
 8250 
 8251 // ============================================================================
 8252 // MemBar Instruction
 8253 
 8254 instruct load_fence() %{
 8255   match(LoadFence);
 8256   ins_cost(VOLATILE_REF_COST);
 8257 
 8258   format %{ "load_fence" %}
 8259 
 8260   ins_encode %{
 8261     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
 8262   %}
 8263   ins_pipe(pipe_serial);
 8264 %}
 8265 
 8266 instruct unnecessary_membar_acquire() %{
 8267   predicate(unnecessary_acquire(n));
 8268   match(MemBarAcquire);
 8269   ins_cost(0);
 8270 
 8271   format %{ "membar_acquire (elided)" %}
 8272 
 8273   ins_encode %{
 8274     __ block_comment("membar_acquire (elided)");
 8275   %}
 8276 
 8277   ins_pipe(pipe_class_empty);
 8278 %}
 8279 
 8280 instruct membar_acquire() %{
 8281   match(MemBarAcquire);
 8282   ins_cost(VOLATILE_REF_COST);
 8283 
 8284   format %{ "membar_acquire\n\t"
 8285             "dmb ish" %}
 8286 
 8287   ins_encode %{
 8288     __ block_comment("membar_acquire");
 8289     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
 8290   %}
 8291 
 8292   ins_pipe(pipe_serial);
 8293 %}
 8294 
 8295 
 8296 instruct membar_acquire_lock() %{
 8297   match(MemBarAcquireLock);
 8298   ins_cost(VOLATILE_REF_COST);
 8299 
 8300   format %{ "membar_acquire_lock (elided)" %}
 8301 
 8302   ins_encode %{
 8303     __ block_comment("membar_acquire_lock (elided)");
 8304   %}
 8305 
 8306   ins_pipe(pipe_serial);
 8307 %}
 8308 
 8309 instruct store_fence() %{
 8310   match(StoreFence);
 8311   ins_cost(VOLATILE_REF_COST);
 8312 
 8313   format %{ "store_fence" %}
 8314 
 8315   ins_encode %{
 8316     __ membar(Assembler::LoadStore|Assembler::StoreStore);
 8317   %}
 8318   ins_pipe(pipe_serial);
 8319 %}
 8320 
 8321 instruct unnecessary_membar_release() %{
 8322   predicate(unnecessary_release(n));
 8323   match(MemBarRelease);
 8324   ins_cost(0);
 8325 
 8326   format %{ "membar_release (elided)" %}
 8327 
 8328   ins_encode %{
 8329     __ block_comment("membar_release (elided)");
 8330   %}
 8331   ins_pipe(pipe_serial);
 8332 %}
 8333 
 8334 instruct membar_release() %{
 8335   match(MemBarRelease);
 8336   ins_cost(VOLATILE_REF_COST);
 8337 
 8338   format %{ "membar_release\n\t"
 8339             "dmb ish" %}
 8340 
 8341   ins_encode %{
 8342     __ block_comment("membar_release");
 8343     __ membar(Assembler::LoadStore|Assembler::StoreStore);
 8344   %}
 8345   ins_pipe(pipe_serial);
 8346 %}
 8347 
// Store-store barrier; also matches the StoreStoreFence node.
instruct membar_storestore() %{
  match(MemBarStoreStore);
  match(StoreStoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// Lock release already provides release semantics — nothing emitted.
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Elided full barrier — only a comment is emitted.
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Full (StoreLoad) barrier; high cost discourages selection when an
// elidable form applies.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile\n\t"
             "dmb ish"%}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
 8402 
 8403 // ============================================================================
 8404 // Cast/Convert Instructions
 8405 
 8406 instruct castX2P(iRegPNoSp dst, iRegL src) %{
 8407   match(Set dst (CastX2P src));
 8408 
 8409   ins_cost(INSN_COST);
 8410   format %{ "mov $dst, $src\t# long -> ptr" %}
 8411 
 8412   ins_encode %{
 8413     if ($dst$$reg != $src$$reg) {
 8414       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 8415     }
 8416   %}
 8417 
 8418   ins_pipe(ialu_reg);
 8419 %}
 8420 
 8421 instruct castP2X(iRegLNoSp dst, iRegP src) %{
 8422   match(Set dst (CastP2X src));
 8423 
 8424   ins_cost(INSN_COST);
 8425   format %{ "mov $dst, $src\t# ptr -> long" %}
 8426 
 8427   ins_encode %{
 8428     if ($dst$$reg != $src$$reg) {
 8429       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 8430     }
 8431   %}
 8432 
 8433   ins_pipe(ialu_reg);
 8434 %}
 8435 
 8436 // Convert oop into int for vectors alignment masking
 8437 instruct convP2I(iRegINoSp dst, iRegP src) %{
 8438   match(Set dst (ConvL2I (CastP2X src)));
 8439 
 8440   ins_cost(INSN_COST);
 8441   format %{ "movw $dst, $src\t# ptr -> int" %}
 8442   ins_encode %{
 8443     __ movw($dst$$Register, $src$$Register);
 8444   %}
 8445 
 8446   ins_pipe(ialu_reg);
 8447 %}
 8448 
 8449 // Convert compressed oop into int for vectors alignment masking
 8450 // in case of 32bit oops (heap < 4Gb).
 8451 instruct convN2I(iRegINoSp dst, iRegN src)
 8452 %{
 8453   predicate(CompressedOops::shift() == 0);
 8454   match(Set dst (ConvL2I (CastP2X (DecodeN src))));
 8455 
 8456   ins_cost(INSN_COST);
 8457   format %{ "mov dst, $src\t# compressed ptr -> int" %}
 8458   ins_encode %{
 8459     __ movw($dst$$Register, $src$$Register);
 8460   %}
 8461 
 8462   ins_pipe(ialu_reg);
 8463 %}
 8464 

// Convert oop pointer into compressed form
// General form: must handle a possibly-null oop, hence the KILL cr
// (encode_heap_oop may compare/branch on null).
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Not-null form: no null check needed, so flags are not clobbered.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// General decode: handles possibly-null narrow oops.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant)
;
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
 8519 
// n.b. AArch64 implementations of encode_klass_not_null and
// decode_klass_not_null do not modify the flags register so, unlike
// Intel, we don't kill CR as a side effect here

instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    // The single-register overload decodes in place; used when dst == src.
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
 8557 
// The Cast*/CheckCastPP nodes below are compile-time type assertions only:
// they match (Set dst (Cast.. dst)), emit no code (size(0)), and exist so
// the register allocator keeps the value pinned in its register.
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castLL(iRegL dst)
%{
  match(Set dst (CastLL dst));

  size(0);
  format %{ "# castLL of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castFF(vRegF dst)
%{
  match(Set dst (CastFF dst));

  size(0);
  format %{ "# castFF of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castDD(vRegD dst)
%{
  match(Set dst (CastDD dst));

  size(0);
  format %{ "# castDD of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castVV(vReg dst)
%{
  match(Set dst (CastVV dst));

  size(0);
  format %{ "# castVV of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// Same as castVV but for a value living in an SVE predicate register.
instruct castVVMask(pRegGov dst)
%{
  match(Set dst (CastVV dst));

  size(0);
  format %{ "# castVV of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
 8643 
 8644 // ============================================================================
 8645 // Atomic operation instructions
 8646 //
 8647 
 8648 // standard CompareAndSwapX when we are using barriers
 8649 // these have higher priority than the rules selected by a predicate
 8650 
 8651 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
 8652 // can't match them
 8653 
 8654 instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 8655 
 8656   match(Set res (CompareAndSwapB mem (Binary oldval newval)));
 8657   ins_cost(2 * VOLATILE_REF_COST);
 8658 
 8659   effect(KILL cr);
 8660 
 8661   format %{
 8662     "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 8663     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8664   %}
 8665 
 8666   ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
 8667             aarch64_enc_cset_eq(res));
 8668 
 8669   ins_pipe(pipe_slow);
 8670 %}
 8671 
 8672 instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 8673 
 8674   match(Set res (CompareAndSwapS mem (Binary oldval newval)));
 8675   ins_cost(2 * VOLATILE_REF_COST);
 8676 
 8677   effect(KILL cr);
 8678 
 8679   format %{
 8680     "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 8681     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8682   %}
 8683 
 8684   ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
 8685             aarch64_enc_cset_eq(res));
 8686 
 8687   ins_pipe(pipe_slow);
 8688 %}
 8689 
 8690 instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 8691 
 8692   match(Set res (CompareAndSwapI mem (Binary oldval newval)));
 8693   ins_cost(2 * VOLATILE_REF_COST);
 8694 
 8695   effect(KILL cr);
 8696 
 8697  format %{
 8698     "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 8699     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8700  %}
 8701 
 8702  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
 8703             aarch64_enc_cset_eq(res));
 8704 
 8705   ins_pipe(pipe_slow);
 8706 %}
 8707 
 8708 instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
 8709 
 8710   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
 8711   ins_cost(2 * VOLATILE_REF_COST);
 8712 
 8713   effect(KILL cr);
 8714 
 8715  format %{
 8716     "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
 8717     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8718  %}
 8719 
 8720  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
 8721             aarch64_enc_cset_eq(res));
 8722 
 8723   ins_pipe(pipe_slow);
 8724 %}
 8725 
 8726 instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
 8727 
 8728   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
 8729   predicate(n->as_LoadStore()->barrier_data() == 0);
 8730   ins_cost(2 * VOLATILE_REF_COST);
 8731 
 8732   effect(KILL cr);
 8733 
 8734  format %{
 8735     "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
 8736     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8737  %}
 8738 
 8739  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
 8740             aarch64_enc_cset_eq(res));
 8741 
 8742   ins_pipe(pipe_slow);
 8743 %}
 8744 
 8745 instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
 8746 
 8747   match(Set res (CompareAndSwapN mem (Binary oldval newval)));
 8748   ins_cost(2 * VOLATILE_REF_COST);
 8749 
 8750   effect(KILL cr);
 8751 
 8752  format %{
 8753     "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
 8754     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8755  %}
 8756 
 8757  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
 8758             aarch64_enc_cset_eq(res));
 8759 
 8760   ins_pipe(pipe_slow);
 8761 %}
 8762 
 8763 // alternative CompareAndSwapX when we are eliding barriers
 8764 
// Acquiring CompareAndSwap{B,S,I,L,P,N}Acq rules, selected by
// needs_acquiring_load_exclusive(n) when barriers are being elided (see
// the section comment above).  They use the *_acq encodings instead of the
// plain ones, and their lower ins_cost(VOLATILE_REF_COST) makes the matcher
// prefer them over the plain rules whenever the predicate holds.

// Acquiring byte CAS; $res <-- 1 on success, 0 on failure.
instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  // cset_eq below consumes the flags produced by the cmpxchg.
  effect(KILL cr);

  format %{
    "cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring short (halfword) CAS.
instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring 32-bit int CAS.
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring 64-bit long CAS.
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring pointer CAS; also requires the node to carry no GC barrier
// data (decorated accesses are handled by GC-specific rules).
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring narrow-oop CAS (32-bit cmpxchgw_acq encoding).
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8878 
 8879 
 8880 // ---------------------------------------------------------------------
 8881 
 8882 // BEGIN This section of the file is automatically generated. Do not edit --------------
 8883 
 8884 // Sundry CAS operations.  Note that release is always true,
 8885 // regardless of the memory ordering of the CAS.  This is because we
 8886 // need the volatile case to be sequentially consistent but there is
 8887 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
 8888 // can't check the type of memory ordering here, so we always emit a
 8889 // STLXR.
 8890 
 8891 // This section is generated from cas.m4
 8892 
 8893 
// NOTE(review): everything below is generated from cas.m4 — make any change
// in the m4 source and regenerate; do not hand-edit these patterns.
// Relaxed CompareAndExchange{B,S,I,L,N,P}: unlike CompareAndSwap these
// return the value previously found at $mem (hence TEMP_DEF res and no
// cset).  acquire is false here; release is always true, per the section
// header comment above.  The byte/short forms sign-extend the fetched
// value to int (sxtbw/sxthw) before returning it.
// NOTE(review): the ", weak)" in the format strings looks like a mislabel —
// all of these pass /*weak*/ false — confirm against cas.m4 before trusting
// the debug output.
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 8998 
// NOTE(review): generated from cas.m4 — change the m4 source and
// regenerate; do not hand-edit.
// Acquiring CompareAndExchange{B,S,I,L,N,P}Acq variants, selected by
// needs_acquiring_load_exclusive(n): identical to the relaxed forms above
// except acquire is true and ins_cost is halved so these rules win when
// the predicate holds.
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 9108 
// NOTE(review): generated from cas.m4 — change the m4 source and
// regenerate; do not hand-edit.
// Relaxed WeakCompareAndSwap{B,S,I,L,N,P}: weak CAS may fail spuriously,
// so /*weak*/ true is passed and no result register is needed from the
// cmpxchg itself (noreg); the 0/1 success flag is materialized afterwards
// with csetw, which is why the flags register is killed.
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9223 
// NOTE(review): generated from cas.m4 — change the m4 source and
// regenerate; do not hand-edit.
// Acquiring WeakCompareAndSwap{B,S,I,L,N,P}Acq variants: same as the
// relaxed weak forms above but with acquire true and halved ins_cost so
// they are preferred when needs_acquiring_load_exclusive(n) holds.
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9343 
 9344 // END This section of the file is automatically generated. Do not edit --------------
 9345 // ---------------------------------------------------------------------
 9346 
// Unconditional atomic exchange (GetAndSet{I,L,N,P}).  $prev receives the
// value held in memory before the swap.  The indirect operand is a bare
// base register, hence as_Register($mem$$base) in the encodings.

// 32-bit int exchange.
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 64-bit long exchange.
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Narrow-oop (compressed pointer) exchange; uses the 32-bit xchgw form.
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Pointer exchange; only when the node carries no GC barrier data —
// decorated accesses are handled by GC-specific rules elsewhere.
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9387 
 9388 instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
 9389   predicate(needs_acquiring_load_exclusive(n));
 9390   match(Set prev (GetAndSetI mem newv));
 9391   ins_cost(VOLATILE_REF_COST);
 9392   format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]" %}
 9393   ins_encode %{
 9394     __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
 9395   %}
 9396   ins_pipe(pipe_serial);
 9397 %}
 9398 
 9399 instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
 9400   predicate(needs_acquiring_load_exclusive(n));
 9401   match(Set prev (GetAndSetL mem newv));
 9402   ins_cost(VOLATILE_REF_COST);
 9403   format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
 9404   ins_encode %{
 9405     __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
 9406   %}
 9407   ins_pipe(pipe_serial);
 9408 %}
 9409 
// Atomic exchange, narrow oop (compressed ptr, word-sized), acquiring
// form: selected when needs_acquiring_load_exclusive(n) holds; uses the
// word variant atomic_xchgalw.
instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetN mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9420 
// Atomic exchange, pointer, acquiring form: requires both
// needs_acquiring_load_exclusive(n) and no GC barrier data on the node.
instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9431 
 9432 
// Atomic fetch-and-add, long, register increment; the GetAndAddL result
// is delivered in $newval.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9442 
// As get_and_addL but matched when the node's result is unused
// (result_not_used()); passes noreg to discard the fetched value and
// costs one unit less, so it is preferred in that case.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9453 
// Atomic fetch-and-add, long, immediate increment (immLAddSub: an
// add/sub-encodable constant); result in $newval.
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9463 
// Immediate-increment long fetch-and-add with unused result: discards
// the fetched value via noreg; lower cost makes it preferred when the
// result is not consumed.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9474 
// Atomic fetch-and-add, int (word form atomic_addw), register
// increment; result in $newval.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9484 
// Int fetch-and-add with unused result (result_not_used()): discards
// the fetched value via noreg; cheaper than the result-producing form.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9495 
// Atomic fetch-and-add, int, immediate increment (immIAddSub); result
// in $newval.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9505 
// Immediate-increment int fetch-and-add with unused result; discards
// the fetched value via noreg.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9516 
// Acquiring form of get_and_addL: selected when
// needs_acquiring_load_exclusive(n) holds; uses atomic_addal and a
// lower cost so it wins over the plain rule for such nodes.
instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9527 
// Acquiring long fetch-and-add with unused result: both
// result_not_used() and needs_acquiring_load_exclusive(n) must hold.
instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9538 
// Acquiring long fetch-and-add with immediate increment (immLAddSub).
instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9549 
// Acquiring, immediate-increment long fetch-and-add with unused result.
instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9560 
// Acquiring form of get_and_addI (word variant atomic_addalw).
instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9571 
// Acquiring int fetch-and-add with unused result.
instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9582 
// Acquiring int fetch-and-add with immediate increment (immIAddSub).
instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9593 
// Acquiring, immediate-increment int fetch-and-add with unused result.
instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9604 
 9605 // Manifest a CmpU result in an integer register.
 9606 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Three-way unsigned int compare: dst = -1, 0 or 1.
// cmpw sets flags; csetw makes dst 1 when src1 != src2, else 0;
// cnegw negates dst when src1 < src2 (unsigned: LO), giving -1/0/1.
instruct cmpU3_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg flags)
%{
  match(Set dst (CmpU3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmpw $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpU3(reg)"
  %}
  ins_encode %{
    __ cmpw($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}
 9626 
// Three-way unsigned int compare against an add/sub-encodable
// immediate.  subsw into zr is a compare that discards the difference;
// csetw/cnegw then derive -1/0/1 as in cmpU3_reg_reg.
instruct cmpU3_reg_imm(iRegINoSp dst, iRegI src1, immIAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpU3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subsw zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpU3(imm)"
  %}
  ins_encode %{
    __ subsw(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}
 9646 
 9647 // Manifest a CmpUL result in an integer register.
 9648 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Three-way unsigned long compare: dst = -1, 0 or 1.
// 64-bit cmp sets flags; csetw/cnegw (unsigned LO) derive -1/0/1 in a
// 32-bit destination.
instruct cmpUL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpUL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmp $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpUL3(reg)"
  %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}
 9668 
// Three-way unsigned long compare against an add/sub-encodable
// immediate (immLAddSub fits in 32 bits, hence the int32_t cast).
instruct cmpUL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpUL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subs zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpUL3(imm)"
  %}
  ins_encode %{
    __ subs(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}
 9688 
 9689 // Manifest a CmpL result in an integer register.
 9690 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Three-way signed long compare: dst = -1, 0 or 1.
// Same shape as the unsigned rules but negates on signed less-than (LT).
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmp $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lt\t# CmpL3(reg)"
  %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
 9710 
// Three-way signed long compare against an add/sub-encodable immediate;
// negates on signed less-than (LT).
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subs zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lt\t# CmpL3(imm)"
  %}
  ins_encode %{
    __ subs(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
 9730 
 9731 // ============================================================================
 9732 // Conditional Move Instructions
 9733 
 9734 // n.b. we have identical rules for both a signed compare op (cmpOp)
 9735 // and an unsigned compare op (cmpOpU). it would be nice if we could
 9736 // define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
 9738 // opclass does not live up to the COND_INTER interface of its
 9739 // component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
 9741 // which throws a ShouldNotHappen. So, we have to provide two flavours
 9742 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
 9743 
// Conditional move, int, signed flags: dst = $cmp ? $src2 : $src1
// (CSEL takes its first source register when the condition holds).
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9759 
// Conditional move, int, unsigned flags variant (cmpOpU/rFlagsRegU):
// dst = $cmp ? $src2 : $src1.
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9775 
 9776 // special cases where one arg is zero
 9777 
 9778 // n.b. this is selected in preference to the rule above because it
 9779 // avoids loading constant 0 into a source register
 9780 
 9781 // TODO
 9782 // we ought only to be able to cull one of these variants as the ideal
 9783 // transforms ought always to order the zero consistently (to left/right?)
 9784 
// Conditional move, int, first arg zero: dst = $cmp ? $src : 0.
// zr supplies the zero, avoiding a constant load into a register.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9800 
// Unsigned-flags variant of cmovI_zero_reg: dst = $cmp ? $src : 0.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9816 
// Conditional move, int, second arg zero: dst = $cmp ? 0 : $src.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9832 
// Unsigned-flags variant of cmovI_reg_zero: dst = $cmp ? 0 : $src.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9848 
 9849 // special case for creating a boolean 0 or 1
 9850 
 9851 // n.b. this is selected in preference to the rule above because it
 9852 // avoids loading constants 0 and 1 into a source register
 9853 
// Boolean materialization: dst = $cmp ? 0 : 1 using a single
// csincw dst, zr, zr (zr when true, zr+1 when false), so neither
// constant needs a register.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
 9872 
// Unsigned-flags variant of cmovI_reg_zero_one: dst = $cmp ? 0 : 1.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
 9891 
// Conditional move, long, signed flags: dst = $cmp ? $src2 : $src1.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9907 
// Conditional move, long, unsigned flags: dst = $cmp ? $src2 : $src1.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9923 
 9924 // special cases where one arg is zero
 9925 
// Conditional move, long, second arg zero: dst = $cmp ? 0 : $src.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9941 
// Unsigned-flags variant of cmovL_reg_zero: dst = $cmp ? 0 : $src.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9957 
// Conditional move, long, first arg zero: dst = $cmp ? $src : 0.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9973 
// Unsigned-flags variant of cmovL_zero_reg: dst = $cmp ? $src : 0.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9989 
// Conditional move, pointer, signed flags: dst = $cmp ? $src2 : $src1.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10005 
// Conditional move, pointer, unsigned flags: dst = $cmp ? $src2 : $src1.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10021 
10022 // special cases where one arg is zero
10023 
// Conditional move, pointer, second arg null: dst = $cmp ? 0 : $src.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10039 
// Unsigned-flags variant of cmovP_reg_zero: dst = $cmp ? 0 : $src.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10055 
// Conditional move, pointer, first arg null: dst = $cmp ? $src : 0.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10071 
// Unsigned-flags variant of cmovP_zero_reg: dst = $cmp ? $src : 0.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10087 
// Conditional move, narrow oop (compressed ptr, word-sized), signed
// flags: dst = $cmp ? $src2 : $src1.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10103 
// Conditional move, narrow oop (compressed ptr, word-sized), unsigned
// flags: dst = $cmp ? $src2 : $src1.
// Fix: the format string previously said "signed, compressed ptr",
// which was a copy-paste from cmovN_reg_reg; this is the cmpOpU /
// rFlagsRegU (unsigned) variant, matching the labeling of every other
// U-suffixed cmov rule.  The format text only affects disassembly
// output (-XX:+PrintOptoAssembly), not generated code.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10119 
10120 // special cases where one arg is zero
10121 
// Conditional move, narrow oop, second arg null: dst = $cmp ? 0 : $src.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10137 
// Unsigned-flags variant of cmovN_reg_zero: dst = $cmp ? 0 : $src.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10153 
// Conditional move, narrow oop, first arg null: dst = $cmp ? $src : 0.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10169 
// Unsigned-flags variant of cmovN_zero_reg: dst = $cmp ? $src : 0.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10185 
// Conditional move, float, signed flags: dst = $cmp ? $src2 : $src1
// via FCSEL (single-precision); first fcsels source taken when the
// condition holds.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10203 
// Unsigned-flags variant of cmovF_reg: dst = $cmp ? $src2 : $src1.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10221 
// Conditional move, double, signed flags: dst = $cmp ? $src2 : $src1
// via FCSEL (double-precision fcseld).
// Fix: the format comment previously said "cmove float" although this
// rule matches CMoveD on vRegD operands and emits fcseld; labeled
// "double" for consistency with the float rules.  Format text only
// affects disassembly output, not generated code.
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10239 
// Conditional move, double, unsigned flags: dst = $cmp ? $src2 : $src1
// via FCSEL (double-precision fcseld).
// Fix: the format comment previously said "cmove float" although this
// rule matches CMoveD on vRegD operands and emits fcseld; labeled
// "double" for consistency with the float rules.  Format text only
// affects disassembly output, not generated code.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10257 
10258 // ============================================================================
10259 // Arithmetic Instructions
10260 //
10261 
10262 // Integer Addition
10263 
10264 // TODO
10265 // these currently employ operations which do not set CR and hence are
10266 // not flagged as killing CR but we would like to isolate the cases
10267 // where we want to set flags from those where we don't. need to work
10268 // out how to do that.
10269 
// Integer addition, register + register: dst = src1 + src2 (32-bit
// addw; does not set flags).
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10284 
// Integer addition, register + add/sub-encodable immediate.  The shared
// aarch64_enc_addsubw_imm encoder handles both add and sub; opcode 0x0
// selects add.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10298 
// Integer addition of an immediate to the low 32 bits of a long
// (AddI (ConvL2I src1) src2): the 32-bit addw on the long's register
// performs the truncating conversion implicitly, so no separate
// narrowing instruction is needed.
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10312 
10313 // Pointer Addition
// Pointer addition, register + long offset: dst = src1 + src2 (64-bit
// add).
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10328 
// Pointer add with an int offset: folds the ConvI2L into the add via a
// sign-extending (sxtw) register operand, saving a separate extend insn.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}
10343 
// Pointer add with a scaled long index: base + (index << scale), emitted as
// a single lea with an lsl-scaled register address.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10358 
// Pointer add with a sign-extended, scaled int index: folds both the
// ConvI2L and the shift into one lea using an sxtw-scaled address operand.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10373 
// Sign-extend an int to long and shift left, fused into one sbfiz.
// lsb = scale & 63; width = min(32, (-scale) & 63) so that at most the
// 32 significant bits of the int source are inserted.
// NOTE(review): rFlagsReg cr appears unused by the encoding — presumably
// retained for register-allocator reasons; confirm before removing.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN2(32, (int)((-$scale$$constant) & 63)));
  %}

  ins_pipe(ialu_reg_shift);
%}
10388 
10389 // Pointer Immediate Addition
10390 // n.b. this needs to be more expensive than using an indirect memory
10391 // operand
// Pointer add of an add/sub-encodable long immediate.
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10405 
10406 // Long Addition
// 64-bit long add, register + register.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10422 
// Long Immediate Addition. No constant pool entries required.
// 64-bit long add of an add/sub-encodable immediate.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10437 
10438 // Integer Subtraction
// 32-bit integer subtract, register - register. Emits subw; does not set flags.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10453 
10454 // Immediate Subtraction
// 32-bit integer subtract of an add/sub-encodable immediate.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10468 
10469 // Long Subtraction
// 64-bit long subtract, register - register.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10485 
// Long Immediate Subtraction. No constant pool entries required.
// 64-bit long subtract of an add/sub-encodable immediate.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // Was "sub$dst, ..." — missing separator between mnemonic and first
  // operand made -XX:+PrintOptoAssembly output run the two together.
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10500 
10501 // Integer Negation (special case for sub)
10502 
// 32-bit integer negation (0 - src), special-cased from SubI with a zero LHS.
// NOTE(review): rFlagsReg cr appears unused by the encoding — confirm intent.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10516 
10517 // Long Negation
10518 
// 64-bit long negation (0 - src), special-cased from SubL with a zero LHS.
// NOTE(review): rFlagsReg cr appears unused by the encoding — confirm intent.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10532 
10533 // Integer Multiply
10534 
// 32-bit integer multiply.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10549 
// Widening 32x32->64 signed multiply: MulL of two ConvI2L inputs is a
// single smull, avoiding the two explicit sign extensions.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10564 
10565 // Long Multiply
10566 
// 64-bit long multiply (low 64 bits of the product).
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10581 
// High 64 bits of a signed 64x64 multiply (MulHiL) via smulh.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10597 
// High 64 bits of an unsigned 64x64 multiply (UMulHiL) via umulh.
instruct umulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (UMulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "umulh   $dst, $src1, $src2\t# umulhi" %}

  ins_encode %{
    __ umulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10613 
10614 // Combined Integer Multiply & Add/Sub
10615 
// Fused 32-bit multiply-add: dst = src3 + src1 * src2.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Format fixed from "madd" to "maddw": the encoding emits the 32-bit
  // maddw, and the other 32-bit patterns here print the w-suffixed form.
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10631 
// Fused 32-bit multiply-subtract: dst = src3 - src1 * src2.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Format fixed from "msub" to "msubw": the encoding emits the 32-bit
  // msubw, matching the w-suffixed printing of other 32-bit patterns.
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10647 
10648 // Combined Integer Multiply & Neg
10649 
// Fused 32-bit multiply-negate: dst = -(src1 * src2), matched from
// MulI with a negated first operand.
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
  match(Set dst (MulI (SubI zero src1) src2));

  ins_cost(INSN_COST * 3);
  // Format fixed from "mneg" to "mnegw": the encoding emits the 32-bit mnegw.
  format %{ "mnegw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mnegw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10664 
10665 // Combined Long Multiply & Add/Sub
10666 
// Fused 64-bit multiply-add: dst = src3 + src1 * src2.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10682 
// Fused 64-bit multiply-subtract: dst = src3 - src1 * src2.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10698 
10699 // Combined Long Multiply & Neg
10700 
// Fused 64-bit multiply-negate: dst = -(src1 * src2).
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mneg(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10715 
10716 // Combine Integer Signed Multiply & Add/Sub/Neg Long
10717 
// Widening signed 32x32->64 multiply-add: dst = src3 + sext(src1)*sext(src2),
// fusing the two ConvI2L and the add into a single smaddl.
instruct smaddL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (AddL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smaddl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smaddl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10733 
// Widening signed 32x32->64 multiply-subtract: dst = src3 - sext(src1)*sext(src2).
instruct smsubL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (SubL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smsubl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smsubl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10749 
// Widening signed 32x32->64 multiply-negate: dst = -(sext(src1)*sext(src2)).
instruct smnegL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero (ConvI2L src1)) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smnegl  $dst, $src1, $src2" %}

  ins_encode %{
    __ smnegl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10764 
10765 // Combined Multiply-Add Shorts into Integer (dst = src1 * src2 + src3 * src4)
10766 
// MulAddS2I: dst = src1*src2 + src3*src4, as a mulw into rscratch1
// followed by a maddw that accumulates the second product.
instruct muladdS2I(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3, iRegIorL2I src4) %{
  match(Set dst (MulAddS2I (Binary src1 src2) (Binary src3 src4)));

  ins_cost(INSN_COST * 5);
  format %{ "mulw  rscratch1, $src1, $src2\n\t"
            "maddw $dst, $src3, $src4, rscratch1" %}

  ins_encode %{
    __ mulw(rscratch1, as_Register($src1$$reg), as_Register($src2$$reg));
    __ maddw(as_Register($dst$$reg), as_Register($src3$$reg), as_Register($src4$$reg), rscratch1); %}

  ins_pipe(imac_reg_reg);
%}
10780 
10781 // Integer Divide
10782 
// 32-bit signed integer divide.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10792 
10793 // Long Divide
10794 
// 64-bit signed long divide.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10804 
10805 // Integer Remainder
10806 
// 32-bit signed remainder: sdivw then msubw, since AArch64 has no
// hardware remainder instruction.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10817 
10818 // Long Remainder
10819 
// 64-bit signed remainder: sdiv then msub (no hardware remainder insn).
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Format fixed: second line now uses "\n\t" like modI/UmodI so the
  // continuation is indented in -XX:+PrintOptoAssembly output.
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10830 
10831 // Unsigned Integer Divide
10832 
// 32-bit unsigned integer divide.
instruct UdivI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (UDivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "udivw  $dst, $src1, $src2" %}

  ins_encode %{
    __ udivw($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(idiv_reg_reg);
%}
10845 
10846 //  Unsigned Long Divide
10847 
// 64-bit unsigned long divide.
instruct UdivL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (UDivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "udiv   $dst, $src1, $src2" %}

  ins_encode %{
    __ udiv($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(ldiv_reg_reg);
%}
10860 
10861 // Unsigned Integer Remainder
10862 
// 32-bit unsigned remainder: udivw then msubw.
instruct UmodI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (UModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "udivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode %{
    __ udivw(rscratch1, $src1$$Register, $src2$$Register);
    __ msubw($dst$$Register, rscratch1, $src2$$Register, $src1$$Register);
  %}

  ins_pipe(idiv_reg_reg);
%}
10877 
10878 // Unsigned Long Remainder
10879 
// 64-bit unsigned remainder: udiv then msub.
instruct UModL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (UModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Format fixed: second line now uses "\n\t" like UmodI so the
  // continuation is indented in -XX:+PrintOptoAssembly output.
  format %{ "udiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode %{
    __ udiv(rscratch1, $src1$$Register, $src2$$Register);
    __ msub($dst$$Register, rscratch1, $src2$$Register, $src1$$Register);
  %}

  ins_pipe(ldiv_reg_reg);
%}
10894 
10895 // Integer Shifts
10896 
10897 // Shift Left Register
// 32-bit shift left by a register amount (lslvw).
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10912 
10913 // Shift Left Immediate
// 32-bit shift left by an immediate; count masked to 0..31 per Java semantics.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10928 
10929 // Shift Right Logical Register
// 32-bit logical (unsigned) shift right by a register amount (lsrvw).
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10944 
10945 // Shift Right Logical Immediate
// 32-bit logical shift right by an immediate; count masked to 0..31.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10960 
10961 // Shift Right Arithmetic Register
// 32-bit arithmetic (signed) shift right by a register amount (asrvw).
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10976 
10977 // Shift Right Arithmetic Immediate
// 32-bit arithmetic shift right by an immediate; count masked to 0..31.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10992 
10993 // Combined Int Mask and Right Shift (using UBFM)
10994 // TODO
10995 
10996 // Long Shifts
10997 
10998 // Shift Left Register
// 64-bit shift left by a register amount (lslv).
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11013 
11014 // Shift Left Immediate
// 64-bit shift left by an immediate; count masked to 0..63 per Java semantics.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11029 
11030 // Shift Right Logical Register
// 64-bit logical (unsigned) shift right by a register amount (lsrv).
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11045 
11046 // Shift Right Logical Immediate
// 64-bit logical shift right by an immediate; count masked to 0..63.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11061 
11062 // A special-case pattern for card table stores.
// Logical shift right of a pointer reinterpreted as a long (CastP2X),
// e.g. for card-table index computation in GC barriers.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11077 
11078 // Shift Right Arithmetic Register
// 64-bit arithmetic (signed) shift right by a register amount (asrv).
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11093 
11094 // Shift Right Arithmetic Immediate
// 64-bit arithmetic shift right by an immediate; count masked to 0..63.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11109 
11110 // BEGIN This section of the file is automatically generated. Do not edit --------------
11111 // This section is generated from aarch64_ad.m4
11112 
11113 // This pattern is automatically generated from aarch64_ad.m4
11114 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11115 instruct regL_not_reg(iRegLNoSp dst,
11116                          iRegL src1, immL_M1 m1,
11117                          rFlagsReg cr) %{
11118   match(Set dst (XorL src1 m1));
11119   ins_cost(INSN_COST);
11120   format %{ "eon  $dst, $src1, zr" %}
11121 
11122   ins_encode %{
11123     __ eon(as_Register($dst$$reg),
11124               as_Register($src1$$reg),
11125               zr,
11126               Assembler::LSL, 0);
11127   %}
11128 
11129   ins_pipe(ialu_reg);
11130 %}
11131 
11132 // This pattern is automatically generated from aarch64_ad.m4
11133 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11134 instruct regI_not_reg(iRegINoSp dst,
11135                          iRegIorL2I src1, immI_M1 m1,
11136                          rFlagsReg cr) %{
11137   match(Set dst (XorI src1 m1));
11138   ins_cost(INSN_COST);
11139   format %{ "eonw  $dst, $src1, zr" %}
11140 
11141   ins_encode %{
11142     __ eonw(as_Register($dst$$reg),
11143               as_Register($src1$$reg),
11144               zr,
11145               Assembler::LSL, 0);
11146   %}
11147 
11148   ins_pipe(ialu_reg);
11149 %}
11150 
11151 // This pattern is automatically generated from aarch64_ad.m4
11152 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11153 instruct NegI_reg_URShift_reg(iRegINoSp dst,
11154                               immI0 zero, iRegIorL2I src1, immI src2) %{
11155   match(Set dst (SubI zero (URShiftI src1 src2)));
11156 
11157   ins_cost(1.9 * INSN_COST);
11158   format %{ "negw  $dst, $src1, LSR $src2" %}
11159 
11160   ins_encode %{
11161     __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
11162             Assembler::LSR, $src2$$constant & 0x1f);
11163   %}
11164 
11165   ins_pipe(ialu_reg_shift);
11166 %}
11167 
11168 // This pattern is automatically generated from aarch64_ad.m4
11169 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11170 instruct NegI_reg_RShift_reg(iRegINoSp dst,
11171                               immI0 zero, iRegIorL2I src1, immI src2) %{
11172   match(Set dst (SubI zero (RShiftI src1 src2)));
11173 
11174   ins_cost(1.9 * INSN_COST);
11175   format %{ "negw  $dst, $src1, ASR $src2" %}
11176 
11177   ins_encode %{
11178     __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
11179             Assembler::ASR, $src2$$constant & 0x1f);
11180   %}
11181 
11182   ins_pipe(ialu_reg_shift);
11183 %}
11184 
11185 // This pattern is automatically generated from aarch64_ad.m4
11186 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11187 instruct NegI_reg_LShift_reg(iRegINoSp dst,
11188                               immI0 zero, iRegIorL2I src1, immI src2) %{
11189   match(Set dst (SubI zero (LShiftI src1 src2)));
11190 
11191   ins_cost(1.9 * INSN_COST);
11192   format %{ "negw  $dst, $src1, LSL $src2" %}
11193 
11194   ins_encode %{
11195     __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
11196             Assembler::LSL, $src2$$constant & 0x1f);
11197   %}
11198 
11199   ins_pipe(ialu_reg_shift);
11200 %}
11201 
11202 // This pattern is automatically generated from aarch64_ad.m4
11203 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11204 instruct NegL_reg_URShift_reg(iRegLNoSp dst,
11205                               immL0 zero, iRegL src1, immI src2) %{
11206   match(Set dst (SubL zero (URShiftL src1 src2)));
11207 
11208   ins_cost(1.9 * INSN_COST);
11209   format %{ "neg  $dst, $src1, LSR $src2" %}
11210 
11211   ins_encode %{
11212     __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
11213             Assembler::LSR, $src2$$constant & 0x3f);
11214   %}
11215 
11216   ins_pipe(ialu_reg_shift);
11217 %}
11218 
11219 // This pattern is automatically generated from aarch64_ad.m4
11220 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11221 instruct NegL_reg_RShift_reg(iRegLNoSp dst,
11222                               immL0 zero, iRegL src1, immI src2) %{
11223   match(Set dst (SubL zero (RShiftL src1 src2)));
11224 
11225   ins_cost(1.9 * INSN_COST);
11226   format %{ "neg  $dst, $src1, ASR $src2" %}
11227 
11228   ins_encode %{
11229     __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
11230             Assembler::ASR, $src2$$constant & 0x3f);
11231   %}
11232 
11233   ins_pipe(ialu_reg_shift);
11234 %}
11235 
11236 // This pattern is automatically generated from aarch64_ad.m4
11237 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11238 instruct NegL_reg_LShift_reg(iRegLNoSp dst,
11239                               immL0 zero, iRegL src1, immI src2) %{
11240   match(Set dst (SubL zero (LShiftL src1 src2)));
11241 
11242   ins_cost(1.9 * INSN_COST);
11243   format %{ "neg  $dst, $src1, LSL $src2" %}
11244 
11245   ins_encode %{
11246     __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
11247             Assembler::LSL, $src2$$constant & 0x3f);
11248   %}
11249 
11250   ins_pipe(ialu_reg_shift);
11251 %}
11252 
11253 // This pattern is automatically generated from aarch64_ad.m4
11254 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11255 instruct AndI_reg_not_reg(iRegINoSp dst,
11256                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
11257   match(Set dst (AndI src1 (XorI src2 m1)));
11258   ins_cost(INSN_COST);
11259   format %{ "bicw  $dst, $src1, $src2" %}
11260 
11261   ins_encode %{
11262     __ bicw(as_Register($dst$$reg),
11263               as_Register($src1$$reg),
11264               as_Register($src2$$reg),
11265               Assembler::LSL, 0);
11266   %}
11267 
11268   ins_pipe(ialu_reg_reg);
11269 %}
11270 
11271 // This pattern is automatically generated from aarch64_ad.m4
11272 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11273 instruct AndL_reg_not_reg(iRegLNoSp dst,
11274                          iRegL src1, iRegL src2, immL_M1 m1) %{
11275   match(Set dst (AndL src1 (XorL src2 m1)));
11276   ins_cost(INSN_COST);
11277   format %{ "bic  $dst, $src1, $src2" %}
11278 
11279   ins_encode %{
11280     __ bic(as_Register($dst$$reg),
11281               as_Register($src1$$reg),
11282               as_Register($src2$$reg),
11283               Assembler::LSL, 0);
11284   %}
11285 
11286   ins_pipe(ialu_reg_reg);
11287 %}
11288 
11289 // This pattern is automatically generated from aarch64_ad.m4
11290 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11291 instruct OrI_reg_not_reg(iRegINoSp dst,
11292                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
11293   match(Set dst (OrI src1 (XorI src2 m1)));
11294   ins_cost(INSN_COST);
11295   format %{ "ornw  $dst, $src1, $src2" %}
11296 
11297   ins_encode %{
11298     __ ornw(as_Register($dst$$reg),
11299               as_Register($src1$$reg),
11300               as_Register($src2$$reg),
11301               Assembler::LSL, 0);
11302   %}
11303 
11304   ins_pipe(ialu_reg_reg);
11305 %}
11306 
11307 // This pattern is automatically generated from aarch64_ad.m4
11308 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11309 instruct OrL_reg_not_reg(iRegLNoSp dst,
11310                          iRegL src1, iRegL src2, immL_M1 m1) %{
11311   match(Set dst (OrL src1 (XorL src2 m1)));
11312   ins_cost(INSN_COST);
11313   format %{ "orn  $dst, $src1, $src2" %}
11314 
11315   ins_encode %{
11316     __ orn(as_Register($dst$$reg),
11317               as_Register($src1$$reg),
11318               as_Register($src2$$reg),
11319               Assembler::LSL, 0);
11320   %}
11321 
11322   ins_pipe(ialu_reg_reg);
11323 %}
11324 
11325 // This pattern is automatically generated from aarch64_ad.m4
11326 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11327 instruct XorI_reg_not_reg(iRegINoSp dst,
11328                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
11329   match(Set dst (XorI m1 (XorI src2 src1)));
11330   ins_cost(INSN_COST);
11331   format %{ "eonw  $dst, $src1, $src2" %}
11332 
11333   ins_encode %{
11334     __ eonw(as_Register($dst$$reg),
11335               as_Register($src1$$reg),
11336               as_Register($src2$$reg),
11337               Assembler::LSL, 0);
11338   %}
11339 
11340   ins_pipe(ialu_reg_reg);
11341 %}
11342 
11343 // This pattern is automatically generated from aarch64_ad.m4
11344 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// -1L ^ (src2 ^ src1)  ==>  eon  (64-bit XOR-NOT: equals src1 ^ ~src2,
// so the outer XOR with -1 is folded in; LSL #0 means src2 is unshifted).
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11360 
11361 // This pattern is automatically generated from aarch64_ad.m4
11362 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11363 // val & (-1 ^ (val >>> shift)) ==> bicw
// src1 & ((src2 >>> src3) ^ -1)  ==>  bicw with an LSR-shifted operand
// (32-bit AND-NOT of the shifted value); shift amount masked to 0..31.
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11381 
11382 // This pattern is automatically generated from aarch64_ad.m4
11383 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11384 // val & (-1 ^ (val >>> shift)) ==> bic
// src1 & ((src2 >>> src3) ^ -1L)  ==>  bic with an LSR-shifted operand
// (64-bit AND-NOT of the shifted value); shift amount masked to 0..63.
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11402 
11403 // This pattern is automatically generated from aarch64_ad.m4
11404 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11405 // val & (-1 ^ (val >> shift)) ==> bicw
// src1 & ((src2 >> src3) ^ -1)  ==>  bicw with an ASR-shifted operand
// (32-bit AND-NOT of the shifted value); shift amount masked to 0..31.
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11423 
11424 // This pattern is automatically generated from aarch64_ad.m4
11425 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11426 // val & (-1 ^ (val >> shift)) ==> bic
// src1 & ((src2 >> src3) ^ -1L)  ==>  bic with an ASR-shifted operand
// (64-bit AND-NOT of the shifted value); shift amount masked to 0..63.
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11444 
11445 // This pattern is automatically generated from aarch64_ad.m4
11446 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11447 // val & (-1 ^ (val ror shift)) ==> bicw
// src1 & ((src2 ror src3) ^ -1)  ==>  bicw with a ROR-shifted operand
// (32-bit AND-NOT of the rotated value); rotate amount masked to 0..31.
instruct AndI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11465 
11466 // This pattern is automatically generated from aarch64_ad.m4
11467 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11468 // val & (-1 ^ (val ror shift)) ==> bic
// src1 & ((src2 ror src3) ^ -1L)  ==>  bic with a ROR-shifted operand
// (64-bit AND-NOT of the rotated value); rotate amount masked to 0..63.
instruct AndL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11486 
11487 // This pattern is automatically generated from aarch64_ad.m4
11488 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11489 // val & (-1 ^ (val << shift)) ==> bicw
// src1 & ((src2 << src3) ^ -1)  ==>  bicw with an LSL-shifted operand
// (32-bit AND-NOT of the shifted value); shift amount masked to 0..31.
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11507 
11508 // This pattern is automatically generated from aarch64_ad.m4
11509 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11510 // val & (-1 ^ (val << shift)) ==> bic
// src1 & ((src2 << src3) ^ -1L)  ==>  bic with an LSL-shifted operand
// (64-bit AND-NOT of the shifted value); shift amount masked to 0..63.
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11528 
11529 // This pattern is automatically generated from aarch64_ad.m4
11530 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11531 // val ^ (-1 ^ (val >>> shift)) ==> eonw
// -1 ^ ((src2 >>> src3) ^ src1)  ==>  eonw, i.e. src1 XOR-NOT the
// LSR-shifted src2 (32-bit); shift amount masked to 0..31.
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11549 
11550 // This pattern is automatically generated from aarch64_ad.m4
11551 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11552 // val ^ (-1 ^ (val >>> shift)) ==> eon
// -1L ^ ((src2 >>> src3) ^ src1)  ==>  eon, i.e. src1 XOR-NOT the
// LSR-shifted src2 (64-bit); shift amount masked to 0..63.
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11570 
11571 // This pattern is automatically generated from aarch64_ad.m4
11572 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11573 // val ^ (-1 ^ (val >> shift)) ==> eonw
// -1 ^ ((src2 >> src3) ^ src1)  ==>  eonw, i.e. src1 XOR-NOT the
// ASR-shifted src2 (32-bit); shift amount masked to 0..31.
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11591 
11592 // This pattern is automatically generated from aarch64_ad.m4
11593 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11594 // val ^ (-1 ^ (val >> shift)) ==> eon
// -1L ^ ((src2 >> src3) ^ src1)  ==>  eon, i.e. src1 XOR-NOT the
// ASR-shifted src2 (64-bit); shift amount masked to 0..63.
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11612 
11613 // This pattern is automatically generated from aarch64_ad.m4
11614 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11615 // val ^ (-1 ^ (val ror shift)) ==> eonw
// -1 ^ ((src2 ror src3) ^ src1)  ==>  eonw, i.e. src1 XOR-NOT the
// ROR-rotated src2 (32-bit); rotate amount masked to 0..31.
instruct XorI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (XorI src4 (XorI(RotateRight src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11633 
11634 // This pattern is automatically generated from aarch64_ad.m4
11635 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11636 // val ^ (-1 ^ (val ror shift)) ==> eon
// -1L ^ ((src2 ror src3) ^ src1)  ==>  eon, i.e. src1 XOR-NOT the
// ROR-rotated src2 (64-bit); rotate amount masked to 0..63.
instruct XorL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (XorL src4 (XorL(RotateRight src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11654 
11655 // This pattern is automatically generated from aarch64_ad.m4
11656 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11657 // val ^ (-1 ^ (val << shift)) ==> eonw
// -1 ^ ((src2 << src3) ^ src1)  ==>  eonw, i.e. src1 XOR-NOT the
// LSL-shifted src2 (32-bit); shift amount masked to 0..31.
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11675 
11676 // This pattern is automatically generated from aarch64_ad.m4
11677 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11678 // val ^ (-1 ^ (val << shift)) ==> eon
// -1L ^ ((src2 << src3) ^ src1)  ==>  eon, i.e. src1 XOR-NOT the
// LSL-shifted src2 (64-bit); shift amount masked to 0..63.
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11696 
11697 // This pattern is automatically generated from aarch64_ad.m4
11698 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11699 // val | (-1 ^ (val >>> shift)) ==> ornw
// src1 | ((src2 >>> src3) ^ -1)  ==>  ornw with an LSR-shifted operand
// (32-bit OR-NOT of the shifted value); shift amount masked to 0..31.
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11717 
11718 // This pattern is automatically generated from aarch64_ad.m4
11719 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11720 // val | (-1 ^ (val >>> shift)) ==> orn
// src1 | ((src2 >>> src3) ^ -1L)  ==>  orn with an LSR-shifted operand
// (64-bit OR-NOT of the shifted value); shift amount masked to 0..63.
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11738 
11739 // This pattern is automatically generated from aarch64_ad.m4
11740 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11741 // val | (-1 ^ (val >> shift)) ==> ornw
// src1 | ((src2 >> src3) ^ -1)  ==>  ornw with an ASR-shifted operand
// (32-bit OR-NOT of the shifted value); shift amount masked to 0..31.
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11759 
11760 // This pattern is automatically generated from aarch64_ad.m4
11761 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11762 // val | (-1 ^ (val >> shift)) ==> orn
// src1 | ((src2 >> src3) ^ -1L)  ==>  orn with an ASR-shifted operand
// (64-bit OR-NOT of the shifted value); shift amount masked to 0..63.
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11780 
11781 // This pattern is automatically generated from aarch64_ad.m4
11782 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11783 // val | (-1 ^ (val ror shift)) ==> ornw
// src1 | ((src2 ror src3) ^ -1)  ==>  ornw with a ROR-shifted operand
// (32-bit OR-NOT of the rotated value); rotate amount masked to 0..31.
instruct OrI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11801 
11802 // This pattern is automatically generated from aarch64_ad.m4
11803 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11804 // val | (-1 ^ (val ror shift)) ==> orn
// src1 | ((src2 ror src3) ^ -1L)  ==>  orn with a ROR-shifted operand
// (64-bit OR-NOT of the rotated value); rotate amount masked to 0..63.
instruct OrL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11822 
11823 // This pattern is automatically generated from aarch64_ad.m4
11824 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11825 // val | (-1 ^ (val << shift)) ==> ornw
// src1 | ((src2 << src3) ^ -1)  ==>  ornw with an LSL-shifted operand
// (32-bit OR-NOT of the shifted value); shift amount masked to 0..31.
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11843 
11844 // This pattern is automatically generated from aarch64_ad.m4
11845 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11846 // val | (-1 ^ (val << shift)) ==> orn
// src1 | ((src2 << src3) ^ -1L)  ==>  orn with an LSL-shifted operand
// (64-bit OR-NOT of the shifted value); shift amount masked to 0..63.
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11864 
11865 // This pattern is automatically generated from aarch64_ad.m4
11866 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 >>> src3)  ==>  andw with an LSR-shifted register operand
// (32-bit); shift amount masked to 0..31.
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11885 
11886 // This pattern is automatically generated from aarch64_ad.m4
11887 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 >>> src3)  ==>  andr with an LSR-shifted register operand
// (64-bit; "andr" is the assembler name for the AND instruction);
// shift amount masked to 0..63.
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11906 
11907 // This pattern is automatically generated from aarch64_ad.m4
11908 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 >> src3)  ==>  andw with an ASR-shifted register operand
// (32-bit); shift amount masked to 0..31.
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11927 
11928 // This pattern is automatically generated from aarch64_ad.m4
11929 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 >> src3)  ==>  andr with an ASR-shifted register operand
// (64-bit); shift amount masked to 0..63.
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11948 
11949 // This pattern is automatically generated from aarch64_ad.m4
11950 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 << src3)  ==>  andw with an LSL-shifted register operand
// (32-bit); shift amount masked to 0..31.
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11969 
11970 // This pattern is automatically generated from aarch64_ad.m4
11971 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 << src3)  ==>  andr with an LSL-shifted register operand
// (64-bit); shift amount masked to 0..63.
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11990 
11991 // This pattern is automatically generated from aarch64_ad.m4
11992 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 ror src3)  ==>  andw with a ROR-shifted register operand
// (32-bit); rotate amount masked to 0..31.
instruct AndI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12011 
12012 // This pattern is automatically generated from aarch64_ad.m4
12013 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 ror src3)  ==>  andr with a ROR-shifted register operand
// (64-bit); rotate amount masked to 0..63.
instruct AndL_reg_RotateRight_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12032 
12033 // This pattern is automatically generated from aarch64_ad.m4
12034 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 >>> src3)  ==>  eorw with an LSR-shifted register operand
// (32-bit); shift amount masked to 0..31.
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12053 
12054 // This pattern is automatically generated from aarch64_ad.m4
12055 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 >>> src3)  ==>  eor with an LSR-shifted register operand
// (64-bit); shift amount masked to 0..63.
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12074 
12075 // This pattern is automatically generated from aarch64_ad.m4
12076 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 >> src3)  ==>  eorw with an ASR-shifted register operand
// (32-bit); shift amount masked to 0..31.
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12095 
12096 // This pattern is automatically generated from aarch64_ad.m4
12097 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 >> src3)  ==>  eor with an ASR-shifted register operand
// (64-bit); shift amount masked to 0..63.
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12116 
12117 // This pattern is automatically generated from aarch64_ad.m4
12118 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 << src3)  ==>  eorw with an LSL-shifted register operand
// (32-bit); shift amount masked to 0..31.
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12137 
12138 // This pattern is automatically generated from aarch64_ad.m4
12139 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 << src3)  ==>  eor with an LSL-shifted register operand
// (64-bit); shift amount masked to 0..63.
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12158 
12159 // This pattern is automatically generated from aarch64_ad.m4
12160 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 ror src3)  ==>  eorw with a ROR-shifted register operand
// (32-bit); rotate amount masked to 0..31.
instruct XorI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12179 
12180 // This pattern is automatically generated from aarch64_ad.m4
12181 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12182 instruct XorL_reg_RotateRight_reg(iRegLNoSp dst,
12183                          iRegL src1, iRegL src2,
12184                          immI src3) %{
12185   match(Set dst (XorL src1 (RotateRight src2 src3)));
12186 
12187   ins_cost(1.9 * INSN_COST);
12188   format %{ "eor  $dst, $src1, $src2, ROR $src3" %}
12189 
12190   ins_encode %{
12191     __ eor(as_Register($dst$$reg),
12192               as_Register($src1$$reg),
12193               as_Register($src2$$reg),
12194               Assembler::ROR,
12195               $src3$$constant & 0x3f);
12196   %}
12197 
12198   ins_pipe(ialu_reg_reg_shift);
12199 %}
12200 
12201 // This pattern is automatically generated from aarch64_ad.m4
12202 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12203 instruct OrI_reg_URShift_reg(iRegINoSp dst,
12204                          iRegIorL2I src1, iRegIorL2I src2,
12205                          immI src3) %{
12206   match(Set dst (OrI src1 (URShiftI src2 src3)));
12207 
12208   ins_cost(1.9 * INSN_COST);
12209   format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}
12210 
12211   ins_encode %{
12212     __ orrw(as_Register($dst$$reg),
12213               as_Register($src1$$reg),
12214               as_Register($src2$$reg),
12215               Assembler::LSR,
12216               $src3$$constant & 0x1f);
12217   %}
12218 
12219   ins_pipe(ialu_reg_reg_shift);
12220 %}
12221 
12222 // This pattern is automatically generated from aarch64_ad.m4
12223 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12224 instruct OrL_reg_URShift_reg(iRegLNoSp dst,
12225                          iRegL src1, iRegL src2,
12226                          immI src3) %{
12227   match(Set dst (OrL src1 (URShiftL src2 src3)));
12228 
12229   ins_cost(1.9 * INSN_COST);
12230   format %{ "orr  $dst, $src1, $src2, LSR $src3" %}
12231 
12232   ins_encode %{
12233     __ orr(as_Register($dst$$reg),
12234               as_Register($src1$$reg),
12235               as_Register($src2$$reg),
12236               Assembler::LSR,
12237               $src3$$constant & 0x3f);
12238   %}
12239 
12240   ins_pipe(ialu_reg_reg_shift);
12241 %}
12242 
12243 // This pattern is automatically generated from aarch64_ad.m4
12244 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12245 instruct OrI_reg_RShift_reg(iRegINoSp dst,
12246                          iRegIorL2I src1, iRegIorL2I src2,
12247                          immI src3) %{
12248   match(Set dst (OrI src1 (RShiftI src2 src3)));
12249 
12250   ins_cost(1.9 * INSN_COST);
12251   format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}
12252 
12253   ins_encode %{
12254     __ orrw(as_Register($dst$$reg),
12255               as_Register($src1$$reg),
12256               as_Register($src2$$reg),
12257               Assembler::ASR,
12258               $src3$$constant & 0x1f);
12259   %}
12260 
12261   ins_pipe(ialu_reg_reg_shift);
12262 %}
12263 
12264 // This pattern is automatically generated from aarch64_ad.m4
12265 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12266 instruct OrL_reg_RShift_reg(iRegLNoSp dst,
12267                          iRegL src1, iRegL src2,
12268                          immI src3) %{
12269   match(Set dst (OrL src1 (RShiftL src2 src3)));
12270 
12271   ins_cost(1.9 * INSN_COST);
12272   format %{ "orr  $dst, $src1, $src2, ASR $src3" %}
12273 
12274   ins_encode %{
12275     __ orr(as_Register($dst$$reg),
12276               as_Register($src1$$reg),
12277               as_Register($src2$$reg),
12278               Assembler::ASR,
12279               $src3$$constant & 0x3f);
12280   %}
12281 
12282   ins_pipe(ialu_reg_reg_shift);
12283 %}
12284 
12285 // This pattern is automatically generated from aarch64_ad.m4
12286 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12287 instruct OrI_reg_LShift_reg(iRegINoSp dst,
12288                          iRegIorL2I src1, iRegIorL2I src2,
12289                          immI src3) %{
12290   match(Set dst (OrI src1 (LShiftI src2 src3)));
12291 
12292   ins_cost(1.9 * INSN_COST);
12293   format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}
12294 
12295   ins_encode %{
12296     __ orrw(as_Register($dst$$reg),
12297               as_Register($src1$$reg),
12298               as_Register($src2$$reg),
12299               Assembler::LSL,
12300               $src3$$constant & 0x1f);
12301   %}
12302 
12303   ins_pipe(ialu_reg_reg_shift);
12304 %}
12305 
12306 // This pattern is automatically generated from aarch64_ad.m4
12307 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12308 instruct OrL_reg_LShift_reg(iRegLNoSp dst,
12309                          iRegL src1, iRegL src2,
12310                          immI src3) %{
12311   match(Set dst (OrL src1 (LShiftL src2 src3)));
12312 
12313   ins_cost(1.9 * INSN_COST);
12314   format %{ "orr  $dst, $src1, $src2, LSL $src3" %}
12315 
12316   ins_encode %{
12317     __ orr(as_Register($dst$$reg),
12318               as_Register($src1$$reg),
12319               as_Register($src2$$reg),
12320               Assembler::LSL,
12321               $src3$$constant & 0x3f);
12322   %}
12323 
12324   ins_pipe(ialu_reg_reg_shift);
12325 %}
12326 
12327 // This pattern is automatically generated from aarch64_ad.m4
12328 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12329 instruct OrI_reg_RotateRight_reg(iRegINoSp dst,
12330                          iRegIorL2I src1, iRegIorL2I src2,
12331                          immI src3) %{
12332   match(Set dst (OrI src1 (RotateRight src2 src3)));
12333 
12334   ins_cost(1.9 * INSN_COST);
12335   format %{ "orrw  $dst, $src1, $src2, ROR $src3" %}
12336 
12337   ins_encode %{
12338     __ orrw(as_Register($dst$$reg),
12339               as_Register($src1$$reg),
12340               as_Register($src2$$reg),
12341               Assembler::ROR,
12342               $src3$$constant & 0x1f);
12343   %}
12344 
12345   ins_pipe(ialu_reg_reg_shift);
12346 %}
12347 
12348 // This pattern is automatically generated from aarch64_ad.m4
12349 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12350 instruct OrL_reg_RotateRight_reg(iRegLNoSp dst,
12351                          iRegL src1, iRegL src2,
12352                          immI src3) %{
12353   match(Set dst (OrL src1 (RotateRight src2 src3)));
12354 
12355   ins_cost(1.9 * INSN_COST);
12356   format %{ "orr  $dst, $src1, $src2, ROR $src3" %}
12357 
12358   ins_encode %{
12359     __ orr(as_Register($dst$$reg),
12360               as_Register($src1$$reg),
12361               as_Register($src2$$reg),
12362               Assembler::ROR,
12363               $src3$$constant & 0x3f);
12364   %}
12365 
12366   ins_pipe(ialu_reg_reg_shift);
12367 %}
12368 
12369 // This pattern is automatically generated from aarch64_ad.m4
12370 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12371 instruct AddI_reg_URShift_reg(iRegINoSp dst,
12372                          iRegIorL2I src1, iRegIorL2I src2,
12373                          immI src3) %{
12374   match(Set dst (AddI src1 (URShiftI src2 src3)));
12375 
12376   ins_cost(1.9 * INSN_COST);
12377   format %{ "addw  $dst, $src1, $src2, LSR $src3" %}
12378 
12379   ins_encode %{
12380     __ addw(as_Register($dst$$reg),
12381               as_Register($src1$$reg),
12382               as_Register($src2$$reg),
12383               Assembler::LSR,
12384               $src3$$constant & 0x1f);
12385   %}
12386 
12387   ins_pipe(ialu_reg_reg_shift);
12388 %}
12389 
12390 // This pattern is automatically generated from aarch64_ad.m4
12391 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12392 instruct AddL_reg_URShift_reg(iRegLNoSp dst,
12393                          iRegL src1, iRegL src2,
12394                          immI src3) %{
12395   match(Set dst (AddL src1 (URShiftL src2 src3)));
12396 
12397   ins_cost(1.9 * INSN_COST);
12398   format %{ "add  $dst, $src1, $src2, LSR $src3" %}
12399 
12400   ins_encode %{
12401     __ add(as_Register($dst$$reg),
12402               as_Register($src1$$reg),
12403               as_Register($src2$$reg),
12404               Assembler::LSR,
12405               $src3$$constant & 0x3f);
12406   %}
12407 
12408   ins_pipe(ialu_reg_reg_shift);
12409 %}
12410 
12411 // This pattern is automatically generated from aarch64_ad.m4
12412 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12413 instruct AddI_reg_RShift_reg(iRegINoSp dst,
12414                          iRegIorL2I src1, iRegIorL2I src2,
12415                          immI src3) %{
12416   match(Set dst (AddI src1 (RShiftI src2 src3)));
12417 
12418   ins_cost(1.9 * INSN_COST);
12419   format %{ "addw  $dst, $src1, $src2, ASR $src3" %}
12420 
12421   ins_encode %{
12422     __ addw(as_Register($dst$$reg),
12423               as_Register($src1$$reg),
12424               as_Register($src2$$reg),
12425               Assembler::ASR,
12426               $src3$$constant & 0x1f);
12427   %}
12428 
12429   ins_pipe(ialu_reg_reg_shift);
12430 %}
12431 
12432 // This pattern is automatically generated from aarch64_ad.m4
12433 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12434 instruct AddL_reg_RShift_reg(iRegLNoSp dst,
12435                          iRegL src1, iRegL src2,
12436                          immI src3) %{
12437   match(Set dst (AddL src1 (RShiftL src2 src3)));
12438 
12439   ins_cost(1.9 * INSN_COST);
12440   format %{ "add  $dst, $src1, $src2, ASR $src3" %}
12441 
12442   ins_encode %{
12443     __ add(as_Register($dst$$reg),
12444               as_Register($src1$$reg),
12445               as_Register($src2$$reg),
12446               Assembler::ASR,
12447               $src3$$constant & 0x3f);
12448   %}
12449 
12450   ins_pipe(ialu_reg_reg_shift);
12451 %}
12452 
12453 // This pattern is automatically generated from aarch64_ad.m4
12454 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12455 instruct AddI_reg_LShift_reg(iRegINoSp dst,
12456                          iRegIorL2I src1, iRegIorL2I src2,
12457                          immI src3) %{
12458   match(Set dst (AddI src1 (LShiftI src2 src3)));
12459 
12460   ins_cost(1.9 * INSN_COST);
12461   format %{ "addw  $dst, $src1, $src2, LSL $src3" %}
12462 
12463   ins_encode %{
12464     __ addw(as_Register($dst$$reg),
12465               as_Register($src1$$reg),
12466               as_Register($src2$$reg),
12467               Assembler::LSL,
12468               $src3$$constant & 0x1f);
12469   %}
12470 
12471   ins_pipe(ialu_reg_reg_shift);
12472 %}
12473 
12474 // This pattern is automatically generated from aarch64_ad.m4
12475 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12476 instruct AddL_reg_LShift_reg(iRegLNoSp dst,
12477                          iRegL src1, iRegL src2,
12478                          immI src3) %{
12479   match(Set dst (AddL src1 (LShiftL src2 src3)));
12480 
12481   ins_cost(1.9 * INSN_COST);
12482   format %{ "add  $dst, $src1, $src2, LSL $src3" %}
12483 
12484   ins_encode %{
12485     __ add(as_Register($dst$$reg),
12486               as_Register($src1$$reg),
12487               as_Register($src2$$reg),
12488               Assembler::LSL,
12489               $src3$$constant & 0x3f);
12490   %}
12491 
12492   ins_pipe(ialu_reg_reg_shift);
12493 %}
12494 
12495 // This pattern is automatically generated from aarch64_ad.m4
12496 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12497 instruct SubI_reg_URShift_reg(iRegINoSp dst,
12498                          iRegIorL2I src1, iRegIorL2I src2,
12499                          immI src3) %{
12500   match(Set dst (SubI src1 (URShiftI src2 src3)));
12501 
12502   ins_cost(1.9 * INSN_COST);
12503   format %{ "subw  $dst, $src1, $src2, LSR $src3" %}
12504 
12505   ins_encode %{
12506     __ subw(as_Register($dst$$reg),
12507               as_Register($src1$$reg),
12508               as_Register($src2$$reg),
12509               Assembler::LSR,
12510               $src3$$constant & 0x1f);
12511   %}
12512 
12513   ins_pipe(ialu_reg_reg_shift);
12514 %}
12515 
12516 // This pattern is automatically generated from aarch64_ad.m4
12517 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12518 instruct SubL_reg_URShift_reg(iRegLNoSp dst,
12519                          iRegL src1, iRegL src2,
12520                          immI src3) %{
12521   match(Set dst (SubL src1 (URShiftL src2 src3)));
12522 
12523   ins_cost(1.9 * INSN_COST);
12524   format %{ "sub  $dst, $src1, $src2, LSR $src3" %}
12525 
12526   ins_encode %{
12527     __ sub(as_Register($dst$$reg),
12528               as_Register($src1$$reg),
12529               as_Register($src2$$reg),
12530               Assembler::LSR,
12531               $src3$$constant & 0x3f);
12532   %}
12533 
12534   ins_pipe(ialu_reg_reg_shift);
12535 %}
12536 
12537 // This pattern is automatically generated from aarch64_ad.m4
12538 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12539 instruct SubI_reg_RShift_reg(iRegINoSp dst,
12540                          iRegIorL2I src1, iRegIorL2I src2,
12541                          immI src3) %{
12542   match(Set dst (SubI src1 (RShiftI src2 src3)));
12543 
12544   ins_cost(1.9 * INSN_COST);
12545   format %{ "subw  $dst, $src1, $src2, ASR $src3" %}
12546 
12547   ins_encode %{
12548     __ subw(as_Register($dst$$reg),
12549               as_Register($src1$$reg),
12550               as_Register($src2$$reg),
12551               Assembler::ASR,
12552               $src3$$constant & 0x1f);
12553   %}
12554 
12555   ins_pipe(ialu_reg_reg_shift);
12556 %}
12557 
12558 // This pattern is automatically generated from aarch64_ad.m4
12559 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12560 instruct SubL_reg_RShift_reg(iRegLNoSp dst,
12561                          iRegL src1, iRegL src2,
12562                          immI src3) %{
12563   match(Set dst (SubL src1 (RShiftL src2 src3)));
12564 
12565   ins_cost(1.9 * INSN_COST);
12566   format %{ "sub  $dst, $src1, $src2, ASR $src3" %}
12567 
12568   ins_encode %{
12569     __ sub(as_Register($dst$$reg),
12570               as_Register($src1$$reg),
12571               as_Register($src2$$reg),
12572               Assembler::ASR,
12573               $src3$$constant & 0x3f);
12574   %}
12575 
12576   ins_pipe(ialu_reg_reg_shift);
12577 %}
12578 
12579 // This pattern is automatically generated from aarch64_ad.m4
12580 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12581 instruct SubI_reg_LShift_reg(iRegINoSp dst,
12582                          iRegIorL2I src1, iRegIorL2I src2,
12583                          immI src3) %{
12584   match(Set dst (SubI src1 (LShiftI src2 src3)));
12585 
12586   ins_cost(1.9 * INSN_COST);
12587   format %{ "subw  $dst, $src1, $src2, LSL $src3" %}
12588 
12589   ins_encode %{
12590     __ subw(as_Register($dst$$reg),
12591               as_Register($src1$$reg),
12592               as_Register($src2$$reg),
12593               Assembler::LSL,
12594               $src3$$constant & 0x1f);
12595   %}
12596 
12597   ins_pipe(ialu_reg_reg_shift);
12598 %}
12599 
12600 // This pattern is automatically generated from aarch64_ad.m4
12601 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12602 instruct SubL_reg_LShift_reg(iRegLNoSp dst,
12603                          iRegL src1, iRegL src2,
12604                          immI src3) %{
12605   match(Set dst (SubL src1 (LShiftL src2 src3)));
12606 
12607   ins_cost(1.9 * INSN_COST);
12608   format %{ "sub  $dst, $src1, $src2, LSL $src3" %}
12609 
12610   ins_encode %{
12611     __ sub(as_Register($dst$$reg),
12612               as_Register($src1$$reg),
12613               as_Register($src2$$reg),
12614               Assembler::LSL,
12615               $src3$$constant & 0x3f);
12616   %}
12617 
12618   ins_pipe(ialu_reg_reg_shift);
12619 %}
12620 
12621 // This pattern is automatically generated from aarch64_ad.m4
12622 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12623 
12624 // Shift Left followed by Shift Right.
12625 // This idiom is used by the compiler for the i2b bytecode etc.
12626 instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
12627 %{
12628   match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
12629   ins_cost(INSN_COST * 2);
12630   format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
12631   ins_encode %{
12632     int lshift = $lshift_count$$constant & 63;
12633     int rshift = $rshift_count$$constant & 63;
12634     int s = 63 - lshift;
12635     int r = (rshift - lshift) & 63;
12636     __ sbfm(as_Register($dst$$reg),
12637             as_Register($src$$reg),
12638             r, s);
12639   %}
12640 
12641   ins_pipe(ialu_reg_shift);
12642 %}
12643 
12644 // This pattern is automatically generated from aarch64_ad.m4
12645 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12646 
12647 // Shift Left followed by Shift Right.
12648 // This idiom is used by the compiler for the i2b bytecode etc.
12649 instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
12650 %{
12651   match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
12652   ins_cost(INSN_COST * 2);
12653   format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
12654   ins_encode %{
12655     int lshift = $lshift_count$$constant & 31;
12656     int rshift = $rshift_count$$constant & 31;
12657     int s = 31 - lshift;
12658     int r = (rshift - lshift) & 31;
12659     __ sbfmw(as_Register($dst$$reg),
12660             as_Register($src$$reg),
12661             r, s);
12662   %}
12663 
12664   ins_pipe(ialu_reg_shift);
12665 %}
12666 
12667 // This pattern is automatically generated from aarch64_ad.m4
12668 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12669 
12670 // Shift Left followed by Shift Right.
12671 // This idiom is used by the compiler for the i2b bytecode etc.
12672 instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
12673 %{
12674   match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
12675   ins_cost(INSN_COST * 2);
12676   format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
12677   ins_encode %{
12678     int lshift = $lshift_count$$constant & 63;
12679     int rshift = $rshift_count$$constant & 63;
12680     int s = 63 - lshift;
12681     int r = (rshift - lshift) & 63;
12682     __ ubfm(as_Register($dst$$reg),
12683             as_Register($src$$reg),
12684             r, s);
12685   %}
12686 
12687   ins_pipe(ialu_reg_shift);
12688 %}
12689 
12690 // This pattern is automatically generated from aarch64_ad.m4
12691 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12692 
12693 // Shift Left followed by Shift Right.
12694 // This idiom is used by the compiler for the i2b bytecode etc.
12695 instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
12696 %{
12697   match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
12698   ins_cost(INSN_COST * 2);
12699   format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
12700   ins_encode %{
12701     int lshift = $lshift_count$$constant & 31;
12702     int rshift = $rshift_count$$constant & 31;
12703     int s = 31 - lshift;
12704     int r = (rshift - lshift) & 31;
12705     __ ubfmw(as_Register($dst$$reg),
12706             as_Register($src$$reg),
12707             r, s);
12708   %}
12709 
12710   ins_pipe(ialu_reg_shift);
12711 %}
12712 
12713 // Bitfield extract with shift & mask
12714 
12715 // This pattern is automatically generated from aarch64_ad.m4
12716 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12717 instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
12718 %{
12719   match(Set dst (AndI (URShiftI src rshift) mask));
12720   // Make sure we are not going to exceed what ubfxw can do.
12721   predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1));
12722 
12723   ins_cost(INSN_COST);
12724   format %{ "ubfxw $dst, $src, $rshift, $mask" %}
12725   ins_encode %{
12726     int rshift = $rshift$$constant & 31;
12727     intptr_t mask = $mask$$constant;
12728     int width = exact_log2(mask+1);
12729     __ ubfxw(as_Register($dst$$reg),
12730             as_Register($src$$reg), rshift, width);
12731   %}
12732   ins_pipe(ialu_reg_shift);
12733 %}
12734 
12735 // This pattern is automatically generated from aarch64_ad.m4
12736 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12737 instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
12738 %{
12739   match(Set dst (AndL (URShiftL src rshift) mask));
12740   // Make sure we are not going to exceed what ubfx can do.
12741   predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));
12742 
12743   ins_cost(INSN_COST);
12744   format %{ "ubfx $dst, $src, $rshift, $mask" %}
12745   ins_encode %{
12746     int rshift = $rshift$$constant & 63;
12747     intptr_t mask = $mask$$constant;
12748     int width = exact_log2_long(mask+1);
12749     __ ubfx(as_Register($dst$$reg),
12750             as_Register($src$$reg), rshift, width);
12751   %}
12752   ins_pipe(ialu_reg_shift);
12753 %}
12754 
12755 
12756 // This pattern is automatically generated from aarch64_ad.m4
12757 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12758 
12759 // We can use ubfx when extending an And with a mask when we know mask
12760 // is positive.  We know that because immI_bitmask guarantees it.
12761 instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
12762 %{
12763   match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
12764   // Make sure we are not going to exceed what ubfxw can do.
12765   predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));
12766 
12767   ins_cost(INSN_COST * 2);
12768   format %{ "ubfx $dst, $src, $rshift, $mask" %}
12769   ins_encode %{
12770     int rshift = $rshift$$constant & 31;
12771     intptr_t mask = $mask$$constant;
12772     int width = exact_log2(mask+1);
12773     __ ubfx(as_Register($dst$$reg),
12774             as_Register($src$$reg), rshift, width);
12775   %}
12776   ins_pipe(ialu_reg_shift);
12777 %}
12778 
12779 
12780 // This pattern is automatically generated from aarch64_ad.m4
12781 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12782 
12783 // We can use ubfiz when masking by a positive number and then left shifting the result.
12784 // We know that the mask is positive because immI_bitmask guarantees it.
12785 instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
12786 %{
12787   match(Set dst (LShiftI (AndI src mask) lshift));
12788   predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 31)) <= (31 + 1));
12789 
12790   ins_cost(INSN_COST);
12791   format %{ "ubfizw $dst, $src, $lshift, $mask" %}
12792   ins_encode %{
12793     int lshift = $lshift$$constant & 31;
12794     intptr_t mask = $mask$$constant;
12795     int width = exact_log2(mask+1);
12796     __ ubfizw(as_Register($dst$$reg),
12797           as_Register($src$$reg), lshift, width);
12798   %}
12799   ins_pipe(ialu_reg_shift);
12800 %}
12801 
12802 // This pattern is automatically generated from aarch64_ad.m4
12803 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12804 
12805 // We can use ubfiz when masking by a positive number and then left shifting the result.
12806 // We know that the mask is positive because immL_bitmask guarantees it.
12807 instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
12808 %{
12809   match(Set dst (LShiftL (AndL src mask) lshift));
12810   predicate((exact_log2_long(n->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));
12811 
12812   ins_cost(INSN_COST);
12813   format %{ "ubfiz $dst, $src, $lshift, $mask" %}
12814   ins_encode %{
12815     int lshift = $lshift$$constant & 63;
12816     intptr_t mask = $mask$$constant;
12817     int width = exact_log2_long(mask+1);
12818     __ ubfiz(as_Register($dst$$reg),
12819           as_Register($src$$reg), lshift, width);
12820   %}
12821   ins_pipe(ialu_reg_shift);
12822 %}
12823 
12824 // This pattern is automatically generated from aarch64_ad.m4
12825 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12826 
12827 // We can use ubfiz when masking by a positive number and then left shifting the result.
12828 // We know that the mask is positive because immI_bitmask guarantees it.
12829 instruct ubfizwIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
12830 %{
12831   match(Set dst (ConvI2L (LShiftI (AndI src mask) lshift)));
12832   predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= 31);
12833 
12834   ins_cost(INSN_COST);
12835   format %{ "ubfizw $dst, $src, $lshift, $mask" %}
12836   ins_encode %{
12837     int lshift = $lshift$$constant & 31;
12838     intptr_t mask = $mask$$constant;
12839     int width = exact_log2(mask+1);
12840     __ ubfizw(as_Register($dst$$reg),
12841           as_Register($src$$reg), lshift, width);
12842   %}
12843   ins_pipe(ialu_reg_shift);
12844 %}
12845 
12846 // This pattern is automatically generated from aarch64_ad.m4
12847 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12848 
12849 // We can use ubfiz when masking by a positive number and then left shifting the result.
12850 // We know that the mask is positive because immL_bitmask guarantees it.
12851 instruct ubfizLConvL2I(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
12852 %{
12853   match(Set dst (ConvL2I (LShiftL (AndL src mask) lshift)));
12854   predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= 31);
12855 
12856   ins_cost(INSN_COST);
12857   format %{ "ubfiz $dst, $src, $lshift, $mask" %}
12858   ins_encode %{
12859     int lshift = $lshift$$constant & 63;
12860     intptr_t mask = $mask$$constant;
12861     int width = exact_log2_long(mask+1);
12862     __ ubfiz(as_Register($dst$$reg),
12863           as_Register($src$$reg), lshift, width);
12864   %}
12865   ins_pipe(ialu_reg_shift);
12866 %}
12867 
12868 
12869 // This pattern is automatically generated from aarch64_ad.m4
12870 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12871 
12872 // If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
12873 instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
12874 %{
12875   match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
12876   predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));
12877 
12878   ins_cost(INSN_COST);
12879   format %{ "ubfiz $dst, $src, $lshift, $mask" %}
12880   ins_encode %{
12881     int lshift = $lshift$$constant & 63;
12882     intptr_t mask = $mask$$constant;
12883     int width = exact_log2(mask+1);
12884     __ ubfiz(as_Register($dst$$reg),
12885              as_Register($src$$reg), lshift, width);
12886   %}
12887   ins_pipe(ialu_reg_shift);
12888 %}
12889 
12890 // This pattern is automatically generated from aarch64_ad.m4
12891 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12892 
12893 // If there is a convert L to I block between and AndL and a LShiftI, we can also match ubfiz
12894 instruct ubfizLConvL2Ix(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
12895 %{
12896   match(Set dst (LShiftI (ConvL2I (AndL src mask)) lshift));
12897   predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 31)) <= 31);
12898 
12899   ins_cost(INSN_COST);
12900   format %{ "ubfiz $dst, $src, $lshift, $mask" %}
12901   ins_encode %{
12902     int lshift = $lshift$$constant & 31;
12903     intptr_t mask = $mask$$constant;
12904     int width = exact_log2(mask+1);
12905     __ ubfiz(as_Register($dst$$reg),
12906              as_Register($src$$reg), lshift, width);
12907   %}
12908   ins_pipe(ialu_reg_shift);
12909 %}
12910 
12911 // This pattern is automatically generated from aarch64_ad.m4
12912 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12913 
12914 // Can skip int2long conversions after AND with small bitmask
12915 instruct ubfizIConvI2LAndI(iRegLNoSp dst, iRegI src, immI_bitmask msk)
12916 %{
12917   match(Set dst (ConvI2L (AndI src msk)));
12918   ins_cost(INSN_COST);
12919   format %{ "ubfiz $dst, $src, 0, exact_log2($msk + 1) " %}
12920   ins_encode %{
12921     __ ubfiz(as_Register($dst$$reg), as_Register($src$$reg), 0, exact_log2($msk$$constant + 1));
12922   %}
12923   ins_pipe(ialu_reg_shift);
12924 %}
12925 
12926 
12927 // Rotations
12928 
12929 // This pattern is automatically generated from aarch64_ad.m4
12930 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12931 instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
12932 %{
12933   match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
12934   predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));
12935 
12936   ins_cost(INSN_COST);
12937   format %{ "extr $dst, $src1, $src2, #$rshift" %}
12938 
12939   ins_encode %{
12940     __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
12941             $rshift$$constant & 63);
12942   %}
12943   ins_pipe(ialu_reg_reg_extr);
12944 %}
12945 
12946 
12947 // This pattern is automatically generated from aarch64_ad.m4
12948 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12949 instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
12950 %{
12951   match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
12952   predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));
12953 
12954   ins_cost(INSN_COST);
12955   format %{ "extr $dst, $src1, $src2, #$rshift" %}
12956 
12957   ins_encode %{
12958     __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
12959             $rshift$$constant & 31);
12960   %}
12961   ins_pipe(ialu_reg_reg_extr);
12962 %}
12963 
12964 
12965 // This pattern is automatically generated from aarch64_ad.m4
12966 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12967 instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
12968 %{
12969   match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
12970   predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));
12971 
12972   ins_cost(INSN_COST);
12973   format %{ "extr $dst, $src1, $src2, #$rshift" %}
12974 
12975   ins_encode %{
12976     __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
12977             $rshift$$constant & 63);
12978   %}
12979   ins_pipe(ialu_reg_reg_extr);
12980 %}
12981 
12982 
12983 // This pattern is automatically generated from aarch64_ad.m4
12984 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12985 instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
12986 %{
12987   match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
12988   predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));
12989 
12990   ins_cost(INSN_COST);
12991   format %{ "extr $dst, $src1, $src2, #$rshift" %}
12992 
12993   ins_encode %{
12994     __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
12995             $rshift$$constant & 31);
12996   %}
12997   ins_pipe(ialu_reg_reg_extr);
12998 %}
12999 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate right by constant: EXTRW with the same register in both source
// operand slots is a 32-bit ROR by (shift & 0x1f).
instruct rorI_imm(iRegINoSp dst, iRegI src, immI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ extrw(as_Register($dst$$reg), as_Register($src$$reg), as_Register($src$$reg),
               $shift$$constant & 0x1f);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit rotate right by constant via EXTR (shift taken mod 64).
instruct rorL_imm(iRegLNoSp dst, iRegL src, immI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ extr(as_Register($dst$$reg), as_Register($src$$reg), as_Register($src$$reg),
               $shift$$constant & 0x3f);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate right by a register-held amount (RORVW, 32-bit).
instruct rorI_reg(iRegINoSp dst, iRegI src, iRegI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg), as_Register($shift$$reg));
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate right by a register-held amount (RORV, 64-bit).
instruct rorL_reg(iRegLNoSp dst, iRegL src, iRegI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ rorv(as_Register($dst$$reg), as_Register($src$$reg), as_Register($shift$$reg));
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate left by a register amount: rol(x, s) == ror(x, -s), so negate
// the shift into rscratch1 and rotate right (RORVW, 32-bit).
instruct rolI_reg(iRegINoSp dst, iRegI src, iRegI shift)
%{
  match(Set dst (RotateLeft src shift));

  ins_cost(INSN_COST);
  format %{ "rol    $dst, $src, $shift" %}

  ins_encode %{
     __ subw(rscratch1, zr, as_Register($shift$$reg));
     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg), rscratch1);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit rotate left: negate the shift amount, then RORV.
instruct rolL_reg(iRegLNoSp dst, iRegL src, iRegI shift)
%{
  match(Set dst (RotateLeft src shift));

  ins_cost(INSN_COST);
  format %{ "rol    $dst, $src, $shift" %}

  ins_encode %{
     __ subw(rscratch1, zr, as_Register($shift$$reg));
     __ rorv(as_Register($dst$$reg), as_Register($src$$reg), rscratch1);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
13093 
13094 
// Add/subtract (extended)
//
// These patterns fold an explicit widening (ConvI2L, or a left/right
// shift pair that sign/zero-extends a sub-word value) into the extend
// operand of AArch64's extended-register ADD/SUB forms.

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long + (long)int: fold the ConvI2L into ADD's sxtw extend.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long - (long)int: fold the ConvI2L into SUB's sxtw extend.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 << 16) >> 16 (arithmetic) sign-extends the low 16 bits: ADD sxth.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 << 24) >> 24 (arithmetic) sign-extends the low byte: ADD sxtb.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Logical shift pair (<< 24, >>> 24) zero-extends the low byte: ADD uxtb.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: (src2 << 48) >> 48 sign-extends the low 16 bits: ADD sxth.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: (src2 << 32) >> 32 sign-extends the low word: ADD sxtw.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: (src2 << 56) >> 56 sign-extends the low byte: ADD sxtb.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit logical shift pair zero-extends the low byte: ADD uxtb.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
13231 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src2 & 0xff zero-extends the low byte: ADDW uxtb.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src2 & 0xffff zero-extends the low 16 bits: ADDW uxth.
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: src2 & 0xff zero-extends the low byte: ADD uxtb.
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: src2 & 0xffff zero-extends the low 16 bits: ADD uxth.
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src2 & 0xffffffff zero-extends the low word: ADD uxtw.
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterpart of AddExtI_uxtb_and: SUBW uxtb.
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterpart of AddExtI_uxth_and: SUBW uxth.
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit subtract with zero-extended low byte: SUB uxtb.
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit subtract with zero-extended low 16 bits: SUB uxth.
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit subtract with zero-extended low word: SUB uxtw.
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
13381 
13382 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Extended-register form with an extra left shift: sign-extend the low
// byte of src2, shift by lshift2 (immIExt — presumably restricted to the
// 0..4 range encodable in this form; defined elsewhere in this file),
// then add. Same scheme applies to the *_shift patterns below.
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// ADD with sxth extend (low 16 bits of src2) plus left shift.
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// ADD with sxtw extend (low word of src2) plus left shift.
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterpart: SUB with sxtb extend plus left shift.
instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// SUB with sxth extend plus left shift.
instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// SUB with sxtw extend plus left shift.
instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: ADDW with sxtb extend plus left shift.
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: ADDW with sxth extend plus left shift.
instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: SUBW with sxtb extend plus left shift.
instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: SUBW with sxth extend plus left shift.
instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long + ((long)int << lshift): fold ConvI2L and shift into ADD sxtw #lshift.
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long - ((long)int << lshift): fold ConvI2L and shift into SUB sxtw #lshift.
instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13562 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 & 0xff) << lshift folded into one extended-register ADD uxtb #lshift.
// The *_and_shift patterns below follow the same scheme for the other
// mask widths and for subtraction.
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 & 0xffff) << lshift: ADD uxth #lshift.
instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 & 0xffffffff) << lshift: ADD uxtw #lshift.
instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterpart: SUB uxtb #lshift.
instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterpart: SUB uxth #lshift.
instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterpart: SUB uxtw #lshift.
instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: (src2 & 0xff) << lshift: ADDW uxtb #lshift.
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: (src2 & 0xffff) << lshift: ADDW uxth #lshift.
instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit subtract counterpart: SUBW uxtb #lshift.
instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit subtract counterpart: SUBW uxth #lshift.
instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13712 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): the cmovI_* fragments below carry no match rule — only an
// effect() signature — so the matcher never selects them directly; they are
// presumably referenced from expand rules elsewhere in this file (e.g. the
// min/max patterns that follow). Confirm against their callers.
// dst = (LT ? src1 : src2), reading the condition flags in cr.
instruct cmovI_reg_reg_lt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE src2, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 lt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             $src2$$Register,
             Assembler::LT);
  %}
  ins_pipe(icond_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (GT ? src1 : src2).
instruct cmovI_reg_reg_gt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE src2, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 gt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             $src2$$Register,
             Assembler::GT);
  %}
  ins_pipe(icond_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (LT ? src1 : 0) — zero comes from the zr register operand.
instruct cmovI_reg_imm0_lt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, zr lt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (GT ? src1 : 0).
instruct cmovI_reg_imm0_gt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, zr gt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (LE ? src1 : 1) — CSINC with zr yields zr + 1 == 1 when the
// condition fails.
instruct cmovI_reg_imm1_le(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, $src1, zr le\t"  %}

  ins_encode %{
    __ csincw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LE);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (GT ? src1 : 1).
instruct cmovI_reg_imm1_gt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, $src1, zr gt\t"  %}

  ins_encode %{
    __ csincw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (LT ? src1 : -1) — CSINV with zr yields ~zr == -1 when the
// condition fails.
instruct cmovI_reg_immM1_lt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csinvw $dst, $src1, zr lt\t"  %}

  ins_encode %{
    __ csinvw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (GE ? src1 : -1).
instruct cmovI_reg_immM1_ge(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csinvw $dst, $src1, zr ge\t"  %}

  ins_encode %{
    __ csinvw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GE);
  %}
  ins_pipe(icond_reg);
%}
13848 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section — changes belong in aarch64_ad.m4.
// MinI(src, 0): compare src against 0, then select src when LT else 0.
instruct minI_reg_imm0(iRegINoSp dst, iRegIorL2I src, immI0 imm)
%{
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Commuted form: MinI(0, src) — same expansion as minI_reg_imm0.
instruct minI_imm0_reg(iRegINoSp dst, immI0 imm, iRegIorL2I src)
%{
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// MinI(src, 1): compare against 0, select src when LE (src <= 0) else 1.
instruct minI_reg_imm1(iRegINoSp dst, iRegIorL2I src, immI_1 imm)
%{
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_le(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Commuted form: MinI(1, src).
instruct minI_imm1_reg(iRegINoSp dst, immI_1 imm, iRegIorL2I src)
%{
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_le(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// MinI(src, -1): compare against 0, select src when LT (src < 0) else -1.
instruct minI_reg_immM1(iRegINoSp dst, iRegIorL2I src, immI_M1 imm)
%{
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Commuted form: MinI(-1, src).
instruct minI_immM1_reg(iRegINoSp dst, immI_M1 imm, iRegIorL2I src)
%{
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// MaxI(src, 0): compare against 0, select src when GT else 0.
instruct maxI_reg_imm0(iRegINoSp dst, iRegIorL2I src, immI0 imm)
%{
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Commuted form: MaxI(0, src).
instruct maxI_imm0_reg(iRegINoSp dst, immI0 imm, iRegIorL2I src)
%{
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// MaxI(src, 1): compare against 0, select src when GT (src >= 1) else 1.
instruct maxI_reg_imm1(iRegINoSp dst, iRegIorL2I src, immI_1 imm)
%{
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Commuted form: MaxI(1, src).
instruct maxI_imm1_reg(iRegINoSp dst, immI_1 imm, iRegIorL2I src)
%{
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// MaxI(src, -1): compare against 0, select src when GE (src >= 0) else -1.
instruct maxI_reg_immM1(iRegINoSp dst, iRegIorL2I src, immI_M1 imm)
%{
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_ge(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Commuted form: MaxI(-1, src).
instruct maxI_immM1_reg(iRegINoSp dst, immI_M1 imm, iRegIorL2I src)
%{
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_ge(dst, src, cr);
  %}
%}
14004 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Reverse the bit order of a 32-bit value (rbitw).
instruct bits_reverse_I(iRegINoSp dst, iRegIorL2I src)
%{
  match(Set dst (ReverseI src));
  ins_cost(INSN_COST);
  format %{ "rbitw  $dst, $src" %}
  ins_encode %{
    __ rbitw($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Reverse the bit order of a 64-bit value (rbit).
instruct bits_reverse_L(iRegLNoSp dst, iRegL src)
%{
  match(Set dst (ReverseL src));
  ins_cost(INSN_COST);
  format %{ "rbit  $dst, $src" %}
  ins_encode %{
    __ rbit($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}


// END This section of the file is automatically generated. Do not edit --------------
14033 
14034 
14035 // ============================================================================
14036 // Floating Point Arithmetic Instructions
14037 
14038 instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
14039   match(Set dst (AddF src1 src2));
14040 
14041   ins_cost(INSN_COST * 5);
14042   format %{ "fadds   $dst, $src1, $src2" %}
14043 
14044   ins_encode %{
14045     __ fadds(as_FloatRegister($dst$$reg),
14046              as_FloatRegister($src1$$reg),
14047              as_FloatRegister($src2$$reg));
14048   %}
14049 
14050   ins_pipe(fp_dop_reg_reg_s);
14051 %}
14052 
14053 instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
14054   match(Set dst (AddD src1 src2));
14055 
14056   ins_cost(INSN_COST * 5);
14057   format %{ "faddd   $dst, $src1, $src2" %}
14058 
14059   ins_encode %{
14060     __ faddd(as_FloatRegister($dst$$reg),
14061              as_FloatRegister($src1$$reg),
14062              as_FloatRegister($src2$$reg));
14063   %}
14064 
14065   ins_pipe(fp_dop_reg_reg_d);
14066 %}
14067 
14068 instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
14069   match(Set dst (SubF src1 src2));
14070 
14071   ins_cost(INSN_COST * 5);
14072   format %{ "fsubs   $dst, $src1, $src2" %}
14073 
14074   ins_encode %{
14075     __ fsubs(as_FloatRegister($dst$$reg),
14076              as_FloatRegister($src1$$reg),
14077              as_FloatRegister($src2$$reg));
14078   %}
14079 
14080   ins_pipe(fp_dop_reg_reg_s);
14081 %}
14082 
14083 instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
14084   match(Set dst (SubD src1 src2));
14085 
14086   ins_cost(INSN_COST * 5);
14087   format %{ "fsubd   $dst, $src1, $src2" %}
14088 
14089   ins_encode %{
14090     __ fsubd(as_FloatRegister($dst$$reg),
14091              as_FloatRegister($src1$$reg),
14092              as_FloatRegister($src2$$reg));
14093   %}
14094 
14095   ins_pipe(fp_dop_reg_reg_d);
14096 %}
14097 
14098 instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
14099   match(Set dst (MulF src1 src2));
14100 
14101   ins_cost(INSN_COST * 6);
14102   format %{ "fmuls   $dst, $src1, $src2" %}
14103 
14104   ins_encode %{
14105     __ fmuls(as_FloatRegister($dst$$reg),
14106              as_FloatRegister($src1$$reg),
14107              as_FloatRegister($src2$$reg));
14108   %}
14109 
14110   ins_pipe(fp_dop_reg_reg_s);
14111 %}
14112 
14113 instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
14114   match(Set dst (MulD src1 src2));
14115 
14116   ins_cost(INSN_COST * 6);
14117   format %{ "fmuld   $dst, $src1, $src2" %}
14118 
14119   ins_encode %{
14120     __ fmuld(as_FloatRegister($dst$$reg),
14121              as_FloatRegister($src1$$reg),
14122              as_FloatRegister($src2$$reg));
14123   %}
14124 
14125   ins_pipe(fp_dop_reg_reg_d);
14126 %}
14127 
// Fused multiply-add family.  All rules are guarded by UseFMA; the FmaF/FmaD
// ideal nodes take the addend first: Fma(src3, Binary(src1, src2)).

// src1 * src2 + src3
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 + src3
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
// Two match rules: the negation may appear on either multiplicand.
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
// NOTE(review): the 'zero' operand is not referenced by match or encode —
// looks vestigial; confirm against the matcher before removing.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
// NOTE(review): 'zero' operand unused here as well — see mnsubF_reg_reg.
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
  // n.b. insn name should be fnmsubd
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
14268 
14269 
// Math.max(FF)F
// NOTE(review): presumably the hardware FMAX/FMIN NaN and signed-zero
// behavior matches the Java Math.max/min contract here — confirm against
// the intrinsic's enabling conditions.
instruct maxF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxF src1 src2));

  format %{ "fmaxs   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.min(FF)F
instruct minF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinF src1 src2));

  format %{ "fmins   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.max(DD)D
instruct maxD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MaxD src1 src2));

  format %{ "fmaxd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Math.min(DD)D
instruct minD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MinD src1 src2));

  format %{ "fmind   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
14325 
14326 
// Single-precision divide: dst = src1 / src2 (fdivs).
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// Double-precision divide: dst = src1 / src2 (fdivd).
// Higher cost than divF reflects the longer double-precision divide latency.
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
14356 
// Single-precision negate: dst = -src (fnegs).
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // Fixed format text: the encoding emits fnegs (single-precision form);
  // the sibling negD rule likewise prints the full mnemonic "fnegd".
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
14370 
// Double-precision negate: dst = -src (fnegd).
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}

// Integer absolute value: compare with zero, then conditional negate.
// Clobbers the flags (KILL cr).
instruct absI_reg(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (AbsI src));

  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $src, zr\n\t"
            "cnegw $dst, $src, Assembler::LT\t# int abs"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cnegw(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}

// Long absolute value: 64-bit variant of absI_reg.  Clobbers the flags.
instruct absL_reg(iRegLNoSp dst, iRegL src, rFlagsReg cr)
%{
  match(Set dst (AbsL src));

  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmp  $src, zr\n\t"
            "cneg $dst, $src, Assembler::LT\t# long abs"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cneg(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}

// Single-precision absolute value (fabss).
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double-precision absolute value (fabsd).
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}

// Fused |src1 - src2|: matches Abs over Sub and emits a single fabds.
instruct absdF_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AbsF (SubF src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "fabds   $dst, $src1, $src2" %}
  ins_encode %{
    __ fabds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Fused |src1 - src2| for doubles (fabdd).
instruct absdD_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AbsD (SubD src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "fabdd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fabdd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
14472 
// Double-precision square root (fsqrtd).
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Was fp_div_s: the single/double pipe classes were swapped between
  // sqrtF_reg and sqrtD_reg.  A double-precision sqrt belongs to the
  // double-precision divide/sqrt pipeline (cf. divD_reg_reg).
  ins_pipe(fp_div_d);
%}
14485 
// Single-precision square root (fsqrts).
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (SqrtF src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Was fp_div_d: the single/double pipe classes were swapped between
  // sqrtF_reg and sqrtD_reg.  A single-precision sqrt belongs to the
  // single-precision divide/sqrt pipeline (cf. divF_reg_reg).
  ins_pipe(fp_div_s);
%}
14498 
// Math.rint, floor, ceil
// Selects the frint* variant from the constant rounding-mode operand.
instruct roundD_reg(vRegD dst, vRegD src, immI rmode) %{
  match(Set dst (RoundDoubleMode src rmode));
  format %{ "frint  $dst, $src, $rmode" %}
  ins_encode %{
    // NOTE(review): no default case — rmode is expected to be one of the
    // three RoundDoubleModeNode constants; an unexpected value would emit
    // nothing.  Confirm the matcher guarantees this.
    switch ($rmode$$constant) {
      case RoundDoubleModeNode::rmode_rint:
        __ frintnd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_floor:
        __ frintmd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_ceil:
        __ frintpd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
    }
  %}
  ins_pipe(fp_uop_d);
%}

// CopySignD: build a sign-bit mask in dst (fnegd of 'zero' yields -0.0,
// i.e. only the sign bit set), then bit-select: sign bit from src2,
// remaining bits from src1.
instruct copySignD_reg(vRegD dst, vRegD src1, vRegD src2, vRegD zero) %{
  match(Set dst (CopySignD src1 (Binary src2 zero)));
  effect(TEMP_DEF dst, USE src1, USE src2, USE zero);
  format %{ "CopySignD  $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg),
                  zero = as_FloatRegister($zero$$reg);
    __ fnegd(dst, zero);
    __ bsl(dst, __ T8B, src2, src1);
  %}
  ins_pipe(fp_uop_d);
%}

// CopySignF: movi builds the 0x80000000 sign mask directly (0x80 << 24),
// then bit-select as in copySignD_reg.
// NOTE(review): pipe class is fp_uop_d even though this is the float
// variant — confirm whether fp_uop_s was intended.
instruct copySignF_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (CopySignF src1 src2));
  effect(TEMP_DEF dst, USE src1, USE src2);
  format %{ "CopySignF  $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg);
    __ movi(dst, __ T2S, 0x80, 24);
    __ bsl(dst, __ T8B, src2, src1);
  %}
  ins_pipe(fp_uop_d);
%}

// Math.signum(D): returns +-1.0 for non-zero finite input, and the input
// itself for +-0.0 and NaN, built from a compare-abs, shift, and bit-select.
instruct signumD_reg(vRegD dst, vRegD src, vRegD zero, vRegD one) %{
  match(Set dst (SignumD src (Binary zero one)));
  effect(TEMP_DEF dst, USE src, USE zero, USE one);
  format %{ "signumD  $dst, $src" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg),
                  dst = as_FloatRegister($dst$$reg),
                  zero = as_FloatRegister($zero$$reg),
                  one = as_FloatRegister($one$$reg);
    __ facgtd(dst, src, zero); // dst=0 for +-0.0 and NaN. 0xFFF..F otherwise
    __ ushrd(dst, dst, 1);     // dst=0 for +-0.0 and NaN. 0x7FF..F otherwise
    // Bit selection instruction gets bit from "one" for each enabled bit in
    // "dst", otherwise gets a bit from "src". For "src" that contains +-0.0 or
    // NaN the whole "src" will be copied because "dst" is zero. For all other
    // "src" values dst is 0x7FF..F, which means only the sign bit is copied
    // from "src", and all other bits are copied from 1.0.
    __ bsl(dst, __ T8B, one, src);
  %}
  ins_pipe(fp_uop_d);
%}

// Math.signum(F): single-precision variant of signumD_reg.
instruct signumF_reg(vRegF dst, vRegF src, vRegF zero, vRegF one) %{
  match(Set dst (SignumF src (Binary zero one)));
  effect(TEMP_DEF dst, USE src, USE zero, USE one);
  format %{ "signumF  $dst, $src" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg),
                  dst = as_FloatRegister($dst$$reg),
                  zero = as_FloatRegister($zero$$reg),
                  one = as_FloatRegister($one$$reg);
    __ facgts(dst, src, zero);    // dst=0 for +-0.0 and NaN. 0xFFF..F otherwise
    __ ushr(dst, __ T2S, dst, 1); // dst=0 for +-0.0 and NaN. 0x7FF..F otherwise
    // Bit selection instruction gets bit from "one" for each enabled bit in
    // "dst", otherwise gets a bit from "src". For "src" that contains +-0.0 or
    // NaN the whole "src" will be copied because "dst" is zero. For all other
    // "src" values dst is 0x7FF..F, which means only the sign bit is copied
    // from "src", and all other bits are copied from 1.0.
    __ bsl(dst, __ T8B, one, src);
  %}
  ins_pipe(fp_uop_d);
%}

// Thread.onSpinWait intrinsic: emits the platform spin-wait hint sequence.
instruct onspinwait() %{
  match(OnSpinWait);
  ins_cost(INSN_COST);

  format %{ "onspinwait" %}

  ins_encode %{
    __ spin_wait();
  %}
  ins_pipe(pipe_class_empty);
%}
14604 
14605 // ============================================================================
14606 // Logical Instructions
14607 
14608 // Integer Logical Instructions
14609 
14610 // And Instructions
14611 
14612 
14613 instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
14614   match(Set dst (AndI src1 src2));
14615 
14616   format %{ "andw  $dst, $src1, $src2\t# int" %}
14617 
14618   ins_cost(INSN_COST);
14619   ins_encode %{
14620     __ andw(as_Register($dst$$reg),
14621             as_Register($src1$$reg),
14622             as_Register($src2$$reg));
14623   %}
14624 
14625   ins_pipe(ialu_reg_reg);
14626 %}
14627 
// 32-bit bitwise AND of a register with a logical immediate (andw).
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Fixed format text: the encoding emits the non-flag-setting "andw",
  // not "andsw" (cf. andI_reg_reg and the other logical-imm rules).
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14642 
// Or Instructions

// 32-bit bitwise OR of two registers (orrw).
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit bitwise OR of a register with a logical immediate (orrw).
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// 32-bit bitwise XOR of two registers (eorw).
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit bitwise XOR of a register with a logical immediate (eorw).
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14706 
14707 // Long Logical Instructions
14708 // TODO
14709 
// 64-bit bitwise AND of two registers (and).
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Fixed format comment: this is the long (64-bit) form, not int.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14724 
// 64-bit bitwise AND of a register with a logical immediate (and).
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Fixed format comment: this is the long (64-bit) form, not int.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14739 
14740 // Or Instructions
14741 
// 64-bit bitwise OR of two registers (orr).
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  // Fixed format comment: this is the long (64-bit) form, not int.
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14756 
// 64-bit bitwise OR of a register with a logical immediate (orr).
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  // Fixed format comment: this is the long (64-bit) form, not int.
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14771 
14772 // Xor Instructions
14773 
// 64-bit bitwise XOR of two registers (eor).
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  // Fixed format comment: this is the long (64-bit) form, not int.
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14788 
14789 instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
14790   match(Set dst (XorL src1 src2));
14791 
14792   ins_cost(INSN_COST);
14793   format %{ "eor  $dst, $src1, $src2\t# int" %}
14794 
14795   ins_encode %{
14796     __ eor(as_Register($dst$$reg),
14797            as_Register($src1$$reg),
14798            (uint64_t)($src2$$constant));
14799   %}
14800 
14801   ins_pipe(ialu_reg_imm);
14802 %}
14803 
// Signed int -> long conversion.  SBFM with immr=0, imms=31 is the SXTW
// alias: sign-extends the low 32 bits into the full 64-bit register.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// Unsigned int -> long: matches (ConvI2L src) & 0xFFFFFFFF as a single
// UBFM (UXTW alias) that zero-extends instead of sign-extending.
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Long -> int truncation: a 32-bit register move keeps only the low word.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
14842 
// Double -> float narrowing conversion (fcvt, double source form).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// Float -> double widening conversion (fcvt, single source form).
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// Float -> int: fcvtzs (word form) converts rounding toward zero,
// which matches Java's f2i truncation semantics.
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// Float -> long: fcvtzs (xword form), round toward zero.
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}
14894 
// Float -> half-float (FP16 bits returned in a GPR).  The macro-assembler
// helper needs an FP temp for the fcvt result before moving it to $dst.
instruct convF2HF_reg_reg(iRegINoSp dst, vRegF src, vRegF tmp) %{
  match(Set dst (ConvF2HF src));
  format %{ "fcvt $tmp, $src\t# convert single to half precision\n\t"
            "smov $dst, $tmp\t# move result from $tmp to $dst"
  %}
  effect(TEMP tmp);
  ins_encode %{
      __ flt_to_flt16($dst$$Register, $src$$FloatRegister, $tmp$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}

// Half-float (FP16 bits in a GPR) -> float.  Mirror of the rule above:
// the GPR source is first moved into the FP temp, then widened.
instruct convHF2F_reg_reg(vRegF dst, iRegINoSp src, vRegF tmp) %{
  match(Set dst (ConvHF2F src));
  format %{ "mov $tmp, $src\t# move source from $src to $tmp\n\t"
            "fcvt $dst, $tmp\t# convert half to single precision"
  %}
  effect(TEMP tmp);
  ins_encode %{
      __ flt16_to_flt($dst$$FloatRegister, $src$$Register, $tmp$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
14918 
// Signed int -> float (scvtf, word source / single destination).
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// Signed long -> float (scvtf, xword source / single destination).
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

// Double -> int: fcvtzs (word form), round toward zero per Java d2i.
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// Double -> long: fcvtzs (xword form), round toward zero per Java d2l.
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// Signed int -> double (scvtf, word source / double destination).
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// Signed long -> double (scvtf, xword source / double destination).
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
14996 
// Math.round(double) -> long.  Delegated to the macro-assembler helper,
// which needs an FP temp and clobbers the flags; dst is TEMP_DEF because
// it is written before the inputs are dead.
instruct round_double_reg(iRegLNoSp dst, vRegD src, vRegD ftmp, rFlagsReg cr)
%{
  match(Set dst (RoundD src));
  effect(TEMP_DEF dst, TEMP ftmp, KILL cr);
  format %{ "java_round_double $dst,$src"%}
  ins_encode %{
    __ java_round_double($dst$$Register, as_FloatRegister($src$$reg),
                         as_FloatRegister($ftmp$$reg));
  %}
  ins_pipe(pipe_slow);
%}

// Math.round(float) -> int.  Same shape as the double rule above.
instruct round_float_reg(iRegINoSp dst, vRegF src, vRegF ftmp, rFlagsReg cr)
%{
  match(Set dst (RoundF src));
  effect(TEMP_DEF dst, TEMP ftmp, KILL cr);
  format %{ "java_round_float $dst,$src"%}
  ins_encode %{
    __ java_round_float($dst$$Register, as_FloatRegister($src$$reg),
                        as_FloatRegister($ftmp$$reg));
  %}
  ins_pipe(pipe_slow);
%}
15020 
// stack <-> reg and reg <-> reg shuffles with no conversion

// MoveF2I from a stack slot: reinterpret the 32 float bits as an int by
// loading the slot directly into a GPR (no conversion, just a bit copy).
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// MoveI2F from a stack slot: load the 32 int bits straight into an FP reg.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// MoveD2L from a stack slot: reinterpret the 64 double bits as a long.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// MoveL2D from a stack slot: load the 64 long bits straight into an FP reg.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
15094 
// MoveF2I to a stack slot: spill the 32 float bits so a later integer
// load can reinterpret them (bit copy, no conversion).
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// MoveI2F to a stack slot: spill the 32 int bits for an FP reload.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
15130 
// MoveD2L to a stack slot: spill the 64 double bits so a later integer
// load can reinterpret them (bit copy, no conversion).
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Fixed operand order in the debug format: the encoding stores $src into
  // stack slot $dst, matching the sibling reg->stack rules ("str... $src, $dst").
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
15148 
// MoveL2D to a stack slot: spill the 64 long bits for an FP reload.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
15166 
// Register-to-register bit moves: fmov copies the raw bits between FP and
// general registers without any numeric conversion.

instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
15238 
15239 // ============================================================================
15240 // clearing of an array
15241 
15242 instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
15243 %{
15244   match(Set dummy (ClearArray cnt base));
15245   effect(USE_KILL cnt, USE_KILL base, KILL cr);
15246 
15247   ins_cost(4 * INSN_COST);
15248   format %{ "ClearArray $cnt, $base" %}
15249 
15250   ins_encode %{
15251     address tpc = __ zero_words($base$$Register, $cnt$$Register);
15252     if (tpc == NULL) {
15253       ciEnv::current()->record_failure("CodeCache is full");
15254       return;
15255     }
15256   %}
15257 
15258   ins_pipe(pipe_class_memory);
15259 %}
15260 
15261 instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, iRegL_R11 temp, Universe dummy, rFlagsReg cr)
15262 %{
15263   predicate((uint64_t)n->in(2)->get_long()
15264             < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
15265   match(Set dummy (ClearArray cnt base));
15266   effect(TEMP temp, USE_KILL base, KILL cr);
15267 
15268   ins_cost(4 * INSN_COST);
15269   format %{ "ClearArray $cnt, $base" %}
15270 
15271   ins_encode %{
15272     address tpc = __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
15273     if (tpc == NULL) {
15274       ciEnv::current()->record_failure("CodeCache is full");
15275       return;
15276     }
15277   %}
15278 
15279   ins_pipe(pipe_class_memory);
15280 %}
15281 
15282 // ============================================================================
15283 // Overflow Math Instructions
15284 
15285 instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
15286 %{
15287   match(Set cr (OverflowAddI op1 op2));
15288 
15289   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
15290   ins_cost(INSN_COST);
15291   ins_encode %{
15292     __ cmnw($op1$$Register, $op2$$Register);
15293   %}
15294 
15295   ins_pipe(icmp_reg_reg);
15296 %}
15297 
15298 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
15299 %{
15300   match(Set cr (OverflowAddI op1 op2));
15301 
15302   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
15303   ins_cost(INSN_COST);
15304   ins_encode %{
15305     __ cmnw($op1$$Register, $op2$$constant);
15306   %}
15307 
15308   ins_pipe(icmp_reg_imm);
15309 %}
15310 
15311 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
15312 %{
15313   match(Set cr (OverflowAddL op1 op2));
15314 
15315   format %{ "cmn   $op1, $op2\t# overflow check long" %}
15316   ins_cost(INSN_COST);
15317   ins_encode %{
15318     __ cmn($op1$$Register, $op2$$Register);
15319   %}
15320 
15321   ins_pipe(icmp_reg_reg);
15322 %}
15323 
15324 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
15325 %{
15326   match(Set cr (OverflowAddL op1 op2));
15327 
15328   format %{ "adds  zr, $op1, $op2\t# overflow check long" %}
15329   ins_cost(INSN_COST);
15330   ins_encode %{
15331     __ adds(zr, $op1$$Register, $op2$$constant);
15332   %}
15333 
15334   ins_pipe(icmp_reg_imm);
15335 %}
15336 
// Overflow check for int subtract: cmpw computes op1 - op2 and sets flags
// (V indicates signed overflow) without writing a result register.
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// As above, with an add/sub-encodable immediate operand.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for long subtract, register form.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Overflow check for long subtract, immediate form: subs to zr is an
// explicit flag-setting subtract that discards the difference.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ subs(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for int negation: matched as 0 - op1.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for long negation: matched as 0 - op1.
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
15414 
// Overflow check for int multiply.  The 64-bit product fits an int iff it
// equals its own low 32 bits sign-extended; the trailing movw/cselw/cmpw
// sequence then re-materializes that NE/EQ answer as the V flag, which is
// what cmpOp's overflow/no_overflow conditions test.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused multiply-overflow-check + branch: when the If tests only
// overflow/no_overflow (see predicate) we can skip materializing V and
// branch directly on the NE/EQ result of the sign-extension compare.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // VS (overflow requested) maps to NE of the compare above; VC maps to EQ.
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}

// Overflow check for long multiply.  The 128-bit product fits a long iff
// the high 64 bits (smulh) are the pure sign extension of the low 64 bits
// (mul); the tail re-materializes that answer as the V flag as above.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused long multiply-overflow-check + branch; same VS->NE / VC->EQ
// mapping as the int branch form above.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
15504 
15505 // ============================================================================
15506 // Compare Instructions
15507 
15508 instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
15509 %{
15510   match(Set cr (CmpI op1 op2));
15511 
15512   effect(DEF cr, USE op1, USE op2);
15513 
15514   ins_cost(INSN_COST);
15515   format %{ "cmpw  $op1, $op2" %}
15516 
15517   ins_encode(aarch64_enc_cmpw(op1, op2));
15518 
15519   ins_pipe(icmp_reg_reg);
15520 %}
15521 
15522 instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
15523 %{
15524   match(Set cr (CmpI op1 zero));
15525 
15526   effect(DEF cr, USE op1);
15527 
15528   ins_cost(INSN_COST);
15529   format %{ "cmpw $op1, 0" %}
15530 
15531   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
15532 
15533   ins_pipe(icmp_reg_imm);
15534 %}
15535 
15536 instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
15537 %{
15538   match(Set cr (CmpI op1 op2));
15539 
15540   effect(DEF cr, USE op1);
15541 
15542   ins_cost(INSN_COST);
15543   format %{ "cmpw  $op1, $op2" %}
15544 
15545   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
15546 
15547   ins_pipe(icmp_reg_imm);
15548 %}
15549 
15550 instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
15551 %{
15552   match(Set cr (CmpI op1 op2));
15553 
15554   effect(DEF cr, USE op1);
15555 
15556   ins_cost(INSN_COST * 2);
15557   format %{ "cmpw  $op1, $op2" %}
15558 
15559   ins_encode(aarch64_enc_cmpw_imm(op1, op2));
15560 
15561   ins_pipe(icmp_reg_imm);
15562 %}
15563 
// Unsigned compare Instructions; really, same as signed compare
// except it should only be used to feed an If or a CMovI which takes a
// cmpOpU.

instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned compare with an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned compare with an arbitrary immediate; the constant may need to
// be materialized first, hence the doubled cost.
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15623 
// Signed long compare, register-register.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed long compare against zero.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare with an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare with an arbitrary immediate; the constant may need
// to be materialized first, hence the doubled cost.
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15679 
// Unsigned long compares: same encodings as the signed CmpL rules, but
// the result is typed rFlagsRegU so it feeds cmpOpU consumers.
instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Arbitrary immediate; the constant may need materializing, hence 2x cost.
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15735 
// Pointer compare (unsigned flags: pointers compare as unsigned values).
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed-oop (narrow pointer) compare.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Pointer null check (compare against the null constant).
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Compressed-oop null check.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
15791 
// FP comparisons
//
// n.b. CmpF/CmpD set a normal flags reg which then gets compared
// using normal cmpOp. See declaration of rFlagsReg for details.

instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Float compare against the 0.0 immediate form of fcmp.
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
15824 // FROM HERE
15825 
// Double compare, register-register.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Double compare against the 0.0 immediate form of fcmp.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
15853 
// Three-way float compare (Java fcmpl semantics): produce -1/0/+1 in an
// int register.  csinvw yields 0 on EQ else -1; csnegw then keeps -1 when
// LT (or unordered, since fcmp sets flags so unordered reads as less here)
// and negates to +1 otherwise.
// NOTE(review): `Label done` is declared and bound but never branched to,
// and the format string's "csinvw(... eq" is missing a closing paren —
// both harmless, debug-only; left as-is.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}

// Three-way double compare; same csinvw/csnegw sequence as compF3_reg_reg.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}

// Three-way float compare against 0.0 (fcmp zero-immediate form).
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}

// Three-way double compare against 0.0 (fcmp zero-immediate form).
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}
15961 
// CmpLTMask: $dst = ($p < $q, signed) ? -1 (all ones) : 0.
// cset produces 0/1, then subtracting from zero turns 1 into -1.
// Kills the flags register.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
15982 
// CmpLTMask against zero: $dst = ($src < 0) ? -1 : 0.  A single
// arithmetic shift right by 31 replicates the sign bit into every bit,
// so no compare is needed (cr is still listed KILL for the match rule).
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
15998 
15999 // ============================================================================
16000 // Max and Min
16001 
16002 // Like compI_reg_reg or compI_reg_immI0 but without match rule and second zero parameter.
16003 
// Flag-setting compare of $src against immediate 0, for use inside
// expand rules (min/max below).  No match rule: only reachable via
// expand, hence the explicit DEF/USE effect instead of a zero operand.
instruct compI_reg_imm0(rFlagsReg cr, iRegI src)
%{
  effect(DEF cr, USE src);
  ins_cost(INSN_COST);
  format %{ "cmpw $src, 0" %}

  ins_encode %{
    __ cmpw($src$$Register, 0);
  %}
  ins_pipe(icmp_reg_imm);
%}
16015 
// MinI: $dst = min($src1, $src2), expanded to a compare followed by a
// conditional select (csel with LT) via the expand rule below.
instruct minI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2)
%{
  match(Set dst (MinI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_lt(dst, src1, src2, cr);
  %}
%}
16027 
// MaxI: $dst = max($src1, $src2), expanded to a compare followed by a
// conditional select (csel with GT) via the expand rule below.
instruct maxI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2)
%{
  match(Set dst (MaxI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_gt(dst, src1, src2, cr);
  %}
%}
16039 
16040 
16041 // ============================================================================
16042 // Branch Instructions
16043 
16044 // Direct Branch.
// Unconditional direct branch (Goto) to $lbl, emitted as a single B.
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
16058 
16059 // Conditional Near Branch
// Conditional near branch on signed condition codes: B.<cond> $lbl.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
16079 
16080 // Conditional Near Branch Unsigned
// Conditional near branch on unsigned condition codes (cmpOpU /
// rFlagsRegU): B.<cond> $lbl.
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
16100 
16101 // Make use of CBZ and CBNZ.  These instructions, as well as being
16102 // shorter than (cmp; branch), have the additional benefit of not
16103 // killing the flags.
16104 
// Fuse (CmpI op1, #0) + branch-eq/ne into a single CBZW/CBNZW.
// Only EQ/NE are matched (cmpOpEqNe), and the flags are NOT modified.
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16121 
// Fuse (CmpL op1, #0) + branch-eq/ne into a single 64-bit CBZ/CBNZ.
// Flags are not modified.
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16138 
// Fuse pointer null-check (CmpP op1, null) + branch-eq/ne into a
// single 64-bit CBZ/CBNZ.  Flags are not modified.
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16155 
// Fuse narrow-oop null-check (CmpN op1, 0) + branch-eq/ne into a
// single 32-bit CBZW/CBNZW.  Flags are not modified.
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16172 
// Null-check of a DecodeN'd narrow oop: test the compressed form
// directly with CBZW/CBNZW, skipping the decode (a compressed oop is
// zero iff the decoded pointer is null).  Flags are not modified.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16189 
// Fuse unsigned (CmpU op1, #0) + branch into CBZW/CBNZW.  For an
// unsigned compare with zero, EQ and LS (<=0) both reduce to "is
// zero", so those conditions take the cbzw path; the remaining
// cmpOpUEqNeLtGe conditions reduce to "is non-zero" (cbnzw).
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16206 
// Fuse unsigned (CmpUL op1, #0) + branch into 64-bit CBZ/CBNZ.
// Same condition folding as cmpUI_imm0_branch: EQ/LS -> cbz, else cbnz.
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16223 
16224 // Test bit and Branch
16225 
16226 // Patterns for short (< 32KiB) variants
// Sign test of a long: (op1 < 0) / (op1 >= 0) is a test of bit 63, so
// emit TBNZ/TBZ on the sign bit.  LT maps to NE (bit set), GE to EQ.
// Short (< 32KiB) variant, selected by ins_short_branch(1).
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16242 
// Sign test of an int: test bit 31 with TBNZ/TBZ (LT -> NE, GE -> EQ).
// Short (< 32KiB) variant, selected by ins_short_branch(1).
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16258 
// Single-bit test of a long: ((op1 & (1 << k)) ==/!= 0) becomes
// TBZ/TBNZ on bit k.  The predicate restricts the mask to a power of
// two so exact_log2_long yields the bit index.  Short variant.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16275 
// Single-bit test of an int: ((op1 & (1 << k)) ==/!= 0) becomes
// TBZ/TBNZ on bit k (mask constrained to a power of two).  Short
// variant.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16292 
16293 // And far variants
// Far variant of cmpL_branch_sign: sign-bit test of a long via tbr
// with far=true (tbr emits an inverted test plus an unconditional
// branch when the target is out of TBZ range).
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16308 
// Far variant of cmpI_branch_sign: sign-bit (bit 31) test of an int
// with far=true for out-of-range targets.
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16323 
// Far variant of cmpL_branch_bit: single-bit test of a long with
// far=true for out-of-range targets.
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16339 
// Far variant of cmpI_branch_bit: single-bit test of an int with
// far=true for out-of-range targets.
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16355 
16356 // Test bits
16357 
// Set flags from ((op1 & op2) cmp 0) with a single TST (ANDS into zr).
// The predicate restricts op2 to constants encodable as a 64-bit
// logical immediate.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
16370 
16371 instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
16372   match(Set cr (CmpI (AndI op1 op2) op3));
16373   predicate(Assembler::operand_valid_for_logical_immediate
16374             (/*is_32*/true, n->in(1)->in(2)->get_int()));
16375 
16376   ins_cost(INSN_COST);
16377   format %{ "tst $op1, $op2 # int" %}
16378   ins_encode %{
16379     __ tstw($op1$$Register, $op2$$constant);
16380   %}
16381   ins_pipe(ialu_reg_reg);
16382 %}
16383 
// Register-register form: set flags from ((op1 & op2) cmp 0) with a
// single 64-bit TST.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
16394 
// Register-register form: set flags from ((op1 & op2) cmp 0) with a
// single 32-bit TSTW.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
16405 
16406 
16407 // Conditional Far Branch
16408 // Conditional Far Branch Unsigned
16409 // TODO: fixme
16410 
16411 // counted loop end branch near
// Conditional branch closing a counted loop (CountedLoopEnd); emitted
// like a plain conditional branch, B.<cond> $lbl.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
16427 
16428 // counted loop end branch far
16429 // TODO: fixme
16430 
16431 // ============================================================================
16432 // inlined locking and unlocking
16433 
16434 instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
16435 %{
16436   match(Set cr (FastLock object box));
16437   effect(TEMP tmp, TEMP tmp2, TEMP tmp3);
16438 
16439   // TODO
16440   // identify correct cost
16441   ins_cost(5 * INSN_COST);
16442   format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}
16443 
16444   ins_encode %{
16445     __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
16446   %}
16447 
16448   ins_pipe(pipe_serial);
16449 %}
16450 
// Inlined monitor exit (FastUnlock): delegates to MacroAssembler::
// fast_unlock, which sets the flags for the following branch.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register);
  %}

  ins_pipe(pipe_serial);
%}
16465 
16466 
16467 // ============================================================================
16468 // Safepoint Instructions
16469 
16470 // TODO
16471 // provide a near and far version of this code
16472 
// Safepoint poll: load from the polling page held in $poll; the VM
// arms the page so the load traps when a safepoint is requested.
instruct safePoint(rFlagsReg cr, iRegP poll)
%{
  match(SafePoint poll);
  effect(KILL cr);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
16486 
16487 
16488 // ============================================================================
16489 // Procedure Call/Return Instructions
16490 
16491 // Call Java Static Instruction
16492 
// Direct call to a statically-bound Java method, followed by the
// standard call epilog encoding.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode(aarch64_enc_java_static_call(meth),
             aarch64_enc_call_epilog);

  ins_pipe(pipe_class_call);
%}
16508 
16509 // TO HERE
16510 
16511 // Call Java Dynamic Instruction
// Dynamically-dispatched Java call (inline-cache dispatch), followed
// by the standard call epilog encoding.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode(aarch64_enc_java_dynamic_call(meth),
             aarch64_enc_call_epilog);

  ins_pipe(pipe_class_call);
%}
16527 
16528 // Call Runtime Instruction
16529 
// Call from compiled Java code into the VM runtime.
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16544 
16545 // Call Runtime Instruction
16546 
// Call to a runtime leaf routine (no safepoint, no Java frame walk);
// uses the same java-to-runtime encoding as CallRuntimeDirect.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16561 
16562 // Call Runtime Instruction
16563 
// Call to a runtime leaf routine that does not use floating-point
// registers; same encoding as the other runtime calls.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16578 
16579 // Tail Call; Jump from runtime stub to Java code.
16580 // Also known as an 'interprocedural jump'.
16581 // Target of jump will eventually return to caller.
16582 // TailJump below removes the return address.
16583 // Don't use rfp for 'jump_target' because a MachEpilogNode has already been
16584 // emitted just above the TailCall which has reset rfp to the caller state.
// Tail call: indirect jump (BR) to $jump_target with the Method* in
// the inline-cache register; see the comment block above for why rfp
// is excluded from jump_target.
instruct TailCalljmpInd(iRegPNoSpNoRfp jump_target, inline_cache_RegP method_ptr)
%{
  match(TailCall jump_target method_ptr);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_ptr holds method" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
16597 
// Tail jump used for exception forwarding: indirect jump to
// $jump_target with the exception oop pinned in r0.
instruct TailjmpInd(iRegPNoSpNoRfp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
16610 
16611 // Create exception oop: created by stack-crawling runtime code.
16612 // Created exception is now available to this handler, and is setup
16613 // just prior to jumping to this handler. No code emitted.
16614 // TODO check
16615 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// CreateEx: register-allocation marker binding the incoming exception
// oop to r0.  Emits no code (size 0); the oop is placed by the
// stack-crawling runtime before control reaches the handler.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
16628 
16629 // Rethrow exception: The exception oop will come in the first
16630 // argument position. Then JUMP (not call) to the rethrow stub code.
// Rethrow: jump (not call) to the rethrow stub; the exception oop is
// already in the first argument register per the comment above.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
16641 
16642 
16643 // Return Instruction
16644 // epilog node loads ret address into lr as part of frame pop
// Method return: RET via lr, which the epilog has already reloaded
// with the caller's return address.
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
16655 
16656 // Die now.
// Halt node: emit a stop (fatal trap with _halt_reason) only if the
// node is reachable; unreachable Halt nodes emit nothing.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    if (is_reachable()) {
      __ stop(_halt_reason);
    }
  %}

  ins_pipe(pipe_class_default);
%}
16671 
16672 // ============================================================================
16673 // Partial Subtype Check
16674 //
// Scan the sub-klass's secondary-supers (superklass) array for an
// instance of the superklass.  Set a hidden internal cache on a hit
// (cache is checked with exposed code in gen_subtype_check()).  Return
// NZ for a miss or zero for a hit.  The encoding ALSO sets flags.
16679 
// Partial subtype check with fixed register bindings (sub=r4,
// super=r0, temp=r2, result=r5); opcode 0x1 asks the encoding to zero
// the result register on a hit.  Sets flags as a side effect.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
16694 
// Partial subtype check against a constant superklass, using the
// secondary-supers hash table (guarded by UseSecondarySupersTable).
// Either inlines the table lookup or emits a trampoline call to the
// per-slot stub; bails out with "CodeCache is full" if neither can be
// emitted.
instruct partialSubtypeCheckConstSuper(iRegP_R4 sub, iRegP_R0 super_reg, immP super_con, vRegD_V0 vtemp, iRegP_R5 result,
                                       iRegP_R1 tempR1, iRegP_R2 tempR2, iRegP_R3 tempR3,
                                       rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub (Binary super_reg super_con)));
  predicate(UseSecondarySupersTable);
  effect(KILL cr, TEMP tempR1, TEMP tempR2, TEMP tempR3, TEMP vtemp);

  ins_cost(700);  // smaller than the next version
  format %{ "partialSubtypeCheck $result, $sub, super" %}

  ins_encode %{
    bool success = false;
    // Hash slot is computed at compile time from the constant klass.
    u1 super_klass_slot = ((Klass*)$super_con$$constant)->hash_slot();
    if (InlineSecondarySupersTest) {
      success = __ lookup_secondary_supers_table($sub$$Register, $super_reg$$Register,
                                                 $tempR1$$Register, $tempR2$$Register, $tempR3$$Register,
                                                 $vtemp$$FloatRegister,
                                                 $result$$Register,
                                                 super_klass_slot);
    } else {
      address call = __ trampoline_call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_stub(super_klass_slot)));
      success = (call != nullptr);
    }
    if (!success) {
      // Could not emit the trampoline/stub call: abandon compilation.
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
16727 
// Fused (PartialSubtypeCheck == null) producing only flags; opcode 0x0
// tells the encoding not to zero the result register on a hit, since
// only the flag outcome is consumed.
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
16742 
// Intrinsics for String.compareTo()
16744 
16745 instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
16746                         iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
16747 %{
16748   predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU));
16749   match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
16750   effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
16751 
16752   format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
16753   ins_encode %{
16754     // Count is in 8-bit bytes; non-Compact chars are 16 bits.
16755     __ string_compare($str1$$Register, $str2$$Register,
16756                       $cnt1$$Register, $cnt2$$Register, $result$$Register,
16757                       $tmp1$$Register, $tmp2$$Register,
16758                       fnoreg, fnoreg, fnoreg, pnoreg, pnoreg, StrIntrinsicNode::UU);
16759   %}
16760   ins_pipe(pipe_class_memory);
16761 %}
16762 
16763 instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
16764                         iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
16765 %{
16766   predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL));
16767   match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
16768   effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
16769 
16770   format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
16771   ins_encode %{
16772     __ string_compare($str1$$Register, $str2$$Register,
16773                       $cnt1$$Register, $cnt2$$Register, $result$$Register,
16774                       $tmp1$$Register, $tmp2$$Register,
16775                       fnoreg, fnoreg, fnoreg, pnoreg, pnoreg, StrIntrinsicNode::LL);
16776   %}
16777   ins_pipe(pipe_class_memory);
16778 %}
16779 
// String.compareTo intrinsic, UTF-16 vs Latin-1 (UL), NEON-only path
// (UseSVE == 0).  The mixed-encoding compare additionally needs three
// vector temps for widening the Latin-1 side.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, pnoreg, pnoreg, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16799 
// String.compareTo intrinsic, Latin-1 vs UTF-16 (LU), NEON-only path
// (UseSVE == 0).  Mirror of the UL variant with the encodings swapped.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, pnoreg, pnoreg, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
16819 
16820 // Note that Z registers alias the corresponding NEON registers, we declare the vector operands of
16821 // these string_compare variants as NEON register type for convenience so that the prototype of
16822 // string_compare can be shared with all variants.
16823 
// StrComp intrinsic, LL encoding, SVE path (UseSVE > 0).
// vtmp1/vtmp2 are declared as NEON D registers but alias the SVE Z
// registers (see comment above); fnoreg is passed for the unused third
// vector temp, and two governing predicate temps (P0/P1) are supplied.
instruct string_compareLL_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16846 
// StrComp intrinsic, LU encoding, SVE path (UseSVE > 0).
// Same register layout as string_compareLL_sve; only the intrinsic
// encoding passed to string_compare differs.
instruct string_compareLU_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
16869 
// StrComp intrinsic, UL encoding, SVE path (UseSVE > 0).
// Same register layout as string_compareLL_sve; only the intrinsic
// encoding passed to string_compare differs.
instruct string_compareUL_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16892 
// StrComp intrinsic, UU encoding, SVE path (UseSVE > 0).
// Same register layout as string_compareLL_sve; only the intrinsic
// encoding passed to string_compare differs.
instruct string_compareUU_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16915 
// StrIndexOf intrinsic, UU encoding, variable substring length.
// Inputs: str1=R1, cnt1=R4, str2=R3, cnt2=R2, result=R0; six GP temps plus
// V0-V1 are used as scratch. The -1 passed to string_indexof signals that
// the substring length is not a compile-time constant (contrast with the
// string_indexof_con* variants below).
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                          iRegINoSp tmp3, iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
         TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16939 
// StrIndexOf intrinsic, LL encoding, variable substring length.
// Same shape as string_indexofUU; only the encoding argument differs.
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
         TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16963 
// StrIndexOf intrinsic, UL encoding, variable substring length.
// Same shape as string_indexofUU/LL; only the encoding argument differs.
// Fixes vs. previous revision: the format string referenced "cnt1"
// without the '$' sigil, so ADLC emitted it as literal text instead of
// substituting the operand (the UU/LL siblings use "$cnt1"); also added
// the missing space after "tmp2," in the operand list for consistency.
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5,
         TEMP tmp6, TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    // -1: substring length is not a compile-time constant.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16987 
// StrIndexOf intrinsic, UU encoding, with a small constant substring
// length (immI_le_4, i.e. at most 4). The constant is passed as icnt2 and
// zr substitutes for the unused cnt2 and the last two temp registers.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
17009 
// StrIndexOf intrinsic, LL encoding, with a small constant substring
// length (immI_le_4). Same shape as string_indexof_conUU.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
17031 
// StrIndexOf intrinsic, UL encoding, constant substring length.
// NOTE: unlike the UU/LL constant variants (immI_le_4), this one only
// matches a substring length of exactly 1 (immI_1) — presumably the
// mixed-encoding helper only supports the single-char constant case;
// confirm against C2_MacroAssembler::string_indexof.
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
17053 
// StrIndexOfChar intrinsic for UTF-16 strings (encoding U), NEON path
// (UseSVE == 0). Inputs: str1=R1, cnt1=R2, ch=R3, result=R0.
instruct string_indexof_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                             iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                             iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  predicate((UseSVE == 0) && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "StringUTF16 IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
17072 
// StrIndexOfChar intrinsic for Latin1 strings (encoding L), NEON path
// (UseSVE == 0). Same register layout as string_indexof_char; dispatches
// to the Latin1-specific assembler routine.
instruct stringL_indexof_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  predicate((UseSVE == 0) && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "StringLatin1 IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ stringL_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                            $result$$Register, $tmp1$$Register, $tmp2$$Register,
                            $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
17091 
// StrIndexOfChar intrinsic for Latin1 strings, SVE path (UseSVE > 0).
// Uses two scalable vector temps and two predicate temps; unlike the NEON
// variants this does not USE_KILL the inputs — the SVE helper apparently
// preserves them (no GP temps are declared either).
instruct stringL_indexof_char_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                                  iRegI_R0 result, vecA ztmp1, vecA ztmp2,
                                  pRegGov pgtmp, pReg ptmp, rFlagsReg cr) %{
  predicate(UseSVE > 0 && ((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L);
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(TEMP ztmp1, TEMP ztmp2, TEMP pgtmp, TEMP ptmp, KILL cr);
  format %{ "StringLatin1 IndexOf char[] $str1,$cnt1,$ch -> $result # use sve" %}
  ins_encode %{
    // Shared helper; the trailing bool selects Latin1 (isL = true).
    __ string_indexof_char_sve($str1$$Register, $cnt1$$Register, $ch$$Register,
                               $result$$Register, $ztmp1$$FloatRegister,
                               $ztmp2$$FloatRegister, $pgtmp$$PRegister,
                               $ptmp$$PRegister, true /* isL */);
  %}
  ins_pipe(pipe_class_memory);
%}
17107 
// StrIndexOfChar intrinsic for UTF-16 strings, SVE path (UseSVE > 0).
// Mirror of stringL_indexof_char_sve with isL = false.
instruct stringU_indexof_char_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                                  iRegI_R0 result, vecA ztmp1, vecA ztmp2,
                                  pRegGov pgtmp, pReg ptmp, rFlagsReg cr) %{
  predicate(UseSVE > 0 && ((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U);
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(TEMP ztmp1, TEMP ztmp2, TEMP pgtmp, TEMP ptmp, KILL cr);
  format %{ "StringUTF16 IndexOf char[] $str1,$cnt1,$ch -> $result # use sve" %}
  ins_encode %{
    __ string_indexof_char_sve($str1$$Register, $cnt1$$Register, $ch$$Register,
                               $result$$Register, $ztmp1$$FloatRegister,
                               $ztmp2$$FloatRegister, $pgtmp$$PRegister,
                               $ptmp$$PRegister, false /* isL */);
  %}
  ins_pipe(pipe_class_memory);
%}
17123 
// StrEquals intrinsic, LL encoding. The trailing 1 passed to
// string_equals is presumably the element size in bytes (Latin1) —
// confirm against C2_MacroAssembler::string_equals.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
17139 
// StrEquals intrinsic, UU encoding. The trailing 2 passed to
// string_equals is presumably the element size in bytes (UTF-16) —
// confirm against C2_MacroAssembler::string_equals.
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
17155 
// AryEq intrinsic for byte arrays (LL encoding). arrays_equals may emit a
// stub call; a null return means the code cache is full, which is
// reported back to the compiler so the compilation bails out cleanly.
// Fixes vs. previous revision: format string referenced "ary2" without
// the '$' sigil (operand was printed literally); NULL changed to nullptr
// for consistency with arrays_hashcode below.
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                       vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3,
         TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         TEMP vtmp6, TEMP vtmp7, KILL cr);

  format %{ "Array Equals $ary1,$ary2 -> $result # KILL $ary1 $ary2 $tmp $tmp1 $tmp2 $tmp3 V0-V7 cr" %}
  ins_encode %{
    // Trailing 1: byte-sized elements.
    address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register,
                                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                                   $result$$Register, $tmp$$Register, 1);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
17180 
// AryEq intrinsic for char arrays (UU encoding). Mirror of array_equalsB
// with 2-byte elements.
// Fixes vs. previous revision: format string referenced "ary2" without
// the '$' sigil (operand was printed literally); NULL changed to nullptr
// for consistency with arrays_hashcode below.
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                       vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3,
         TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         TEMP vtmp6, TEMP vtmp7, KILL cr);

  format %{ "Array Equals $ary1,$ary2 -> $result # KILL $ary1 $ary2 $tmp $tmp1 $tmp2 $tmp3 V0-V7 cr" %}
  ins_encode %{
    // Trailing 2: char-sized (2-byte) elements.
    address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register,
                                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                                   $result$$Register, $tmp$$Register, 2);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
17205 
// VectorizedHashCode intrinsic: computes the hash of an array of the
// element type given by the basic_type constant. result is both an input
// (initial hash) and the output.
instruct arrays_hashcode(iRegP_R1 ary, iRegI_R2 cnt, iRegI_R0 result, immI basic_type,
                         vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                         vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                         vRegD_V12 vtmp8, vRegD_V13 vtmp9, rFlagsReg cr)
%{
  match(Set result (VectorizedHashCode (Binary ary cnt) (Binary result basic_type)));
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5, TEMP vtmp6,
         TEMP vtmp7, TEMP vtmp8, TEMP vtmp9, USE_KILL ary, USE_KILL cnt, USE basic_type, KILL cr);

  format %{ "Array HashCode array[] $ary,$cnt,$result,$basic_type -> $result   // KILL all" %}
  ins_encode %{
    // NOTE(review): vtmp3..vtmp0 are deliberately passed in reverse
    // order ahead of vtmp4..vtmp9 — presumably matching the parameter
    // ordering expected by arrays_hashcode; confirm against
    // C2_MacroAssembler::arrays_hashcode before "fixing".
    address tpc = __ arrays_hashcode($ary$$Register, $cnt$$Register, $result$$Register,
                                     $vtmp3$$FloatRegister, $vtmp2$$FloatRegister,
                                     $vtmp1$$FloatRegister, $vtmp0$$FloatRegister,
                                     $vtmp4$$FloatRegister, $vtmp5$$FloatRegister,
                                     $vtmp6$$FloatRegister, $vtmp7$$FloatRegister,
                                     $vtmp8$$FloatRegister, $vtmp9$$FloatRegister,
                                     (BasicType)$basic_type$$constant);
    // A null trampoline address means the code cache is full; bail out.
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
17231 
// CountPositives intrinsic: counts leading non-negative bytes in a byte
// array (used by string coders). count_positives may emit a stub call; a
// null return means the code cache is full.
// Fixes vs. previous revision: NULL changed to nullptr for consistency
// with arrays_hashcode above; normalized ins_pipe spacing to match the
// surrounding instructs.
instruct count_positives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (CountPositives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "count positives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    address tpc = __ count_positives($ary1$$Register, $len$$Register, $result$$Register);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_slow);
%}
17246 
17247 // fast char[] to byte[] compression
// StrCompressedCopy intrinsic: compress a char[] (UTF-16) into a byte[]
// (Latin1). src=R2, dst=R1, len=R3, result=R0; V0-V5 are scratch.
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                         vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         USE_KILL src, USE_KILL dst, USE len, KILL cr);

  format %{ "String Compress $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $result$$Register, $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                           $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                           $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
17266 
17267 // fast byte[] to char[] inflation
// StrInflatedCopy intrinsic: inflate a byte[] (Latin1) into a char[]
// (UTF-16). byte_array_inflate may emit a stub call; a null return means
// the code cache is full.
// NOTE(review): vtmp3..vtmp6 are declared TEMP but only V0-V2 are passed
// explicitly — presumably the assembler routine uses the higher V
// registers internally; confirm against byte_array_inflate before
// trimming the effect list.
// Fix vs. previous revision: NULL changed to nullptr for consistency
// with arrays_hashcode above.
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len, iRegP_R3 tmp,
                        vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                        vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3,
         TEMP vtmp4, TEMP vtmp5, TEMP vtmp6, TEMP tmp,
         USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst # KILL $tmp $src $dst $len V0-V6 cr" %}
  ins_encode %{
    address tpc = __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                                        $vtmp2$$FloatRegister, $tmp$$Register);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
17289 
17290 // encode char[] to byte[] in ISO_8859_1
// EncodeISOArray intrinsic, ISO-8859-1 flavor (is_ascii() false). The
// second bool argument to encode_iso_array selects ASCII-only mode;
// false here means full Latin-1 range.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                          vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                          iRegI_R0 result, rFlagsReg cr)
%{
  predicate(!((EncodeISOArrayNode*)n)->is_ascii());
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE len, KILL vtmp0, KILL vtmp1,
         KILL vtmp2, KILL vtmp3, KILL vtmp4, KILL vtmp5, KILL cr);

  format %{ "Encode ISO array $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
                        $result$$Register, false,
                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                        $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                        $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_class_memory);
%}
17311 
// EncodeISOArray intrinsic, ASCII flavor (is_ascii() true). Mirror of
// encode_iso_array with the ascii flag set to true.
instruct encode_ascii_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                            vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                            vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                            iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((EncodeISOArrayNode*)n)->is_ascii());
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE len, KILL vtmp0, KILL vtmp1,
         KILL vtmp2, KILL vtmp3, KILL vtmp4, KILL vtmp5, KILL cr);

  format %{ "Encode ASCII array $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
                        $result$$Register, true,
                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                        $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                        $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_class_memory);
%}
17332 
17333 //----------------------------- CompressBits/ExpandBits ------------------------
17334 
// CompressBits (Integer.compress) via SVE2 BITPERM BEXT: move src and
// mask into S lanes of vector temps, run sve_bext, move the result back.
// No predicate here — assumed gated by Matcher::match_rule_supported
// (SVE2 bitperm support); confirm.
instruct compressBitsI_reg(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ S, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ S, 0, $mask$$Register);
    __ sve_bext($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17352 
// CompressBits with a memory source and constant mask: load the int
// operand straight into an S lane and materialize the mask from the
// constant pool, then BEXT as in compressBitsI_reg.
instruct compressBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits (LoadI mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrs   $tsrc, $mem\n\t"
            "ldrs   $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ ldrs($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bext($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17371 
// CompressBits (Long.compress) via SVE2 BEXT on D (64-bit) lanes;
// 64-bit analogue of compressBitsI_reg.
instruct compressBitsL_reg(iRegLNoSp dst, iRegL src, iRegL mask,
                           vRegD tdst, vRegD tsrc, vRegD tmask) %{
  match(Set dst (CompressBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ D, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ D, 0, $mask$$Register);
    __ sve_bext($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17389 
// CompressBits (long) with memory source and constant mask: 64-bit
// analogue of compressBitsI_memcon (ldrd + BEXT on D lanes). The vRegF
// operand type is fine here — only the low V-register lanes are used.
instruct compressBitsL_memcon(iRegLNoSp dst, memory8 mem, immL mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits (LoadL mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrd   $tsrc, $mem\n\t"
            "ldrd   $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ ldrd($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bext($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17408 
// ExpandBits (Integer.expand) via SVE2 BITPERM BDEP on S lanes; mirror
// of compressBitsI_reg with bdep instead of bext.
instruct expandBitsI_reg(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask,
                         vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ S, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ S, 0, $mask$$Register);
    __ sve_bdep($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17426 
// Expand (deposit) the bits of an int value loaded from memory into the bit
// positions selected by a constant mask, using the SVE2 BDEP instruction.
// Matches: dst = ExpandBits(LoadI(mem), mask)
// Folds the load into this rule: the source is loaded from memory directly
// into an FP/SIMD temp, and the constant mask comes from the constant pool.
instruct expandBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask,
                         vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits (LoadI mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrs   $tsrc, $mem\n\t"
            "ldrs   $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    // Load the 32-bit source operand from memory straight into the FP temp.
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    // Materialize the constant mask from the constant pool.
    __ ldrs($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bdep($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    // Copy the expanded result from lane 0 back to the GP destination.
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17445 
// Expand (deposit) the low-order bits of a long value into the bit positions
// selected by the mask, using the SVE2 BDEP instruction.
// Matches: dst = ExpandBits(src, mask)
// The scalar operands are staged through FP/SIMD temporaries (lane 0, D size)
// because BDEP operates on vector registers only.
instruct expandBitsL_reg(iRegLNoSp dst, iRegL src, iRegL mask,
                         vRegD tdst, vRegD tsrc, vRegD tmask) %{
  match(Set dst (ExpandBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    // Move the 64-bit source and mask into lane 0 (D) of the FP temps.
    __ mov($tsrc$$FloatRegister, __ D, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ D, 0, $mask$$Register);
    __ sve_bdep($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    // Copy the expanded result from lane 0 back to the GP destination.
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17463 
17464 
// Expand (deposit) the bits of a long value loaded from memory into the bit
// positions selected by a constant mask, using the SVE2 BDEP instruction.
// Matches: dst = ExpandBits(LoadL(mem), mask)
// Fixes:
//  - dst must be iRegLNoSp (ideal RegL): this rule matches a long-producing
//    node and the encoding writes a 64-bit result, so an int register class
//    (iRegINoSp, ideal RegI) mismatches the node type. Matches the register
//    variant expandBitsL_reg.
//  - TEMP vector operands are vRegD (not vRegF) so a spill preserves the
//    full 64-bit D-lane value, consistent with the other long-variant rules.
instruct expandBitsL_memcon(iRegLNoSp dst, memory8 mem, immL mask,
                            vRegD tdst, vRegD tsrc, vRegD tmask) %{
  match(Set dst (ExpandBits (LoadL mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrd   $tsrc, $mem\n\t"
            "ldrd   $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    // Load the 64-bit source operand from memory straight into the FP temp.
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    // Materialize the constant mask from the constant pool.
    __ ldrd($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bdep($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    // Copy the expanded result from lane 0 back to the GP destination.
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17483 
17484 // ============================================================================
17485 // This name is KNOWN by the ADLC and cannot be changed.
17486 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
17487 // for this guy.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  // Costs nothing: no code is emitted (see size(0) below).
  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  // Zero-size rule: dst is constrained to thread_RegP, which already holds
  // Thread::current(), so no instruction needs to be emitted.
  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
17502 
17503 //----------PEEPHOLE RULES-----------------------------------------------------
17504 // These must follow all instruction definitions as they use the names
17505 // defined in the instructions definitions.
17506 //
17507 // peepmatch ( root_instr_name [preceding_instruction]* );
17508 //
17509 // peepconstraint %{
17510 // (instruction_number.operand_name relational_op instruction_number.operand_name
17511 //  [, ...] );
17512 // // instruction numbers are zero-based using left to right order in peepmatch
17513 //
17514 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
17515 // // provide an instruction_number.operand_name for each operand that appears
17516 // // in the replacement instruction's match rule
17517 //
17518 // ---------VM FLAGS---------------------------------------------------------
17519 //
17520 // All peephole optimizations can be turned off using -XX:-OptoPeephole
17521 //
17522 // Each peephole rule is given an identifying number starting with zero and
17523 // increasing by one in the order seen by the parser.  An individual peephole
17524 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
17525 // on the command-line.
17526 //
17527 // ---------CURRENT LIMITATIONS----------------------------------------------
17528 //
17529 // Only match adjacent instructions in same basic block
17530 // Only equality constraints
17531 // Only constraints between operands, not (0.dest_reg == RAX_enc)
17532 // Only one replacement instruction
17533 //
17534 // ---------EXAMPLE----------------------------------------------------------
17535 //
17536 // // pertinent parts of existing instructions in architecture description
17537 // instruct movI(iRegINoSp dst, iRegI src)
17538 // %{
17539 //   match(Set dst (CopyI src));
17540 // %}
17541 //
17542 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
17543 // %{
17544 //   match(Set dst (AddI dst src));
17545 //   effect(KILL cr);
17546 // %}
17547 //
17548 // // Change (inc mov) to lea
17549 // peephole %{
17550 //   // increment preceded by register-register move
17551 //   peepmatch ( incI_iReg movI );
17552 //   // require that the destination register of the increment
17553 //   // match the destination register of the move
17554 //   peepconstraint ( 0.dst == 1.dst );
17555 //   // construct a replacement instruction that sets
17556 //   // the destination to ( move's source register + one )
17557 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
17558 // %}
17559 //
17560 
17561 // Implementation no longer uses movX instructions since
17562 // machine-independent system no longer uses CopyX nodes.
17563 //
17564 // peephole
17565 // %{
17566 //   peepmatch (incI_iReg movI);
17567 //   peepconstraint (0.dst == 1.dst);
17568 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17569 // %}
17570 
17571 // peephole
17572 // %{
17573 //   peepmatch (decI_iReg movI);
17574 //   peepconstraint (0.dst == 1.dst);
17575 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17576 // %}
17577 
17578 // peephole
17579 // %{
17580 //   peepmatch (addI_iReg_imm movI);
17581 //   peepconstraint (0.dst == 1.dst);
17582 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17583 // %}
17584 
17585 // peephole
17586 // %{
17587 //   peepmatch (incL_iReg movL);
17588 //   peepconstraint (0.dst == 1.dst);
17589 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17590 // %}
17591 
17592 // peephole
17593 // %{
17594 //   peepmatch (decL_iReg movL);
17595 //   peepconstraint (0.dst == 1.dst);
17596 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17597 // %}
17598 
17599 // peephole
17600 // %{
17601 //   peepmatch (addL_iReg_imm movL);
17602 //   peepconstraint (0.dst == 1.dst);
17603 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17604 // %}
17605 
17606 // peephole
17607 // %{
17608 //   peepmatch (addP_iReg_imm movP);
17609 //   peepconstraint (0.dst == 1.dst);
17610 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
17611 // %}
17612 
17613 // // Change load of spilled value to only a spill
17614 // instruct storeI(memory mem, iRegI src)
17615 // %{
17616 //   match(Set mem (StoreI mem src));
17617 // %}
17618 //
17619 // instruct loadI(iRegINoSp dst, memory mem)
17620 // %{
17621 //   match(Set dst (LoadI mem));
17622 // %}
17623 //
17624 
17625 //----------SMARTSPILL RULES---------------------------------------------------
17626 // These must follow all instruction definitions as they use the names
17627 // defined in the instructions definitions.
17628 
17629 // Local Variables:
17630 // mode: c++
17631 // End: