1 //
    2 // Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
    3 // Copyright (c) 2014, 2024, Red Hat, Inc. All rights reserved.
    4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    5 //
    6 // This code is free software; you can redistribute it and/or modify it
    7 // under the terms of the GNU General Public License version 2 only, as
    8 // published by the Free Software Foundation.
    9 //
   10 // This code is distributed in the hope that it will be useful, but WITHOUT
   11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   13 // version 2 for more details (a copy is included in the LICENSE file that
   14 // accompanied this code).
   15 //
   16 // You should have received a copy of the GNU General Public License version
   17 // 2 along with this work; if not, write to the Free Software Foundation,
   18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   19 //
   20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   21 // or visit www.oracle.com if you need additional information or have any
   22 // questions.
   23 //
   24 //
   25 
   26 // AArch64 Architecture Description File
   27 
   28 //----------REGISTER DEFINITION BLOCK------------------------------------------
   29 // This information is used by the matcher and the register allocator to
   30 // describe individual registers and classes of registers within the target
   31 // architecture.
   32 
   33 register %{
   34 //----------Architecture Description Register Definitions----------------------
   35 // General Registers
   36 // "reg_def"  name ( register save type, C convention save type,
   37 //                   ideal register type, encoding );
   38 // Register Save Types:
   39 //
   40 // NS  = No-Save:       The register allocator assumes that these registers
   41 //                      can be used without saving upon entry to the method, &
   42 //                      that they do not need to be saved at call sites.
   43 //
   44 // SOC = Save-On-Call:  The register allocator assumes that these registers
   45 //                      can be used without saving upon entry to the method,
   46 //                      but that they must be saved at call sites.
   47 //
   48 // SOE = Save-On-Entry: The register allocator assumes that these registers
   49 //                      must be saved before using them upon entry to the
   50 //                      method, but they do not need to be saved at call
   51 //                      sites.
   52 //
   53 // AS  = Always-Save:   The register allocator assumes that these registers
   54 //                      must be saved before using them upon entry to the
   55 //                      method, & that they must be saved at call sites.
   56 //
   57 // Ideal Register Type is used to determine how to save & restore a
   58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
   59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
   60 //
   61 // The encoding number is the actual bit-pattern placed into the opcodes.
   62 
   63 // We must define the 64 bit int registers in two 32 bit halves, the
   64 // real lower register and a virtual upper half register. upper halves
   65 // are used by the register allocator but are not actually supplied as
   66 // operands to memory ops.
   67 //
   68 // follow the C1 compiler in making registers
   69 //
//   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
//   r8-r9 non-allocatable (so we can use them as scratch regs)
   73 //
   74 // as regards Java usage. we don't use any callee save registers
   75 // because this makes it difficult to de-optimise a frame (see comment
   76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
   77 //
   78 
   79 // General Registers
   80 
// r0-r7: argument/result registers, caller-save (SOC) for both Java and C.
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
reg_def R8      ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()         ); // rscratch1, non-allocatable
reg_def R8_H    ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()->next() );
reg_def R9      ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()         ); // rscratch2, non-allocatable
reg_def R9_H    ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()->next() );
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
// r18 (r18_tls): platform-reserved on some ABIs (e.g. TLS on Windows);
// excluded from allocation when R18_RESERVED is defined -- see
// reg_class non_allocatable_reg below.
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18_tls->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18_tls->as_VMReg()->next());
// r19-r27: callee-save for C calls (SOE) but treated as caller-save (SOC)
// for Java code -- see the deoptimisation note above.
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
reg_def R27     ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()->next());
// r28-r31: system registers (thread, fp, lr, sp); NS = never allocated for
// scratch use by Java code.
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
  145 
  146 // ----------------------------
  147 // Float/Double/Vector Registers
  148 // ----------------------------
  149 
  150 // Double Registers
  151 
  152 // The rules of ADL require that double registers be defined in pairs.
  153 // Each pair must be two 32-bit values, but not necessarily a pair of
  154 // single float registers. In each pair, ADLC-assigned register numbers
  155 // must be adjacent, with the lower number even. Finally, when the
  156 // CPU stores such a register pair to memory, the word associated with
  157 // the lower ADLC-assigned number must be stored to the lower address.
  158 
  159 // AArch64 has 32 floating-point registers. Each can store a vector of
  160 // single or double precision floating-point values up to 8 * 32
  161 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
  162 // use the first float or double element of the vector.
  163 
// For Java use, float registers v0-v15 are always save-on-call, whereas
// the platform ABI treats v8-v15 (low 64 bits only) as callee-save.
// Float registers v16-v31 are SOC as per the platform spec.
  167 
  168 // For SVE vector registers, we simply extend vector register size to 8
  169 // 'logical' slots. This is nominally 256 bits but it actually covers
  170 // all possible 'physical' SVE vector register lengths from 128 ~ 2048
  171 // bits. The 'physical' SVE vector register length is detected during
  172 // startup, so the register allocator is able to identify the correct
  173 // number of bytes needed for an SVE spill/unspill.
  174 // Note that a vector register with 4 slots denotes a 128-bit NEON
  175 // register allowing it to be distinguished from the corresponding SVE
  176 // vector register when the SVE vector length is 128 bits.
  177 
  // v0-v7: FP/SIMD argument and result registers, caller-save.
  reg_def V0   ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(3) );

  // v8-v15: the C ABI preserves only the low 64 bits, so just the first
  // two 32-bit slots (Vn, Vn_H) are SOE; the upper quarters (_J, _K)
  // remain caller-save.
  reg_def V8   ( SOC, SOE, Op_RegF, 8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOE, Op_RegF, 8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOE, Op_RegF, 9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOE, Op_RegF, 9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(3) );

  reg_def V10   ( SOC, SOE, Op_RegF, 10, v10->as_VMReg()          );
  reg_def V10_H ( SOC, SOE, Op_RegF, 10, v10->as_VMReg()->next()  );
  reg_def V10_J ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2) );
  reg_def V10_K ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3) );

  reg_def V11   ( SOC, SOE, Op_RegF, 11, v11->as_VMReg()          );
  reg_def V11_H ( SOC, SOE, Op_RegF, 11, v11->as_VMReg()->next()  );
  reg_def V11_J ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2) );
  reg_def V11_K ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3) );

  reg_def V12   ( SOC, SOE, Op_RegF, 12, v12->as_VMReg()          );
  reg_def V12_H ( SOC, SOE, Op_RegF, 12, v12->as_VMReg()->next()  );
  reg_def V12_J ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2) );
  reg_def V12_K ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3) );

  reg_def V13   ( SOC, SOE, Op_RegF, 13, v13->as_VMReg()          );
  reg_def V13_H ( SOC, SOE, Op_RegF, 13, v13->as_VMReg()->next()  );
  reg_def V13_J ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2) );
  reg_def V13_K ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3) );

  reg_def V14   ( SOC, SOE, Op_RegF, 14, v14->as_VMReg()          );
  reg_def V14_H ( SOC, SOE, Op_RegF, 14, v14->as_VMReg()->next()  );
  reg_def V14_J ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2) );
  reg_def V14_K ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3) );

  reg_def V15   ( SOC, SOE, Op_RegF, 15, v15->as_VMReg()          );
  reg_def V15_H ( SOC, SOE, Op_RegF, 15, v15->as_VMReg()->next()  );
  reg_def V15_J ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2) );
  reg_def V15_K ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3) );

  // v16-v31: caller-save in all conventions.
  reg_def V16   ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()          );
  reg_def V16_H ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next()  );
  reg_def V16_J ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2) );
  reg_def V16_K ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3) );

  reg_def V17   ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()          );
  reg_def V17_H ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next()  );
  reg_def V17_J ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2) );
  reg_def V17_K ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3) );

  reg_def V18   ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()          );
  reg_def V18_H ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next()  );
  reg_def V18_J ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2) );
  reg_def V18_K ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3) );

  reg_def V19   ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()          );
  reg_def V19_H ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next()  );
  reg_def V19_J ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2) );
  reg_def V19_K ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3) );

  reg_def V20   ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()          );
  reg_def V20_H ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next()  );
  reg_def V20_J ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2) );
  reg_def V20_K ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3) );

  reg_def V21   ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()          );
  reg_def V21_H ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next()  );
  reg_def V21_J ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2) );
  reg_def V21_K ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3) );

  reg_def V22   ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()          );
  reg_def V22_H ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next()  );
  reg_def V22_J ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2) );
  reg_def V22_K ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3) );

  reg_def V23   ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()          );
  reg_def V23_H ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next()  );
  reg_def V23_J ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2) );
  reg_def V23_K ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3) );

  reg_def V24   ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()          );
  reg_def V24_H ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next()  );
  reg_def V24_J ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2) );
  reg_def V24_K ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3) );

  reg_def V25   ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()          );
  reg_def V25_H ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next()  );
  reg_def V25_J ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2) );
  reg_def V25_K ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3) );

  reg_def V26   ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()          );
  reg_def V26_H ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next()  );
  reg_def V26_J ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2) );
  reg_def V26_K ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3) );

  reg_def V27   ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()          );
  reg_def V27_H ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next()  );
  reg_def V27_J ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2) );
  reg_def V27_K ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3) );

  reg_def V28   ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()          );
  reg_def V28_H ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next()  );
  reg_def V28_J ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2) );
  reg_def V28_K ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3) );

  reg_def V29   ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()          );
  reg_def V29_H ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next()  );
  reg_def V29_J ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2) );
  reg_def V29_K ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3) );

  reg_def V30   ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()          );
  reg_def V30_H ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next()  );
  reg_def V30_J ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2) );
  reg_def V30_K ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3) );

  reg_def V31   ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()          );
  reg_def V31_H ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next()  );
  reg_def V31_J ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2) );
  reg_def V31_K ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3) );
  337 
  338 // ----------------------------
  339 // SVE Predicate Registers
  340 // ----------------------------
  // SVE predicate registers p0-p15, all caller-save. Allocation
  // preference order is given by alloc_class chunk2 below.
  reg_def P0 (SOC, SOC, Op_RegVectMask, 0, p0->as_VMReg());
  reg_def P1 (SOC, SOC, Op_RegVectMask, 1, p1->as_VMReg());
  reg_def P2 (SOC, SOC, Op_RegVectMask, 2, p2->as_VMReg());
  reg_def P3 (SOC, SOC, Op_RegVectMask, 3, p3->as_VMReg());
  reg_def P4 (SOC, SOC, Op_RegVectMask, 4, p4->as_VMReg());
  reg_def P5 (SOC, SOC, Op_RegVectMask, 5, p5->as_VMReg());
  reg_def P6 (SOC, SOC, Op_RegVectMask, 6, p6->as_VMReg());
  reg_def P7 (SOC, SOC, Op_RegVectMask, 7, p7->as_VMReg());
  reg_def P8 (SOC, SOC, Op_RegVectMask, 8, p8->as_VMReg());
  reg_def P9 (SOC, SOC, Op_RegVectMask, 9, p9->as_VMReg());
  reg_def P10 (SOC, SOC, Op_RegVectMask, 10, p10->as_VMReg());
  reg_def P11 (SOC, SOC, Op_RegVectMask, 11, p11->as_VMReg());
  reg_def P12 (SOC, SOC, Op_RegVectMask, 12, p12->as_VMReg());
  reg_def P13 (SOC, SOC, Op_RegVectMask, 13, p13->as_VMReg());
  reg_def P14 (SOC, SOC, Op_RegVectMask, 14, p14->as_VMReg());
  reg_def P15 (SOC, SOC, Op_RegVectMask, 15, p15->as_VMReg());
  357 
  358 // ----------------------------
  359 // Special Registers
  360 // ----------------------------
  361 
// the AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. the FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
  367 
// Condition flags, modelled as a pseudo register with no backing VMReg
// (VMRegImpl::Bad()).
reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
  369 
  370 // Specify priority of register selection within phases of register
  371 // allocation.  Highest priority is first.  A useful heuristic is to
  372 // give registers a low priority when they are required by machine
  373 // instructions, like EAX and EDX on I486, and choose no-save registers
  374 // before save-on-call, & save-on-call before save-on-entry.  Registers
  375 // which participate in fixed calling sequences should come last.
  376 // Registers which are used as pairs must fall on an even boundary.
  377 
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers -- after the plain volatiles because they participate
    // in fixed calling sequences (see priority note above)
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
    R8, R8_H,   // rscratch1
    R9, R9_H,   // rscratch2
);
  420 
alloc_class chunk1(

    // caller-save scratch registers (SOC, not live across calls) --
    // preferred first
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);
  461 
// SVE predicate register allocation order.
alloc_class chunk2 (
    // Governing predicates for load/store and arithmetic
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,

    // Extra predicates
    P8,
    P9,
    P10,
    P11,
    P12,
    P13,
    P14,
    P15,

    // Preserved for all-true predicate
    P7,
);
  485 
// The flags pseudo register gets its own allocation chunk.
alloc_class chunk3(RFLAGS);
  487 
  488 //----------Architecture Description Register Classes--------------------------
  489 // Several register classes are automatically defined based upon information in
  490 // this architecture description.
  491 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
  492 // 2) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
  493 //
  494 
  495 // Class for all 32 bit general purpose registers
// Note: R8/R9 (rscratch1/rscratch2) are deliberately absent; R27-R31 are
// present but marked non-allocatable by their reg_defs above.
reg_class all_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30,
    R31
);
  528 
  529 
  530 // Class for all 32 bit integer registers (excluding SP which
  531 // will never be used as an integer register)
reg_class any_reg32 %{
  // Mask is computed at runtime; _ANY_REG32_mask is defined outside this
  // chunk of the file.
  return _ANY_REG32_mask;
%}
  535 
  536 // Singleton class for R0 int register
// Singleton classes pin an operand to one specific register, for
// instructions/calling sequences that require a fixed register.
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);

// Singleton class for R31 int register
reg_class int_r31_reg(R31);
  550 
  551 // Class for all 64 bit general purpose registers
// 64-bit view: each register is a (low, _H) pair of 32-bit halves (see
// note above about virtual upper halves). R8/R9 are deliberately absent.
reg_class all_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
  584 
  585 // Class for all long integer registers (including SP)
reg_class any_reg %{
  // Mask is computed at runtime; _ANY_REG_mask is defined outside this
  // chunk of the file.
  return _ANY_REG_mask;
%}
  589 
  590 // Class for non-allocatable 32 bit registers
// Registers withheld from the allocator (thread, lr, sp, and r18 when the
// platform reserves it). Note the trailing comma inside the #ifdef is
// intentional ADL list syntax.
reg_class non_allocatable_reg32(
#ifdef R18_RESERVED
    // See comment in register_aarch64.hpp
    R18,                        // tls on Windows
#endif
    R28,                        // thread
    R30,                        // lr
    R31                         // sp
);

// Class for non-allocatable 64 bit registers
reg_class non_allocatable_reg(
#ifdef R18_RESERVED
    // See comment in register_aarch64.hpp
    R18, R18_H,                 // tls on Windows, platform register on macOS
#endif
    R28, R28_H,                 // thread
    R30, R30_H,                 // lr
    R31, R31_H                  // sp
);
  611 
  612 // Class for all non-special integer registers
reg_class no_special_reg32 %{
  // Runtime-computed mask; defined outside this chunk of the file.
  return _NO_SPECIAL_REG32_mask;
%}

// Class for all non-special long integer registers
reg_class no_special_reg %{
  // Runtime-computed mask; defined outside this chunk of the file.
  return _NO_SPECIAL_REG_mask;
%}
  621 
  622 // Class for 64 bit register r0
// Fixed-register 64-bit classes: each pins an operand to one register
// pair for instructions that require it.
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register (r12)
reg_class method_reg(
    R12, R12_H
);

// Class for thread register (r28, see reg_def above)
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register (r29)
reg_class fp_reg(
    R29, R29_H
);

// Class for link register (r30)
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register (r31)
reg_class sp_reg(
  R31, R31_H
);

// Class for all pointer registers
reg_class ptr_reg %{
  // Runtime-computed mask; defined outside this chunk of the file.
  return _PTR_REG_mask;
%}

// Class for all non_special pointer registers
reg_class no_special_ptr_reg %{
  // Runtime-computed mask; defined outside this chunk of the file.
  return _NO_SPECIAL_PTR_REG_mask;
%}

// Class for all non_special pointer registers (excluding rfp)
reg_class no_special_no_rfp_ptr_reg %{
  // Runtime-computed mask; defined outside this chunk of the file.
  return _NO_SPECIAL_NO_RFP_PTR_REG_mask;
%}
  701 
  702 // Class for all float registers
// Uses only the first 32-bit slot of each SIMD register (the reg_defs
// above note that only the first float/double element is used).
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);
  737 
  738 // Double precision float registers have virtual `high halves' that
  739 // are needed by the allocator.
  740 // Class for all double registers
// Each double occupies the first two 32-bit slots (Vn, Vn_H) of a SIMD
// register.
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
  775 
  776 // Class for all SVE vector registers.
// Each SVE vector uses all four 32-bit slots (Vn .. Vn_K); see the SVE
// comment above on how these 'logical' slots map to physical lengths.
reg_class vectora_reg (
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,
);
  811 
// Class for all 64bit vector registers.
// A 64-bit vector uses the low half of a V register: two slots (Vn, Vn_H).
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);

// Class for all 128bit vector registers.
// A 128-bit vector uses the full V register: four slots (Vn, Vn_H, Vn_J, Vn_K).
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
  883 
// Singleton register classes, one per FP/SIMD register pair (Vn, Vn_H),
// so that an instruction operand can be constrained to one specific
// V register.

// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Class for 128 bit register v4
reg_class v4_reg(
    V4, V4_H
);

// Class for 128 bit register v5
reg_class v5_reg(
    V5, V5_H
);

// Class for 128 bit register v6
reg_class v6_reg(
    V6, V6_H
);

// Class for 128 bit register v7
reg_class v7_reg(
    V7, V7_H
);

// Class for 128 bit register v8
reg_class v8_reg(
    V8, V8_H
);

// Class for 128 bit register v9
reg_class v9_reg(
    V9, V9_H
);

// Class for 128 bit register v10
reg_class v10_reg(
    V10, V10_H
);

// Class for 128 bit register v11
reg_class v11_reg(
    V11, V11_H
);

// Class for 128 bit register v12
reg_class v12_reg(
    V12, V12_H
);

// Class for 128 bit register v13
reg_class v13_reg(
    V13, V13_H
);

// Class for 128 bit register v14
reg_class v14_reg(
    V14, V14_H
);

// Class for 128 bit register v15
reg_class v15_reg(
    V15, V15_H
);

// Class for 128 bit register v16
reg_class v16_reg(
    V16, V16_H
);

// Class for 128 bit register v17
reg_class v17_reg(
    V17, V17_H
);

// Class for 128 bit register v18
reg_class v18_reg(
    V18, V18_H
);

// Class for 128 bit register v19
reg_class v19_reg(
    V19, V19_H
);

// Class for 128 bit register v20
reg_class v20_reg(
    V20, V20_H
);

// Class for 128 bit register v21
reg_class v21_reg(
    V21, V21_H
);

// Class for 128 bit register v22
reg_class v22_reg(
    V22, V22_H
);

// Class for 128 bit register v23
reg_class v23_reg(
    V23, V23_H
);

// Class for 128 bit register v24
reg_class v24_reg(
    V24, V24_H
);

// Class for 128 bit register v25
reg_class v25_reg(
    V25, V25_H
);

// Class for 128 bit register v26
reg_class v26_reg(
    V26, V26_H
);

// Class for 128 bit register v27
reg_class v27_reg(
    V27, V27_H
);

// Class for 128 bit register v28
reg_class v28_reg(
    V28, V28_H
);

// Class for 128 bit register v29
reg_class v29_reg(
    V29, V29_H
);

// Class for 128 bit register v30
reg_class v30_reg(
    V30, V30_H
);

// Class for 128 bit register v31
reg_class v31_reg(
    V31, V31_H
);
 1043 
// Class for all SVE predicate registers.
reg_class pr_reg (
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    // P7, non-allocatable, preserved with all elements preset to TRUE.
    P8,
    P9,
    P10,
    P11,
    P12,
    P13,
    P14,
    P15
);

// Class for SVE governing predicate registers, which are used
// to determine the active elements of a predicated instruction.
// Only the low predicate registers (p0-p6) may govern.
reg_class gov_pr (
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    // P7, non-allocatable, preserved with all elements preset to TRUE.
);

// Singleton classes for predicate registers p0 and p1.
reg_class p0_reg(P0);
reg_class p1_reg(P1);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);
 1082 
 1083 %}
 1084 
 1085 //----------DEFINITION BLOCK---------------------------------------------------
 1086 // Define name --> value mappings to inform the ADLC of an integer valued name
 1087 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 1088 // Format:
 1089 //        int_def  <name>         ( <int_value>, <expression>);
 1090 // Generated Code in ad_<arch>.hpp
 1091 //        #define  <name>   (<expression>)
 1092 //        // value == <int_value>
 1093 // Generated code in ad_<arch>.cpp adlc_verification()
 1094 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 1095 //
 1096 
 1097 // we follow the ppc-aix port in using a simple cost model which ranks
 1098 // register operations as cheap, memory ops as more expensive and
 1099 // branches as most expensive. the first two have a low as well as a
 1100 // normal cost. huge cost appears to be a way of saying don't do
 1101 // something
 1102 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls are ranked at twice the cost of a register op.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile memory references are an order of magnitude more expensive.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 1110 
 1111 
 1112 //----------SOURCE BLOCK-------------------------------------------------------
 1113 // This is a block of C++ code which provides values, functions, and
 1114 // definitions necessary in the rest of the architecture description
 1115 
 1116 source_hpp %{
 1117 
 1118 #include "asm/macroAssembler.hpp"
 1119 #include "gc/shared/barrierSetAssembler.hpp"
 1120 #include "gc/shared/cardTable.hpp"
 1121 #include "gc/shared/cardTableBarrierSet.hpp"
 1122 #include "gc/shared/collectedHeap.hpp"
 1123 #include "opto/addnode.hpp"
 1124 #include "opto/convertnode.hpp"
 1125 #include "runtime/objectMonitor.hpp"
 1126 
// Derived register masks with conditionally allocatable registers
// (rheapbase/r27, rfp/r29). Defined in the source block and populated
// at startup by reg_mask_init().
extern RegMask _ANY_REG32_mask;
extern RegMask _ANY_REG_mask;
extern RegMask _PTR_REG_mask;
extern RegMask _NO_SPECIAL_REG32_mask;
extern RegMask _NO_SPECIAL_REG_mask;
extern RegMask _NO_SPECIAL_PTR_REG_mask;
extern RegMask _NO_SPECIAL_NO_RFP_PTR_REG_mask;
 1134 
// Platform hooks consulted by Compile::shorten_branches. AArch64 does
// not use call trampoline stubs, so both queries report zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
 1152 
// Platform hooks for emitting and sizing the exception and deopt
// handler stubs of a compiled method.
class HandlerImpl {

 public:

  static int emit_exception_handler(C2_MacroAssembler *masm);
  static int emit_deopt_handler(C2_MacroAssembler* masm);

  // The exception handler is a single far branch to the code stub.
  static uint size_exception_handler() {
    return MacroAssembler::far_codestub_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    return NativeInstruction::instruction_size + MacroAssembler::far_codestub_branch_size();
  }
};
 1169 
// Platform-dependent extension point for Node flags; AArch64 defines
// no extra flags beyond the shared ones.
class Node::PD {
public:
  enum NodeFlags {
    _last_flag = Node::_last_flag
  };
};
 1176 
  // Returns true if opcode is a CompareAndSwapX-style operation; see
  // the definition in the source block for the maybe_volatile semantics.
  bool is_CAS(int opcode, bool maybe_volatile);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);

  // Convert BoolTest condition to Assembler condition.
  // Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
  Assembler::Condition to_assembler_cond(BoolTest::mask cond);
 1199 %}
 1200 
 1201 source %{
 1202 
 1203   // Derived RegMask with conditionally allocatable registers
 1204 
  // No platform-specific analysis of the mach graph is performed on AArch64.
  void PhaseOutput::pd_perform_mach_node_analysis() {
  }
 1207 
  // Mach nodes need no special code alignment on AArch64.
  int MachNode::pd_alignment_required() const {
    return 1;
  }
 1211 
  // No padding is ever inserted before a mach node on AArch64.
  int MachNode::compute_padding(int current_offset) const {
    return 0;
  }
 1215 
  // Definitions of the derived register masks declared in source_hpp;
  // populated at startup by reg_mask_init() below.
  RegMask _ANY_REG32_mask;
  RegMask _ANY_REG_mask;
  RegMask _PTR_REG_mask;
  RegMask _NO_SPECIAL_REG32_mask;
  RegMask _NO_SPECIAL_REG_mask;
  RegMask _NO_SPECIAL_PTR_REG_mask;
  RegMask _NO_SPECIAL_NO_RFP_PTR_REG_mask;
 1223 
  void reg_mask_init() {
    // We derive below RegMask(s) from the ones which are auto-generated from
    // adlc register classes to make AArch64 rheapbase (r27) and rfp (r29)
    // registers conditionally reserved.

    // sp (r31) is never allocatable as a 32-bit register.
    _ANY_REG32_mask = _ALL_REG32_mask;
    _ANY_REG32_mask.Remove(OptoReg::as_OptoReg(r31_sp->as_VMReg()));

    _ANY_REG_mask = _ALL_REG_mask;

    _PTR_REG_mask = _ALL_REG_mask;

    // Start from the full masks and strip the non-allocatable registers.
    _NO_SPECIAL_REG32_mask = _ALL_REG32_mask;
    _NO_SPECIAL_REG32_mask.SUBTRACT(_NON_ALLOCATABLE_REG32_mask);

    _NO_SPECIAL_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    _NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    // r27 is not allocatable when compressed oops is on and heapbase is not
    // zero; compressed klass pointers don't use r27 after JDK-8234794.
    if (UseCompressedOops && (CompressedOops::base() != nullptr)) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
      _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
      _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
    }

    // r29 is not allocatable when PreserveFramePointer is on
    if (PreserveFramePointer) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
      _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
      _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
    }

    // rfp (r29) is unconditionally excluded from this mask.
    _NO_SPECIAL_NO_RFP_PTR_REG_mask = _NO_SPECIAL_PTR_REG_mask;
    _NO_SPECIAL_NO_RFP_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
  }
 1263 
 1264   // Optimizaton of volatile gets and puts
 1265   // -------------------------------------
 1266   //
 1267   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
 1268   // use to implement volatile reads and writes. For a volatile read
 1269   // we simply need
 1270   //
 1271   //   ldar<x>
 1272   //
 1273   // and for a volatile write we need
 1274   //
 1275   //   stlr<x>
 1276   //
 1277   // Alternatively, we can implement them by pairing a normal
 1278   // load/store with a memory barrier. For a volatile read we need
 1279   //
 1280   //   ldr<x>
 1281   //   dmb ishld
 1282   //
 1283   // for a volatile write
 1284   //
 1285   //   dmb ish
 1286   //   str<x>
 1287   //   dmb ish
 1288   //
 1289   // We can also use ldaxr and stlxr to implement compare and swap CAS
 1290   // sequences. These are normally translated to an instruction
 1291   // sequence like the following
 1292   //
 1293   //   dmb      ish
 1294   // retry:
 1295   //   ldxr<x>   rval raddr
 1296   //   cmp       rval rold
 1297   //   b.ne done
  //   stlxr<x>  rval, rnew, raddr
 1299   //   cbnz      rval retry
 1300   // done:
 1301   //   cset      r0, eq
 1302   //   dmb ishld
 1303   //
 1304   // Note that the exclusive store is already using an stlxr
 1305   // instruction. That is required to ensure visibility to other
 1306   // threads of the exclusive write (assuming it succeeds) before that
 1307   // of any subsequent writes.
 1308   //
 1309   // The following instruction sequence is an improvement on the above
 1310   //
 1311   // retry:
 1312   //   ldaxr<x>  rval raddr
 1313   //   cmp       rval rold
 1314   //   b.ne done
  //   stlxr<x>  rval, rnew, raddr
 1316   //   cbnz      rval retry
 1317   // done:
 1318   //   cset      r0, eq
 1319   //
 1320   // We don't need the leading dmb ish since the stlxr guarantees
 1321   // visibility of prior writes in the case that the swap is
 1322   // successful. Crucially we don't have to worry about the case where
 1323   // the swap is not successful since no valid program should be
 1324   // relying on visibility of prior changes by the attempting thread
 1325   // in the case where the CAS fails.
 1326   //
 1327   // Similarly, we don't need the trailing dmb ishld if we substitute
 1328   // an ldaxr instruction since that will provide all the guarantees we
 1329   // require regarding observation of changes made by other threads
 1330   // before any change to the CAS address observed by the load.
 1331   //
 1332   // In order to generate the desired instruction sequence we need to
 1333   // be able to identify specific 'signature' ideal graph node
 1334   // sequences which i) occur as a translation of a volatile reads or
 1335   // writes or CAS operations and ii) do not occur through any other
 1336   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
 1338   // sequences to the desired machine code sequences. Selection of the
 1339   // alternative rules can be implemented by predicates which identify
 1340   // the relevant node sequences.
 1341   //
 1342   // The ideal graph generator translates a volatile read to the node
 1343   // sequence
 1344   //
 1345   //   LoadX[mo_acquire]
 1346   //   MemBarAcquire
 1347   //
 1348   // As a special case when using the compressed oops optimization we
 1349   // may also see this variant
 1350   //
 1351   //   LoadN[mo_acquire]
 1352   //   DecodeN
 1353   //   MemBarAcquire
 1354   //
 1355   // A volatile write is translated to the node sequence
 1356   //
 1357   //   MemBarRelease
 1358   //   StoreX[mo_release] {CardMark}-optional
 1359   //   MemBarVolatile
 1360   //
 1361   // n.b. the above node patterns are generated with a strict
 1362   // 'signature' configuration of input and output dependencies (see
 1363   // the predicates below for exact details). The card mark may be as
 1364   // simple as a few extra nodes or, in a few GC configurations, may
 1365   // include more complex control flow between the leading and
 1366   // trailing memory barriers. However, whatever the card mark
 1367   // configuration these signatures are unique to translated volatile
 1368   // reads/stores -- they will not appear as a result of any other
 1369   // bytecode translation or inlining nor as a consequence of
 1370   // optimizing transforms.
 1371   //
 1372   // We also want to catch inlined unsafe volatile gets and puts and
 1373   // be able to implement them using either ldar<x>/stlr<x> or some
 1374   // combination of ldr<x>/stlr<x> and dmb instructions.
 1375   //
 1376   // Inlined unsafe volatiles puts manifest as a minor variant of the
 1377   // normal volatile put node sequence containing an extra cpuorder
 1378   // membar
 1379   //
 1380   //   MemBarRelease
 1381   //   MemBarCPUOrder
 1382   //   StoreX[mo_release] {CardMark}-optional
 1383   //   MemBarCPUOrder
 1384   //   MemBarVolatile
 1385   //
 1386   // n.b. as an aside, a cpuorder membar is not itself subject to
 1387   // matching and translation by adlc rules.  However, the rule
 1388   // predicates need to detect its presence in order to correctly
 1389   // select the desired adlc rules.
 1390   //
 1391   // Inlined unsafe volatile gets manifest as a slightly different
 1392   // node sequence to a normal volatile get because of the
 1393   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
 1395   // MemBarAcquire, possibly through an optional DecodeN, is still
 1396   // present
 1397   //
 1398   //   MemBarCPUOrder
 1399   //        ||       \\
 1400   //   MemBarCPUOrder LoadX[mo_acquire]
 1401   //        ||            |
 1402   //        ||       {DecodeN} optional
 1403   //        ||       /
 1404   //     MemBarAcquire
 1405   //
 1406   // In this case the acquire membar does not directly depend on the
 1407   // load. However, we can be sure that the load is generated from an
 1408   // inlined unsafe volatile get if we see it dependent on this unique
 1409   // sequence of membar nodes. Similarly, given an acquire membar we
 1410   // can know that it was added because of an inlined unsafe volatile
 1411   // get if it is fed and feeds a cpuorder membar and if its feed
 1412   // membar also feeds an acquiring load.
 1413   //
 1414   // Finally an inlined (Unsafe) CAS operation is translated to the
 1415   // following ideal graph
 1416   //
 1417   //   MemBarRelease
 1418   //   MemBarCPUOrder
 1419   //   CompareAndSwapX {CardMark}-optional
 1420   //   MemBarCPUOrder
 1421   //   MemBarAcquire
 1422   //
 1423   // So, where we can identify these volatile read and write
 1424   // signatures we can choose to plant either of the above two code
 1425   // sequences. For a volatile read we can simply plant a normal
 1426   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
 1427   // also choose to inhibit translation of the MemBarAcquire and
 1428   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
 1429   //
 1430   // When we recognise a volatile store signature we can choose to
  // plant a dmb ish as a translation for the MemBarRelease, a
 1432   // normal str<x> and then a dmb ish for the MemBarVolatile.
 1433   // Alternatively, we can inhibit translation of the MemBarRelease
 1434   // and MemBarVolatile and instead plant a simple stlr<x>
 1435   // instruction.
 1436   //
 1437   // when we recognise a CAS signature we can choose to plant a dmb
 1438   // ish as a translation for the MemBarRelease, the conventional
 1439   // macro-instruction sequence for the CompareAndSwap node (which
 1440   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
 1441   // Alternatively, we can elide generation of the dmb instructions
 1442   // and plant the alternative CompareAndSwap macro-instruction
 1443   // sequence (which uses ldaxr<x>).
 1444   //
 1445   // Of course, the above only applies when we see these signature
 1446   // configurations. We still want to plant dmb instructions in any
 1447   // other cases where we may see a MemBarAcquire, MemBarRelease or
 1448   // MemBarVolatile. For example, at the end of a constructor which
 1449   // writes final/volatile fields we will see a MemBarRelease
 1450   // instruction and this needs a 'dmb ish' lest we risk the
 1451   // constructed object being visible without making the
 1452   // final/volatile field writes visible.
 1453   //
 1454   // n.b. the translation rules below which rely on detection of the
 1455   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
 1456   // If we see anything other than the signature configurations we
 1457   // always just translate the loads and stores to ldr<x> and str<x>
 1458   // and translate acquire, release and volatile membars to the
 1459   // relevant dmb instructions.
 1460   //
 1461 
 1462   // is_CAS(int opcode, bool maybe_volatile)
 1463   //
 1464   // return true if opcode is one of the possible CompareAndSwapX
 1465   // values otherwise false.
 1466 
  // Returns true if opcode is one of the possible CompareAndSwapX
  // values, otherwise false. For the second group of opcodes the
  // result is the maybe_volatile argument supplied by the caller.
  bool is_CAS(int opcode, bool maybe_volatile)
  {
    switch(opcode) {
      // We handle these: always treated as CAS, regardless of
      // maybe_volatile.
    case Op_CompareAndSwapI:
    case Op_CompareAndSwapL:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_CompareAndSwapB:
    case Op_CompareAndSwapS:
    case Op_GetAndSetI:
    case Op_GetAndSetL:
    case Op_GetAndSetP:
    case Op_GetAndSetN:
    case Op_GetAndAddI:
    case Op_GetAndAddL:
      return true;
      // These count as CAS only when the caller passes
      // maybe_volatile == true.
    case Op_CompareAndExchangeI:
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeB:
    case Op_CompareAndExchangeS:
    case Op_CompareAndExchangeL:
    case Op_CompareAndExchangeP:
    case Op_WeakCompareAndSwapB:
    case Op_WeakCompareAndSwapS:
    case Op_WeakCompareAndSwapI:
    case Op_WeakCompareAndSwapL:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      return maybe_volatile;
    default:
      return false;
    }
  }
 1507 
 1508   // helper to determine the maximum number of Phi nodes we may need to
 1509   // traverse when searching from a card mark membar for the merge mem
 1510   // feeding a trailing membar or vice versa
 1511 
 1512 // predicates controlling emit of ldr<x>/ldar<x>
 1513 
// Returns true when the acquire membar is redundant: it trails a
// volatile load (matched as ldar<x>) or a CAS-style LoadStore, so no
// separate dmb needs to be emitted for it.
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  MemBarNode* mb = barrier->as_MemBar();

  // Trailing membar of a volatile load: always redundant.
  if (mb->trailing_load()) {
    return true;
  }

  // Trailing membar of a LoadStore: redundant only for CAS opcodes.
  if (mb->trailing_load_store()) {
    Node* load_store = mb->in(MemBarNode::Precedent);
    assert(load_store->is_LoadStore(), "unexpected graph shape");
    return is_CAS(load_store->Opcode(), true);
  }

  return false;
}
 1532 
 1533 bool needs_acquiring_load(const Node *n)
 1534 {
 1535   assert(n->is_Load(), "expecting a load");
 1536   LoadNode *ld = n->as_Load();
 1537   return ld->is_acquire();
 1538 }
 1539 
 1540 bool unnecessary_release(const Node *n)
 1541 {
 1542   assert((n->is_MemBar() &&
 1543           n->Opcode() == Op_MemBarRelease),
 1544          "expecting a release membar");
 1545 
 1546   MemBarNode *barrier = n->as_MemBar();
 1547   if (!barrier->leading()) {
 1548     return false;
 1549   } else {
 1550     Node* trailing = barrier->trailing_membar();
 1551     MemBarNode* trailing_mb = trailing->as_MemBar();
 1552     assert(trailing_mb->trailing(), "Not a trailing membar?");
 1553     assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
 1554 
 1555     Node* mem = trailing_mb->in(MemBarNode::Precedent);
 1556     if (mem->is_Store()) {
 1557       assert(mem->as_Store()->is_release(), "");
 1558       assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
 1559       return true;
 1560     } else {
 1561       assert(mem->is_LoadStore(), "");
 1562       assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
 1563       return is_CAS(mem->Opcode(), true);
 1564     }
 1565   }
 1566   return false;
 1567 }
 1568 
// Returns true when the volatile membar trails a releasing store and
// is therefore redundant (the store will be matched as stlr<x>).
bool unnecessary_volatile(const Node *n)
{
  // assert n->is_MemBar();
  MemBarNode *mbvol = n->as_MemBar();

  bool release = mbvol->trailing_store();
  assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
#ifdef ASSERT
  // Cross-check the leading/trailing membar pairing in debug builds.
  if (release) {
    Node* leading = mbvol->leading_membar();
    assert(leading->Opcode() == Op_MemBarRelease, "");
    assert(leading->as_MemBar()->leading_store(), "");
    assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
  }
#endif

  return release;
}
 1587 
 1588 // predicates controlling emit of str<x>/stlr<x>
 1589 
 1590 bool needs_releasing_store(const Node *n)
 1591 {
 1592   // assert n->is_Store();
 1593   StoreNode *st = n->as_Store();
 1594   return st->trailing_membar() != nullptr;
 1595 }
 1596 
 1597 // predicate controlling translation of CAS
 1598 //
 1599 // returns true if CAS needs to use an acquiring load otherwise false
 1600 
// Returns true if the CAS needs to use an acquiring load (ldaxr<x>),
// otherwise false.
bool needs_acquiring_load_exclusive(const Node *n)
{
  assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
  LoadStoreNode* ldst = n->as_LoadStore();
  if (is_CAS(n->Opcode(), false)) {
    // An unconditional CAS opcode must always carry a trailing membar.
    assert(ldst->trailing_membar() != nullptr, "expected trailing membar");
  } else {
    // Other LoadStore opcodes need the acquiring form only when a
    // trailing membar is present.
    return ldst->trailing_membar() != nullptr;
  }

  // so we can just return true here
  return true;
}
 1614 
 1615 #define __ masm->
 1616 
 1617 // advance declarations for helper functions to convert register
 1618 // indices to register objects
 1619 
 1620 // the ad file has to provide implementations of certain methods
 1621 // expected by the generic code
 1622 //
 1623 // REQUIRED FUNCTIONALITY
 1624 
 1625 //=============================================================================
 1626 
 1627 // !!!!! Special hack to get all types of calls to specify the byte offset
 1628 //       from the start of the call to the point where the return address
 1629 //       will point.
 1630 
 1631 int MachCallStaticJavaNode::ret_addr_offset()
 1632 {
 1633   // call should be a simple bl
 1634   int off = 4;
 1635   return off;
 1636 }
 1637 
// A dynamic Java call is four 4-byte instructions, so the return
// address is 16 bytes past the start of the call.
int MachCallDynamicJavaNode::ret_addr_offset()
{
  return 16; // movz, movk, movk, bl
}
 1642 
 1643 int MachCallRuntimeNode::ret_addr_offset() {
 1644   // for generated stubs the call will be
 1645   //   bl(addr)
 1646   // or with far branches
 1647   //   bl(trampoline_stub)
 1648   // for real runtime callouts it will be six instructions
 1649   // see aarch64_enc_java_to_runtime
 1650   //   adr(rscratch2, retaddr)
 1651   //   str(rscratch2, Address(rthread, JavaThread::last_Java_pc_offset()));
 1652   //   lea(rscratch1, RuntimeAddress(addr)
 1653   //   blr(rscratch1)
 1654   CodeBlob *cb = CodeCache::find_blob(_entry_point);
 1655   if (cb) {
 1656     return 1 * NativeInstruction::instruction_size;
 1657   } else {
 1658     return 6 * NativeInstruction::instruction_size;
 1659   }
 1660 }
 1661 
 1662 //=============================================================================
 1663 
#ifndef PRODUCT
// Debug-only listing: a breakpoint node prints as "BREAKPOINT".
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif
 1669 
// Emit a breakpoint as a single "brk #0" instruction.
void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  __ brk(0);
}
 1673 
// Size is computed generically by MachNode from the emitted code.
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 1677 
 1678 //=============================================================================
 1679 
#ifndef PRODUCT
  // Debug-only listing of a nop padding sequence.
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif
 1685 
  // Emit _count nop instructions as padding.
  void MachNopNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc*) const {
    for (int i = 0; i < _count; i++) {
      __ nop();
    }
  }
 1691 
  // Each nop occupies NativeInstruction::instruction_size bytes.
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
 1695 
 1696 //=============================================================================
// MachConstantBaseNode reserves no output register: it has an empty
// encoding on AArch64 (see its emit()/size() definitions below).
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
 1698 
// The constant table is addressed absolutely, so no base offset applies.
int ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}
 1702 
// MachConstantBaseNode is never expanded after register allocation on
// AArch64, so postalloc_expand must be unreachable.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
 1707 
// MachConstantBaseNode emits no code on AArch64.
void MachConstantBaseNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {
  // Empty encoding
}
 1711 
// Zero bytes, matching the empty encoding in emit().
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}
 1715 
#ifndef PRODUCT
// Debug-only listing for the (empty) constant base node.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
 1721 
#ifndef PRODUCT
// Print pseudo-assembly for the method prologue (PrintOptoAssembly).
// Mirrors the layout produced by MachPrologNode::emit: stack bang,
// optional ROP protection, frame push, and — for non-stub compilations —
// the nmethod entry barrier.
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  if (C->output()->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  if (VM_Version::use_rop_protection()) {
    // Return-address protection: probe [lr], then sign lr (paciaz).
    st->print("ldr  zr, [lr]\n\t");
    st->print("paciaz\n\t");
  }
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    // Small frame: single sub plus stp with an immediate offset.
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    // Large frame: push the lr/rfp pair, then drop sp via rscratch1.
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
  if (C->stub_function() == nullptr) {
    // nmethod entry barrier (normal methods only, not stubs).
    st->print("\n\t");
    st->print("ldr  rscratch1, [guard]\n\t");
    st->print("dmb ishld\n\t");
    st->print("ldr  rscratch2, [rthread, #thread_disarmed_guard_value_offset]\n\t");
    st->print("cmp  rscratch1, rscratch2\n\t");
    st->print("b.eq skip");
    st->print("\n\t");
    st->print("blr #nmethod_entry_barrier_stub\n\t");
    st->print("b skip\n\t");
    st->print("guard: int\n\t");
    st->print("\n\t");
    st->print("skip:\n\t");
  }
}
#endif
 1761 
// Emit the method prologue: optional class-initialization barrier,
// SVE ptrue re-initialization, stack-overflow bang, frame push, and
// (for non-stub compilations) the nmethod entry barrier.
void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;

  // n.b. frame size includes space for return pc and rfp
  const int framesize = C->output()->frame_size_in_bytes();

  if (C->clinit_barrier_on_entry()) {
    // Divert to the wrong-method stub if the holder class has not
    // finished initialization.
    assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");

    Label L_skip_barrier;

    __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
    __ bind(L_skip_barrier);
  }

  if (C->max_vector_size() > 0) {
    // Re-establish the ptrue predicate register used by SVE vector code.
    __ reinitialize_ptrue();
  }

  int bangsize = C->output()->bang_size_in_bytes();
  if (C->output()->need_stack_bang(bangsize))
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (C->stub_function() == nullptr) {
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
    // Dummy labels for just measuring the code size
    Label dummy_slow_path;
    Label dummy_continuation;
    Label dummy_guard;
    Label* slow_path = &dummy_slow_path;
    Label* continuation = &dummy_continuation;
    Label* guard = &dummy_guard;
    if (!Compile::current()->output()->in_scratch_emit_size()) {
      // Use real labels from actual stub when not emitting code for the purpose of measuring its size
      C2EntryBarrierStub* stub = new (Compile::current()->comp_arena()) C2EntryBarrierStub();
      Compile::current()->output()->add_stub(stub);
      slow_path = &stub->entry();
      continuation = &stub->continuation();
      guard = &stub->guard();
    }
    // In the C2 code, we move the non-hot part of nmethod entry barriers out-of-line to a stub.
    bs->nmethod_entry_barrier(masm, slow_path, continuation, guard);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->output()->set_frame_complete(__ offset());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    ConstantTable& constant_table = C->output()->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
 1823 
// Prologue size varies with flags and barriers, so defer to the generic
// MachNode size computation.
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

// Number of relocatable values embedded in the prologue code.
int MachPrologNode::reloc() const
{
  return 0;
}
 1834 
 1835 //=============================================================================
 1836 
#ifndef PRODUCT
// Print pseudo-assembly for the method epilogue: frame pop, optional
// ROP-protection authentication, and the return safepoint poll.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    // Small frame: ldp with immediate offset, then add.
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    // Large frame: raise sp via rscratch1, then pop the pair.
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }
  if (VM_Version::use_rop_protection()) {
    // Authenticate lr (autiaz), then probe [lr].
    st->print("autiaz\n\t");
    st->print("ldr  zr, [lr]\n\t");
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# test polling word\n\t");
    // NOTE(review): printed with post-index syntax "[rthread],#off";
    // looks like a plain offset load is intended — debug text only, confirm.
    st->print("ldr  rscratch1, [rthread],#%d\n\t", in_bytes(JavaThread::polling_word_offset()));
    st->print("cmp  sp, rscratch1\n\t");
    st->print("bhi #slow_path");
  }
}
#endif
 1867 
// Emit the method epilogue: pop the frame, optionally run the reserved
// stack check, and perform the return safepoint poll.
void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    // Safepoint poll at return. Use a real stub entry unless we are only
    // measuring code size, where a dummy label suffices.
    Label dummy_label;
    Label* code_stub = &dummy_label;
    if (!C->output()->in_scratch_emit_size()) {
      C2SafepointPollStub* stub = new (C->comp_arena()) C2SafepointPollStub(__ offset());
      C->output()->add_stub(stub);
      code_stub = &stub->entry();
    }
    __ relocate(relocInfo::poll_return_type);
    __ safepoint_poll(*code_stub, true /* at_return */, true /* in_nmethod */);
  }
}
 1890 
// Epilogue size varies (polling, reserved-stack check), so defer to the
// generic MachNode size computation.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

// Use the default pipeline description for scheduling purposes.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
 1904 
 1905 //=============================================================================
 1906 
 1907 static enum RC rc_class(OptoReg::Name reg) {
 1908 
 1909   if (reg == OptoReg::Bad) {
 1910     return rc_bad;
 1911   }
 1912 
 1913   // we have 32 int registers * 2 halves
 1914   int slots_of_int_registers = Register::number_of_registers * Register::max_slots_per_register;
 1915 
 1916   if (reg < slots_of_int_registers) {
 1917     return rc_int;
 1918   }
 1919 
 1920   // we have 32 float register * 8 halves
 1921   int slots_of_float_registers = FloatRegister::number_of_registers * FloatRegister::max_slots_per_register;
 1922   if (reg < slots_of_int_registers + slots_of_float_registers) {
 1923     return rc_float;
 1924   }
 1925 
 1926   int slots_of_predicate_registers = PRegister::number_of_registers * PRegister::max_slots_per_register;
 1927   if (reg < slots_of_int_registers + slots_of_float_registers + slots_of_predicate_registers) {
 1928     return rc_predicate;
 1929   }
 1930 
 1931   // Between predicate regs & stack is the flags.
 1932   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
 1933 
 1934   return rc_stack;
 1935 }
 1936 
// Shared worker for MachSpillCopyNode::emit and ::format. When 'masm' is
// non-null the copy is emitted; when 'st' is non-null a textual
// description is printed. Handles gpr, fpr/vector, SVE predicate and
// stack-slot operands in all supported combinations. Always returns 0;
// the node's code size comes from MachNode::size instead.
uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
  Compile* C = ra_->C;

  // Get registers to move.
  OptoReg::Name src_hi = ra_->get_reg_second(in(1));
  OptoReg::Name src_lo = ra_->get_reg_first(in(1));
  OptoReg::Name dst_hi = ra_->get_reg_second(this);
  OptoReg::Name dst_lo = ra_->get_reg_first(this);

  enum RC src_hi_rc = rc_class(src_hi);
  enum RC src_lo_rc = rc_class(src_lo);
  enum RC dst_hi_rc = rc_class(dst_hi);
  enum RC dst_lo_rc = rc_class(dst_lo);

  assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");

  if (src_hi != OptoReg::Bad && !bottom_type()->isa_vectmask()) {
    assert((src_lo&1)==0 && src_lo+1==src_hi &&
           (dst_lo&1)==0 && dst_lo+1==dst_hi,
           "expected aligned-adjacent pairs");
  }

  if (src_lo == dst_lo && src_hi == dst_hi) {
    return 0;            // Self copy, no move.
  }

  // A 64-bit copy is one whose lo/hi halves form aligned-adjacent pairs.
  bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
              (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
  int src_offset = ra_->reg2offset(src_lo);
  int dst_offset = ra_->reg2offset(dst_lo);

  if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
    // Vector copies: scalable SVE (VecA) or NEON (VecD/VecX).
    uint ireg = ideal_reg();
    if (ireg == Op_VecA && masm) {
      int sve_vector_reg_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
      if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
        // stack->stack
        __ spill_copy_sve_vector_stack_to_stack(src_offset, dst_offset,
                                                sve_vector_reg_size_in_bytes);
      } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
        // vector register -> stack slot
        __ spill_sve_vector(as_FloatRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo),
                            sve_vector_reg_size_in_bytes);
      } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
        // stack slot -> vector register
        __ unspill_sve_vector(as_FloatRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo),
                              sve_vector_reg_size_in_bytes);
      } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
        // register-to-register move via self-ORR
        __ sve_orr(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                   as_FloatRegister(Matcher::_regEncode[src_lo]),
                   as_FloatRegister(Matcher::_regEncode[src_lo]));
      } else {
        ShouldNotReachHere();
      }
    } else if (masm) {
      assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
      assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
      if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
        // stack->stack
        assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
        if (ireg == Op_VecD) {
          // 64 bits fit through the integer scratch register.
          __ unspill(rscratch1, true, src_offset);
          __ spill(rscratch1, true, dst_offset);
        } else {
          __ spill_copy128(src_offset, dst_offset);
        }
      } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
        __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
               ireg == Op_VecD ? __ T8B : __ T16B,
               as_FloatRegister(Matcher::_regEncode[src_lo]));
      } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
        __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
                 ireg == Op_VecD ? __ D : __ Q,
                 ra_->reg2offset(dst_lo));
      } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
        __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                   ireg == Op_VecD ? __ D : __ Q,
                   ra_->reg2offset(src_lo));
      } else {
        ShouldNotReachHere();
      }
    }
  } else if (masm) {
    // Scalar and predicate copies, dispatched on the source class.
    switch (src_lo_rc) {
    case rc_int:
      if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
        if (is64) {
            __ mov(as_Register(Matcher::_regEncode[dst_lo]),
                   as_Register(Matcher::_regEncode[src_lo]));
        } else {
            __ movw(as_Register(Matcher::_regEncode[dst_lo]),
                    as_Register(Matcher::_regEncode[src_lo]));
        }
      } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
        if (is64) {
            __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_Register(Matcher::_regEncode[src_lo]));
        } else {
            __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_Register(Matcher::_regEncode[src_lo]));
        }
      } else {                    // gpr --> stack spill
        assert(dst_lo_rc == rc_stack, "spill to bad register class");
        __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
      }
      break;
    case rc_float:
      if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
        if (is64) {
            __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        } else {
            __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        }
      } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
        if (is64) {
            __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        } else {
            __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        }
      } else {                    // fpr --> stack spill
        assert(dst_lo_rc == rc_stack, "spill to bad register class");
        __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
                 is64 ? __ D : __ S, dst_offset);
      }
      break;
    case rc_stack:
      if (dst_lo_rc == rc_int) {  // stack --> gpr load
        __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
      } else if (dst_lo_rc == rc_float) { // stack --> fpr load
        __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                   is64 ? __ D : __ S, src_offset);
      } else if (dst_lo_rc == rc_predicate) {
        // stack --> predicate load
        __ unspill_sve_predicate(as_PRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo),
                                 Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
      } else {                    // stack --> stack copy
        assert(dst_lo_rc == rc_stack, "spill to bad register class");
        if (ideal_reg() == Op_RegVectMask) {
          __ spill_copy_sve_predicate_stack_to_stack(src_offset, dst_offset,
                                                     Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
        } else {
          // Copy through the integer scratch register.
          __ unspill(rscratch1, is64, src_offset);
          __ spill(rscratch1, is64, dst_offset);
        }
      }
      break;
    case rc_predicate:
      if (dst_lo_rc == rc_predicate) {
        __ sve_mov(as_PRegister(Matcher::_regEncode[dst_lo]), as_PRegister(Matcher::_regEncode[src_lo]));
      } else if (dst_lo_rc == rc_stack) {
        // predicate --> stack spill
        __ spill_sve_predicate(as_PRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo),
                               Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
      } else {
        assert(false, "bad src and dst rc_class combination.");
        ShouldNotReachHere();
      }
      break;
    default:
      assert(false, "bad rc_class for spill");
      ShouldNotReachHere();
    }
  }

  if (st) {
    // Format a description of the copy for PrintOptoAssembly output.
    st->print("spill ");
    if (src_lo_rc == rc_stack) {
      st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
    } else {
      st->print("%s -> ", Matcher::regName[src_lo]);
    }
    if (dst_lo_rc == rc_stack) {
      st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
    } else {
      st->print("%s", Matcher::regName[dst_lo]);
    }
    if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
      // Report the vector spill size in bits.
      int vsize = 0;
      switch (ideal_reg()) {
      case Op_VecD:
        vsize = 64;
        break;
      case Op_VecX:
        vsize = 128;
        break;
      case Op_VecA:
        vsize = Matcher::scalable_vector_reg_size(T_BYTE) * 8;
        break;
      default:
        assert(false, "bad register type for spill");
        ShouldNotReachHere();
      }
      st->print("\t# vector spill size = %d", vsize);
    } else if (ideal_reg() == Op_RegVectMask) {
      assert(Matcher::supports_scalable_vector(), "bad register type for spill");
      int vsize = Matcher::scalable_predicate_reg_slots() * 32;
      st->print("\t# predicate spill size = %d", vsize);
    } else {
      st->print("\t# spill size = %d", is64 ? 64 : 32);
    }
  }

  return 0;

}
 2142 
#ifndef PRODUCT
// Debug printing: with a register allocator available, reuse
// implementation() in format-only mode (masm == nullptr).
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    implementation(nullptr, ra_, false, st);
}
#endif

// Emit the spill copy; implementation() does the real work.
void MachSpillCopyNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  implementation(masm, ra_, false, nullptr);
}

// Size varies with the source/destination classes; compute generically.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 2159 
 2160 //=============================================================================
 2161 
 2162 #ifndef PRODUCT
 2163 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 2164   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 2165   int reg = ra_->get_reg_first(this);
 2166   st->print("add %s, rsp, #%d]\t# box lock",
 2167             Matcher::regName[reg], offset);
 2168 }
 2169 #endif
 2170 
// Materialize the address of the box lock's stack slot into the
// destination register: dst = sp + offset.
void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  // This add will handle any 24-bit signed offset. 24 bits allows an
  // 8 megabyte stack frame.
  __ add(as_Register(reg), sp, offset);
}

uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());

  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    // Offset fits the add immediate: one instruction.
    return NativeInstruction::instruction_size;
  } else {
    // Otherwise the offset must be materialized first: two instructions.
    return 2 * NativeInstruction::instruction_size;
  }
}
 2190 
 2191 //=============================================================================
 2192 
 2193 #ifndef PRODUCT
 2194 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 2195 {
 2196   st->print_cr("# MachUEPNode");
 2197   if (UseCompressedClassPointers) {
 2198     st->print_cr("\tldrw rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 2199     st->print_cr("\tldrw r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
 2200     st->print_cr("\tcmpw rscratch1, r10");
 2201   } else {
 2202     st->print_cr("\tldr rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 2203     st->print_cr("\tldr r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
 2204     st->print_cr("\tcmp rscratch1, r10");
 2205   }
 2206   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
 2207 }
 2208 #endif
 2209 
// Emit the unverified entry point: the inline-cache check, aligned to
// InteriorEntryAlignment.
void MachUEPNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
{
  __ ic_check(InteriorEntryAlignment);
}

// Size varies with flags and alignment; compute generically via MachNode.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
 2219 
 2220 // REQUIRED EMIT CODE
 2221 
 2222 //=============================================================================
 2223 
// Emit exception handler code.
// Returns the offset of the handler within the stub section, or 0 if
// the code cache is full.
int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  address base = __ start_a_stub(size_exception_handler());
  if (base == nullptr) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  // Far jump to the shared exception blob.
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
 2242 
// Emit deopt handler code.
// Returns the offset of the handler within the stub section, or 0 if
// the code cache is full.
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  address base = __ start_a_stub(size_deopt_handler());
  if (base == nullptr) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // Set lr to the current pc before jumping to the deopt blob's unpack
  // entry — presumably so the blob can identify the deopt site; confirm
  // against the deopt blob's expectations.
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  // The handler must be exactly the advertised size.
  assert(__ offset() - offset == (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
 2262 
 2263 // REQUIRED MATCHER CODE
 2264 
 2265 //=============================================================================
 2266 
 2267 bool Matcher::match_rule_supported(int opcode) {
 2268   if (!has_match_rule(opcode))
 2269     return false;
 2270 
 2271   switch (opcode) {
 2272     case Op_OnSpinWait:
 2273       return VM_Version::supports_on_spin_wait();
 2274     case Op_CacheWB:
 2275     case Op_CacheWBPreSync:
 2276     case Op_CacheWBPostSync:
 2277       if (!VM_Version::supports_data_cache_line_flush()) {
 2278         return false;
 2279       }
 2280       break;
 2281     case Op_ExpandBits:
 2282     case Op_CompressBits:
 2283       if (!VM_Version::supports_svebitperm()) {
 2284         return false;
 2285       }
 2286       break;
 2287     case Op_FmaF:
 2288     case Op_FmaD:
 2289     case Op_FmaVF:
 2290     case Op_FmaVD:
 2291       if (!UseFMA) {
 2292         return false;
 2293       }
 2294       break;
 2295     case Op_FmaHF:
 2296       // UseFMA flag also needs to be checked along with FEAT_FP16
 2297       if (!UseFMA || !is_feat_fp16_supported()) {
 2298         return false;
 2299       }
 2300       break;
 2301     case Op_AddHF:
 2302     case Op_SubHF:
 2303     case Op_MulHF:
 2304     case Op_DivHF:
 2305     case Op_MinHF:
 2306     case Op_MaxHF:
 2307     case Op_SqrtHF:
 2308       // Half-precision floating point scalar operations require FEAT_FP16
 2309       // to be available. FEAT_FP16 is enabled if both "fphp" and "asimdhp"
 2310       // features are supported.
 2311       if (!is_feat_fp16_supported()) {
 2312         return false;
 2313       }
 2314       break;
 2315   }
 2316 
 2317   return true; // Per default match rules are supported.
 2318 }
 2319 
// Mask of SVE predicate registers available to the matcher.
const RegMask* Matcher::predicate_reg_mask(void) {
  return &_PR_REG_mask;
}

// Vector calling convention is available whenever Vector API support
// (EnableVectorSupport) is on.
bool Matcher::supports_vector_calling_convention(void) {
  return EnableVectorSupport;
}
 2327 
 2328 OptoRegPair Matcher::vector_return_value(uint ideal_reg) {
 2329   assert(EnableVectorSupport, "sanity");
 2330   int lo = V0_num;
 2331   int hi = V0_H_num;
 2332   if (ideal_reg == Op_VecX || ideal_reg == Op_VecA) {
 2333     hi = V0_K_num;
 2334   }
 2335   return OptoRegPair(hi, lo);
 2336 }
 2337 
 2338 // Is this branch offset short enough that a short branch can be used?
 2339 //
 2340 // NOTE: If the platform does not provide any short branch variants, then
 2341 //       this method should return false for offset 0.
 2342 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
 2343   // The passed offset is relative to address of the branch.
 2344 
 2345   return (-32768 <= offset && offset < 32768);
 2346 }
 2347 
// Vector width in bytes.
// Capped by both the hardware register width (SVE max or NEON) and the
// MaxVectorSize flag; returns 0 when no usable vector width exists for
// the element type.
int Matcher::vector_width_in_bytes(BasicType bt) {
  // The MaxVectorSize should have been set by detecting SVE max vector register size.
  int size = MIN2((UseSVE > 0) ? (int)FloatRegister::sve_vl_max : (int)FloatRegister::neon_vl, (int)MaxVectorSize);
  // Minimum 2 values in vector
  if (size < 2*type2aelembytes(bt)) size = 0;
  // But never < 4
  if (size < 4) size = 0;
  return size;
}

// Limits on vector size (number of elements) loaded into vector.
int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
 2363 
 2364 int Matcher::min_vector_size(const BasicType bt) {
 2365   int max_size = max_vector_size(bt);
 2366   // Limit the min vector size to 8 bytes.
 2367   int size = 8 / type2aelembytes(bt);
 2368   if (bt == T_BYTE) {
 2369     // To support vector api shuffle/rearrange.
 2370     size = 4;
 2371   } else if (bt == T_BOOLEAN) {
 2372     // To support vector api load/store mask.
 2373     size = 2;
 2374   }
 2375   if (size < 2) size = 2;
 2376   return MIN2(size, max_size);
 2377 }
 2378 
// Auto-vectorization uses the same upper bound as explicit vector code.
int Matcher::max_vector_size_auto_vectorization(const BasicType bt) {
  return Matcher::max_vector_size(bt);
}

// Actual max scalable vector register length.
int Matcher::scalable_vector_reg_size(const BasicType bt) {
  return Matcher::max_vector_size(bt);
}
 2387 
 2388 // Vector ideal reg.
 2389 uint Matcher::vector_ideal_reg(int len) {
 2390   if (UseSVE > 0 && FloatRegister::neon_vl < len && len <= FloatRegister::sve_vl_max) {
 2391     return Op_VecA;
 2392   }
 2393   switch(len) {
 2394     // For 16-bit/32-bit mask vector, reuse VecD.
 2395     case  2:
 2396     case  4:
 2397     case  8: return Op_VecD;
 2398     case 16: return Op_VecX;
 2399   }
 2400   ShouldNotReachHere();
 2401   return 0;
 2402 }
 2403 
// Specialize a generic vector operand to the concrete operand class for
// the given ideal register kind (scalable, 64-bit, or 128-bit vector).
MachOper* Matcher::pd_specialize_generic_vector_operand(MachOper* generic_opnd, uint ideal_reg, bool is_temp) {
  assert(Matcher::is_generic_vector(generic_opnd), "not generic");
  switch (ideal_reg) {
    case Op_VecA: return new vecAOper();
    case Op_VecD: return new vecDOper();
    case Op_VecX: return new vecXOper();
  }
  ShouldNotReachHere();
  return nullptr;
}

// No MachNodes are treated as pure register-to-register moves here.
bool Matcher::is_reg2reg_move(MachNode* m) {
  return false;
}

// A generic vector operand is identified by the VREG opcode.
bool Matcher::is_generic_vector(MachOper* opnd)  {
  return opnd->opcode() == VREG;
}
 2422 
 2423 // Return whether or not this register is ever used as an argument.
 2424 // This function is used on startup to build the trampoline stubs in
 2425 // generateOptoStub.  Registers not mentioned will be killed by the VM
 2426 // call in the trampoline, and arguments in those registers not be
 2427 // available to the callee.
 2428 bool Matcher::can_be_java_arg(int reg)
 2429 {
 2430   return
 2431     reg ==  R0_num || reg == R0_H_num ||
 2432     reg ==  R1_num || reg == R1_H_num ||
 2433     reg ==  R2_num || reg == R2_H_num ||
 2434     reg ==  R3_num || reg == R3_H_num ||
 2435     reg ==  R4_num || reg == R4_H_num ||
 2436     reg ==  R5_num || reg == R5_H_num ||
 2437     reg ==  R6_num || reg == R6_H_num ||
 2438     reg ==  R7_num || reg == R7_H_num ||
 2439     reg ==  V0_num || reg == V0_H_num ||
 2440     reg ==  V1_num || reg == V1_H_num ||
 2441     reg ==  V2_num || reg == V2_H_num ||
 2442     reg ==  V3_num || reg == V3_H_num ||
 2443     reg ==  V4_num || reg == V4_H_num ||
 2444     reg ==  V5_num || reg == V5_H_num ||
 2445     reg ==  V6_num || reg == V6_H_num ||
 2446     reg ==  V7_num || reg == V7_H_num;
 2447 }
 2448 
// Java argument registers may also hold spilled values.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
 2453 
// Integer register pressure threshold used by the register allocator;
// the INTPRESSURE flag overrides the computed default.
uint Matcher::int_pressure_limit()
{
  // JDK-8183543: When taking the number of available registers as int
  // register pressure threshold, the jtreg test:
  // test/hotspot/jtreg/compiler/regalloc/TestC2IntPressure.java
  // failed due to C2 compilation failure with
  // "COMPILE SKIPPED: failed spill-split-recycle sanity check".
  //
  // A derived pointer is live at CallNode and then is flagged by RA
  // as a spilled LRG. Spilling heuristics(Spill-USE) explicitly skip
  // derived pointers and lastly fail to spill after reaching maximum
  // number of iterations. Lowering the default pressure threshold to
  // (_NO_SPECIAL_REG32_mask.Size() minus 1) forces CallNode to become
  // a high register pressure area of the code so that split_DEF can
  // generate DefinitionSpillCopy for the derived pointer.
  uint default_int_pressure_threshold = _NO_SPECIAL_REG32_mask.Size() - 1;
  if (!PreserveFramePointer) {
    // When PreserveFramePointer is off, frame pointer is allocatable,
    // but different from other SOC registers, it is excluded from
    // fatproj's mask because its save type is No-Save. Decrease 1 to
    // ensure high pressure at fatproj when PreserveFramePointer is off.
    // See check_pressure_at_fatproj().
    default_int_pressure_threshold--;
  }
  return (INTPRESSURE == -1) ? default_int_pressure_threshold : INTPRESSURE;
}

// Float register pressure threshold; the FLOATPRESSURE flag overrides
// the register-class size.
uint Matcher::float_pressure_limit()
{
  // _FLOAT_REG_mask is generated by adlc from the float_reg register class.
  return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.Size() : FLOATPRESSURE;
}
 2486 
 2487 bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
 2488   return false;
 2489 }
 2490 
 2491 RegMask Matcher::divI_proj_mask() {
 2492   ShouldNotReachHere();
 2493   return RegMask();
 2494 }
 2495 
 2496 // Register for MODI projection of divmodI.
 2497 RegMask Matcher::modI_proj_mask() {
 2498   ShouldNotReachHere();
 2499   return RegMask();
 2500 }
 2501 
// Register for DIVL projection of divmodL.
// Unused on AArch64; calling this is a fatal error.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
 2507 
// Register for MODL projection of divmodL.
// Unused on AArch64; calling this is a fatal error.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
 2513 
// Register mask for the location where the SP is saved across a method
// handle invoke. On AArch64 this is the frame pointer register's mask.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
 2517 
 2518 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
 2519   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
 2520     Node* u = addp->fast_out(i);
 2521     if (u->is_LoadStore()) {
 2522       // On AArch64, LoadStoreNodes (i.e. compare and swap
 2523       // instructions) only take register indirect as an operand, so
 2524       // any attempt to use an AddPNode as an input to a LoadStoreNode
 2525       // must fail.
 2526       return false;
 2527     }
 2528     if (u->is_Mem()) {
 2529       int opsize = u->as_Mem()->memory_size();
 2530       assert(opsize > 0, "unexpected memory operand size");
 2531       if (u->as_Mem()->memory_size() != (1<<shift)) {
 2532         return false;
 2533       }
 2534     }
 2535   }
 2536   return true;
 2537 }
 2538 
 2539 // Convert BootTest condition to Assembler condition.
 2540 // Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
 2541 Assembler::Condition to_assembler_cond(BoolTest::mask cond) {
 2542   Assembler::Condition result;
 2543   switch(cond) {
 2544     case BoolTest::eq:
 2545       result = Assembler::EQ; break;
 2546     case BoolTest::ne:
 2547       result = Assembler::NE; break;
 2548     case BoolTest::le:
 2549       result = Assembler::LE; break;
 2550     case BoolTest::ge:
 2551       result = Assembler::GE; break;
 2552     case BoolTest::lt:
 2553       result = Assembler::LT; break;
 2554     case BoolTest::gt:
 2555       result = Assembler::GT; break;
 2556     case BoolTest::ule:
 2557       result = Assembler::LS; break;
 2558     case BoolTest::uge:
 2559       result = Assembler::HS; break;
 2560     case BoolTest::ult:
 2561       result = Assembler::LO; break;
 2562     case BoolTest::ugt:
 2563       result = Assembler::HI; break;
 2564     case BoolTest::overflow:
 2565       result = Assembler::VS; break;
 2566     case BoolTest::no_overflow:
 2567       result = Assembler::VC; break;
 2568     default:
 2569       ShouldNotReachHere();
 2570       return Assembler::Condition(-1);
 2571   }
 2572 
 2573   // Check conversion
 2574   if (cond & BoolTest::unsigned_compare) {
 2575     assert(cmpOpUOper((BoolTest::mask)((int)cond & ~(BoolTest::unsigned_compare))).ccode() == result, "Invalid conversion");
 2576   } else {
 2577     assert(cmpOpOper(cond).ccode() == result, "Invalid conversion");
 2578   }
 2579 
 2580   return result;
 2581 }
 2582 
 2583 // Binary src (Replicate con)
 2584 static bool is_valid_sve_arith_imm_pattern(Node* n, Node* m) {
 2585   if (n == nullptr || m == nullptr) {
 2586     return false;
 2587   }
 2588 
 2589   if (UseSVE == 0 || m->Opcode() != Op_Replicate) {
 2590     return false;
 2591   }
 2592 
 2593   Node* imm_node = m->in(1);
 2594   if (!imm_node->is_Con()) {
 2595     return false;
 2596   }
 2597 
 2598   const Type* t = imm_node->bottom_type();
 2599   if (!(t->isa_int() || t->isa_long())) {
 2600     return false;
 2601   }
 2602 
 2603   switch (n->Opcode()) {
 2604   case Op_AndV:
 2605   case Op_OrV:
 2606   case Op_XorV: {
 2607     Assembler::SIMD_RegVariant T = Assembler::elemType_to_regVariant(Matcher::vector_element_basic_type(n));
 2608     uint64_t value = t->isa_long() ? (uint64_t)imm_node->get_long() : (uint64_t)imm_node->get_int();
 2609     return Assembler::operand_valid_for_sve_logical_immediate(Assembler::regVariant_to_elemBits(T), value);
 2610   }
 2611   case Op_AddVB:
 2612     return (imm_node->get_int() <= 255 && imm_node->get_int() >= -255);
 2613   case Op_AddVS:
 2614   case Op_AddVI:
 2615     return Assembler::operand_valid_for_sve_add_sub_immediate((int64_t)imm_node->get_int());
 2616   case Op_AddVL:
 2617     return Assembler::operand_valid_for_sve_add_sub_immediate(imm_node->get_long());
 2618   default:
 2619     return false;
 2620   }
 2621 }
 2622 
 2623 // (XorV src (Replicate m1))
 2624 // (XorVMask src (MaskAll m1))
 2625 static bool is_vector_bitwise_not_pattern(Node* n, Node* m) {
 2626   if (n != nullptr && m != nullptr) {
 2627     return (n->Opcode() == Op_XorV || n->Opcode() == Op_XorVMask) &&
 2628            VectorNode::is_all_ones_vector(m);
 2629   }
 2630   return false;
 2631 }
 2632 
 2633 // Should the matcher clone input 'm' of node 'n'?
 2634 bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
 2635   if (is_vshift_con_pattern(n, m) ||
 2636       is_vector_bitwise_not_pattern(n, m) ||
 2637       is_valid_sve_arith_imm_pattern(n, m) ||
 2638       is_encode_and_store_pattern(n, m)) {
 2639     mstack.push(m, Visit);
 2640     return true;
 2641   }
 2642   return false;
 2643 }
 2644 
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {

  // Loads and stores with indirect memory input (e.g., volatile loads and
  // stores) do not subsume the input into complex addressing expressions. If
  // the addressing expression is input to at least one such load or store, do
  // not clone the addressing expression. Query needs_acquiring_load and
  // needs_releasing_store as a proxy for indirect memory input, as it is not
  // possible to directly query for indirect memory input at this stage.
  for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
    Node* n = m->fast_out(i);
    if (n->is_Load() && needs_acquiring_load(n)) {
      return false;
    }
    if (n->is_Store() && needs_releasing_store(n)) {
      return false;
    }
  }

  // Simple (base + constant offset) expressions are handled by the shared
  // helper.
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  // (AddP base addr (LShiftL (ConvI2L idx) con)): clone the shift (and,
  // when present, the ConvI2L) into the address so it can be subsumed as
  // a scaled/extended register offset.
  Node *off = m->in(AddPNode::Offset);
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      // The shift amount must match the memory operand size of every use.
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    // (AddP base addr (ConvI2L idx)): clone the conversion so the index
    // can be subsumed as a sign-extended register offset.
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
 2702 
// Emit a volatile access INSN of REG through a plain register-indirect
// address. Volatile accesses only permit base-register addressing, so the
// index, scale and displacement components must be absent — the guarantees
// below enforce that. SCRATCH is not used by the expansion; it is kept for
// interface symmetry at the call sites.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
 2710 
 2711 
 2712 static Address mem2address(int opcode, Register base, int index, int size, int disp)
 2713   {
 2714     Address::extend scale;
 2715 
 2716     // Hooboy, this is fugly.  We need a way to communicate to the
 2717     // encoder that the index needs to be sign extended, so we have to
 2718     // enumerate all the cases.
 2719     switch (opcode) {
 2720     case INDINDEXSCALEDI2L:
 2721     case INDINDEXSCALEDI2LN:
 2722     case INDINDEXI2L:
 2723     case INDINDEXI2LN:
 2724       scale = Address::sxtw(size);
 2725       break;
 2726     default:
 2727       scale = Address::lsl(size);
 2728     }
 2729 
 2730     if (index == -1) {
 2731       return Address(base, disp);
 2732     } else {
 2733       assert(disp == 0, "unsupported address mode: disp = %d", disp);
 2734       return Address(base, as_Register(index), scale);
 2735     }
 2736   }
 2737 
 2738 
// Member-function-pointer types used by the loadStore() helpers below:
// integer-register access via Address, integer-register access via a raw
// base register, float-register access, and vector access (which also
// carries the SIMD register variant).
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_insn2)(Register Rt, Register adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
 2744 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  static void loadStore(C2_MacroAssembler* masm, mem_insn insn,
                        Register reg, int opcode,
                        Register base, int index, int scale, int disp,
                        int size_in_memory)
  {
    Address addr = mem2address(opcode, base, index, scale, disp);
    if (addr.getMode() == Address::base_plus_offset) {
      /* Fix up any out-of-range offsets. */
      // legitimize_address uses rscratch1 as its scratch register, so
      // neither the base nor the transferred register may live there.
      assert_different_registers(rscratch1, base);
      assert_different_registers(rscratch1, reg);
      addr = __ legitimize_address(addr, size_in_memory, rscratch1);
    }
    (masm->*insn)(reg, addr);
  }
 2762 
  // Float/double variant of loadStore(). The index extend mode is chosen
  // from the operand opcode: the scaled I2L variants sign-extend a 32-bit
  // index, everything else uses a plain shifted index.
  static void loadStore(C2_MacroAssembler* masm, mem_float_insn insn,
                        FloatRegister reg, int opcode,
                        Register base, int index, int size, int disp,
                        int size_in_memory)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      // Fix up any out-of-range offsets.
      // legitimize_address uses rscratch1 as its scratch register.
      assert_different_registers(rscratch1, base);
      Address addr = Address(base, disp);
      addr = __ legitimize_address(addr, size_in_memory, rscratch1);
      (masm->*insn)(reg, addr);
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm->*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
 2790 
 2791   static void loadStore(C2_MacroAssembler* masm, mem_vector_insn insn,
 2792                         FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
 2793                         int opcode, Register base, int index, int size, int disp)
 2794   {
 2795     if (index == -1) {
 2796       (masm->*insn)(reg, T, Address(base, disp));
 2797     } else {
 2798       assert(disp == 0, "unsupported address mode");
 2799       (masm->*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
 2800     }
 2801   }
 2802 
 2803 %}
 2804 
 2805 
 2806 
 2807 //----------ENCODING BLOCK-----------------------------------------------------
 2808 // This block specifies the encoding classes used by the compiler to
 2809 // output byte streams.  Encoding classes are parameterized macros
 2810 // used by Machine Instruction Nodes in order to generate the bit
 2811 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  There are currently four
// supported interfaces: REG_INTER, CONST_INTER, MEMORY_INTER, and
 2814 // COND_INTER.  REG_INTER causes an operand to generate a function
 2815 // which returns its register number when queried.  CONST_INTER causes
 2816 // an operand to generate a function which returns the value of the
 2817 // constant when queried.  MEMORY_INTER causes an operand to generate
 2818 // four functions which return the Base Register, the Index Register,
 2819 // the Scale Value, and the Offset Value of the operand when queried.
 2820 // COND_INTER causes an operand to generate six functions which return
// the encoding code (i.e. the encoding bits for the instruction)
 2822 // associated with each basic boolean condition for a conditional
 2823 // instruction.
 2824 //
 2825 // Instructions specify two basic values for encoding.  Again, a
 2826 // function is available to check if the constant displacement is an
 2827 // oop. They use the ins_encode keyword to specify their encoding
 2828 // classes (which must be a sequence of enc_class names, and their
 2829 // parameters, specified in the encoding block), and they use the
 2830 // opcode keyword to specify, in order, their primary, secondary, and
 2831 // tertiary opcode.  Only the opcode sections which a particular
 2832 // instruction needs for encoding need to be specified.
 2833 encode %{
 2834   // Build emit functions for each basic byte or larger field in the
 2835   // intel encoding scheme (opcode, rm, sib, immediate), and call them
 2836   // from C++ code in the enc_class source block.  Emit functions will
 2837   // live in the main source block for now.  In future, we can
 2838   // generalize this by adding a syntax that specifies the sizes of
 2839   // fields in an order, so that the adlc can build the emit functions
 2840   // automagically
 2841 
  // catch all for unimplemented encodings
  enc_class enc_unimplemented %{
    // Placeholder for instruct rules whose encoding has not been written;
    // emits via MacroAssembler::unimplemented with a catch-all message.
    __ unimplemented("C2 catch all");
  %}
 2846 
 2847   // BEGIN Non-volatile memory access
 2848 
 2849   // This encoding class is generated automatically from ad_encode.m4.
 2850   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2851   enc_class aarch64_enc_ldrsbw(iRegI dst, memory1 mem) %{
 2852     Register dst_reg = as_Register($dst$$reg);
 2853     loadStore(masm, &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
 2854                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2855   %}
 2856 
 2857   // This encoding class is generated automatically from ad_encode.m4.
 2858   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2859   enc_class aarch64_enc_ldrsb(iRegI dst, memory1 mem) %{
 2860     Register dst_reg = as_Register($dst$$reg);
 2861     loadStore(masm, &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
 2862                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2863   %}
 2864 
 2865   // This encoding class is generated automatically from ad_encode.m4.
 2866   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2867   enc_class aarch64_enc_ldrb(iRegI dst, memory1 mem) %{
 2868     Register dst_reg = as_Register($dst$$reg);
 2869     loadStore(masm, &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
 2870                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2871   %}
 2872 
 2873   // This encoding class is generated automatically from ad_encode.m4.
 2874   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2875   enc_class aarch64_enc_ldrb(iRegL dst, memory1 mem) %{
 2876     Register dst_reg = as_Register($dst$$reg);
 2877     loadStore(masm, &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
 2878                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2879   %}
 2880 
 2881   // This encoding class is generated automatically from ad_encode.m4.
 2882   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2883   enc_class aarch64_enc_ldrshw(iRegI dst, memory2 mem) %{
 2884     Register dst_reg = as_Register($dst$$reg);
 2885     loadStore(masm, &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
 2886                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2887   %}
 2888 
 2889   // This encoding class is generated automatically from ad_encode.m4.
 2890   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2891   enc_class aarch64_enc_ldrsh(iRegI dst, memory2 mem) %{
 2892     Register dst_reg = as_Register($dst$$reg);
 2893     loadStore(masm, &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
 2894                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2895   %}
 2896 
 2897   // This encoding class is generated automatically from ad_encode.m4.
 2898   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2899   enc_class aarch64_enc_ldrh(iRegI dst, memory2 mem) %{
 2900     Register dst_reg = as_Register($dst$$reg);
 2901     loadStore(masm, &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
 2902                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2903   %}
 2904 
 2905   // This encoding class is generated automatically from ad_encode.m4.
 2906   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2907   enc_class aarch64_enc_ldrh(iRegL dst, memory2 mem) %{
 2908     Register dst_reg = as_Register($dst$$reg);
 2909     loadStore(masm, &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
 2910                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2911   %}
 2912 
 2913   // This encoding class is generated automatically from ad_encode.m4.
 2914   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2915   enc_class aarch64_enc_ldrw(iRegI dst, memory4 mem) %{
 2916     Register dst_reg = as_Register($dst$$reg);
 2917     loadStore(masm, &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
 2918                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2919   %}
 2920 
 2921   // This encoding class is generated automatically from ad_encode.m4.
 2922   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2923   enc_class aarch64_enc_ldrw(iRegL dst, memory4 mem) %{
 2924     Register dst_reg = as_Register($dst$$reg);
 2925     loadStore(masm, &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
 2926                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2927   %}
 2928 
 2929   // This encoding class is generated automatically from ad_encode.m4.
 2930   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2931   enc_class aarch64_enc_ldrsw(iRegL dst, memory4 mem) %{
 2932     Register dst_reg = as_Register($dst$$reg);
 2933     loadStore(masm, &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
 2934                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2935   %}
 2936 
 2937   // This encoding class is generated automatically from ad_encode.m4.
 2938   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2939   enc_class aarch64_enc_ldr(iRegL dst, memory8 mem) %{
 2940     Register dst_reg = as_Register($dst$$reg);
 2941     loadStore(masm, &MacroAssembler::ldr, dst_reg, $mem->opcode(),
 2942                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 2943   %}
 2944 
 2945   // This encoding class is generated automatically from ad_encode.m4.
 2946   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2947   enc_class aarch64_enc_ldrs(vRegF dst, memory4 mem) %{
 2948     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
 2949     loadStore(masm, &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
 2950                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2951   %}
 2952 
 2953   // This encoding class is generated automatically from ad_encode.m4.
 2954   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2955   enc_class aarch64_enc_ldrd(vRegD dst, memory8 mem) %{
 2956     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
 2957     loadStore(masm, &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
 2958                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 2959   %}
 2960 
 2961   // This encoding class is generated automatically from ad_encode.m4.
 2962   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2963   enc_class aarch64_enc_strb(iRegI src, memory1 mem) %{
 2964     Register src_reg = as_Register($src$$reg);
 2965     loadStore(masm, &MacroAssembler::strb, src_reg, $mem->opcode(),
 2966                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2967   %}
 2968 
 2969   // This encoding class is generated automatically from ad_encode.m4.
 2970   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2971   enc_class aarch64_enc_strb0(memory1 mem) %{
 2972     loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(),
 2973                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2974   %}
 2975 
 2976   // This encoding class is generated automatically from ad_encode.m4.
 2977   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2978   enc_class aarch64_enc_strh(iRegI src, memory2 mem) %{
 2979     Register src_reg = as_Register($src$$reg);
 2980     loadStore(masm, &MacroAssembler::strh, src_reg, $mem->opcode(),
 2981                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2982   %}
 2983 
 2984   // This encoding class is generated automatically from ad_encode.m4.
 2985   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2986   enc_class aarch64_enc_strh0(memory2 mem) %{
 2987     loadStore(masm, &MacroAssembler::strh, zr, $mem->opcode(),
 2988                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2989   %}
 2990 
 2991   // This encoding class is generated automatically from ad_encode.m4.
 2992   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2993   enc_class aarch64_enc_strw(iRegI src, memory4 mem) %{
 2994     Register src_reg = as_Register($src$$reg);
 2995     loadStore(masm, &MacroAssembler::strw, src_reg, $mem->opcode(),
 2996                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2997   %}
 2998 
 2999   // This encoding class is generated automatically from ad_encode.m4.
 3000   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3001   enc_class aarch64_enc_strw0(memory4 mem) %{
 3002     loadStore(masm, &MacroAssembler::strw, zr, $mem->opcode(),
 3003                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 3004   %}
 3005 
 3006   // This encoding class is generated automatically from ad_encode.m4.
 3007   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3008   enc_class aarch64_enc_str(iRegL src, memory8 mem) %{
 3009     Register src_reg = as_Register($src$$reg);
 3010     // we sometimes get asked to store the stack pointer into the
 3011     // current thread -- we cannot do that directly on AArch64
 3012     if (src_reg == r31_sp) {
 3013       assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
 3014       __ mov(rscratch2, sp);
 3015       src_reg = rscratch2;
 3016     }
 3017     loadStore(masm, &MacroAssembler::str, src_reg, $mem->opcode(),
 3018                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 3019   %}
 3020 
 3021   // This encoding class is generated automatically from ad_encode.m4.
 3022   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3023   enc_class aarch64_enc_str0(memory8 mem) %{
 3024     loadStore(masm, &MacroAssembler::str, zr, $mem->opcode(),
 3025                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 3026   %}
 3027 
 3028   // This encoding class is generated automatically from ad_encode.m4.
 3029   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3030   enc_class aarch64_enc_strs(vRegF src, memory4 mem) %{
 3031     FloatRegister src_reg = as_FloatRegister($src$$reg);
 3032     loadStore(masm, &MacroAssembler::strs, src_reg, $mem->opcode(),
 3033                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 3034   %}
 3035 
 3036   // This encoding class is generated automatically from ad_encode.m4.
 3037   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3038   enc_class aarch64_enc_strd(vRegD src, memory8 mem) %{
 3039     FloatRegister src_reg = as_FloatRegister($src$$reg);
 3040     loadStore(masm, &MacroAssembler::strd, src_reg, $mem->opcode(),
 3041                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 3042   %}
 3043 
 3044   // This encoding class is generated automatically from ad_encode.m4.
 3045   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3046   enc_class aarch64_enc_strb0_ordered(memory4 mem) %{
 3047       __ membar(Assembler::StoreStore);
 3048       loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(),
 3049                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 3050   %}
 3051 
 3052   // END Non-volatile memory access
 3053 
  // Vector loads and stores
  //
  // Each encoding forwards to the vector loadStore() helper; the
  // MacroAssembler::H/S/D/Q argument selects the SIMD register variant
  // used for the transfer.
  enc_class aarch64_enc_ldrvH(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvS(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvH(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvS(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
 3102 
  // volatile loads and stores
  //
  // These expand via MOV_VOLATILE, which only accepts a plain
  // register-indirect address (index/scale/disp must all be absent).
  // The *0 variants store the zero register (zr).

  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrb0(memory mem) %{
    MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrh0(memory mem) %{
    MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  enc_class aarch64_enc_stlrw0(memory mem) %{
    MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}
 3134 
  // Acquiring loads. The signed sub-word variants follow the plain
  // acquiring load with an explicit sign extension of the destination.
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}
 3197 
  // float load-acquire: acquire the 32 raw bits into rscratch1, then
  // move them into the FP register with fmovs
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // double load-acquire: as above, but with a 64-bit load and fmovd
  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
 3209 
  // ---- volatile (store-release) store encodings ----
  //
  // rscratch1 is reserved for address formation by MOV_VOLATILE, so any
  // value that must be staged first (sp copy, FP bit pattern) goes
  // through rscratch2.

  // store-release of a long register
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // store-release of constant zero (uses the zero register directly)
  enc_class aarch64_enc_stlr0(memory mem) %{
    MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // float store-release: move the 32 raw bits into rscratch2, then
  // store-release them as a word
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // double store-release: as above, but 64 bits wide
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
 3245 
 3246   // synchronized read/update encodings
 3247 
  // load-acquire exclusive: ldaxr only accepts a bare base register, so
  // any index/displacement is folded into rscratch1 with lea first
  enc_class aarch64_enc_ldaxr(iRegL dst, memory8 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // two-step lea: fold in the displacement first, then the scaled index
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
 3275 
  // store-release exclusive: like ldaxr above, the address must be a bare
  // register, formed in rscratch2.  stlxr writes its status (0 = stored)
  // into rscratch1; the trailing cmpw sets flags so EQ means success.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory8 mem) %{
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // two-step lea: fold in the displacement first, then the scaled index
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    __ cmpw(rscratch1, zr);
  %}
 3304 
  // ---- compare-and-swap encodings, one per operand width ----
  //
  // The memory operand must be a bare base register (no index, no
  // displacement) -- enforced by the guarantee.  All of these use
  // release semantics on the store but do not acquire; see the _acq
  // variants below for the acquiring forms.

  // CAS of a 64-bit value
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // CAS of a 32-bit value
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // CAS of a 16-bit value
  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // CAS of an 8-bit value
  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3332 
 3333 
 3334   // The only difference between aarch64_enc_cmpxchg and
 3335   // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
 3336   // CompareAndSwap sequence to serve as a barrier on acquiring a
 3337   // lock.
  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.

  // acquiring CAS of a 64-bit value
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // acquiring CAS of a 32-bit value
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // acquiring CAS of a 16-bit value
  enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // acquiring CAS of an 8-bit value
  enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3365 
 3366   // auxiliary used for CompareAndSwapX to set result register
  // auxiliary used for CompareAndSwapX to set result register
  // (relies on the flags left behind by the preceding CAS sequence:
  // EQ when the swap succeeded)
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    Register res_reg = as_Register($res$$reg);
    __ cset(res_reg, Assembler::EQ);
  %}
 3371 
 3372   // prefetch encodings
 3373 
 3374   enc_class aarch64_enc_prefetchw(memory mem) %{
 3375     Register base = as_Register($mem$$base);
 3376     int index = $mem$$index;
 3377     int scale = $mem$$scale;
 3378     int disp = $mem$$disp;
 3379     if (index == -1) {
 3380       // Fix up any out-of-range offsets.
 3381       assert_different_registers(rscratch1, base);
 3382       Address addr = Address(base, disp);
 3383       addr = __ legitimize_address(addr, 8, rscratch1);
 3384       __ prfm(addr, PSTL1KEEP);
 3385     } else {
 3386       Register index_reg = as_Register(index);
 3387       if (disp == 0) {
 3388         __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
 3389       } else {
 3390         __ lea(rscratch1, Address(base, disp));
 3391 	__ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
 3392       }
 3393     }
 3394   %}
 3395 
 3396   // mov encodings
 3397 
  // 32-bit immediate move; a zero constant goes through the zero
  // register rather than being materialized
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    uint32_t con = (uint32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // 64-bit immediate move; a zero constant goes through the zero
  // register rather than being materialized
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    Register dst_reg = as_Register($dst$$reg);
    uint64_t con = (uint64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}
 3417 
  // Pointer-constant move.  Dispatches on the constant's relocation
  // type: oops and metadata need relocated moves; everything else is a
  // raw address.  Null and 1 are handled by the dedicated mov_p0/mov_p1
  // encodings below, so they are unreachable here.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == nullptr || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        // Low addresses (below the first page) and addresses outside the
        // valid VA range are moved as plain constants; otherwise use the
        // shorter adrp+add page-relative sequence.
        if (! __ is_valid_AArch64_address(con) ||
            con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          uint64_t offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}

  // null pointer constant
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // pointer constant 1
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (uint64_t)1);
  %}

  // card-table byte map base constant
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    __ load_byte_map_base($dst$$Register);
  %}

  // narrow-oop constant; must carry an oop relocation
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == nullptr) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // narrow-oop null constant
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // narrow-klass constant; must carry a metadata relocation
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == nullptr) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
 3485 
 3486   // arithmetic encodings
 3487 
  // ---- arithmetic encodings ----

  // 32-bit add/subtract immediate.  $primary selects the operation
  // (0 = add, 1 = subtract) by negating the constant; a negative
  // effective constant is then emitted as the opposite instruction
  // with the positive magnitude.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // 64-bit add/subtract immediate; same scheme as above
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
 3513 
 3514   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
 3515    Register dst_reg = as_Register($dst$$reg);
 3516    Register src1_reg = as_Register($src1$$reg);
 3517    Register src2_reg = as_Register($src2$$reg);
 3518     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
 3519   %}
 3520 
 3521   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
 3522    Register dst_reg = as_Register($dst$$reg);
 3523    Register src1_reg = as_Register($src1$$reg);
 3524    Register src2_reg = as_Register($src2$$reg);
 3525     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
 3526   %}
 3527 
 3528   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
 3529    Register dst_reg = as_Register($dst$$reg);
 3530    Register src1_reg = as_Register($src1$$reg);
 3531    Register src2_reg = as_Register($src2$$reg);
 3532     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
 3533   %}
 3534 
 3535   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
 3536    Register dst_reg = as_Register($dst$$reg);
 3537    Register src1_reg = as_Register($src1$$reg);
 3538    Register src2_reg = as_Register($src2$$reg);
 3539     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
 3540   %}
 3541 
 3542   // compare instruction encodings
 3543 
  // ---- compare instruction encodings ----

  // 32-bit register-register compare
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32-bit compare against an add/sub-encodable immediate: emitted as a
  // flag-setting subtract (or add, for a negative constant) against zr
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // 32-bit compare against an arbitrary immediate: materialize it in
  // rscratch1 first
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    Register reg1 = as_Register($src1$$reg);
    uint32_t val = (uint32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}

  // 64-bit register-register compare
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare against a 12-bit add/sub immediate
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
      // val == -val only for Long.MIN_VALUE, which cannot be negated;
      // materialize it in rscratch1 via ORR and compare with that
      __ orr(rscratch1, zr, (uint64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // 64-bit compare against an arbitrary immediate: materialize it in
  // rscratch1 first
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    Register reg1 = as_Register($src1$$reg);
    uint64_t val = (uint64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}

  // pointer compare
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // narrow-oop compare (32-bit)
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // pointer null test
  enc_class aarch64_enc_testp(iRegP src) %{
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // narrow-oop null test
  enc_class aarch64_enc_testn(iRegN src) %{
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
 3615 
  // unconditional branch to a label
  enc_class aarch64_enc_b(label lbl) %{
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // conditional branch, signed condition code from the cmpOp operand
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // conditional branch, unsigned condition code from the cmpOpU operand
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}
 3630 
  // Slow-path subtype check.  On the hit (fall-through) path, when
  // $primary is set, the result register is cleared; the miss path
  // branches past that clear to the bound label.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     nullptr, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
 3647 
  // Static Java call.  Three cases:
  //  1. no _method: a call into the runtime (new, uncommon_trap, ...)
  //  2. the ensureMaterializedForStackWalk intrinsic: the call is elided
  //     and replaced by a nop to keep code size stable
  //  3. a real static (or optimized-virtual) Java call, with a
  //     to-interpreter stub (possibly shared between call sites)
  // Any trampoline/stub emission can fail if the code cache is full, in
  // which case the compilation is bailed out.
  enc_class aarch64_enc_java_static_call(method meth) %{
    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type));
      if (call == nullptr) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else if (_method->intrinsic_id() == vmIntrinsicID::_ensureMaterializedForStackWalk) {
      // The NOP here is purely to ensure that eliding a call to
      // JVM_EnsureMaterializedForStackWalk doesn't change the code size.
      __ nop();
      __ block_comment("call JVM_EnsureMaterializedForStackWalk (elided)");
    } else {
      int method_index = resolved_method_index(masm);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec));
      if (call == nullptr) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
      if (CodeBuffer::supports_shared_stubs() && _method->can_be_statically_bound()) {
        // Calls of the same statically bound method can share
        // a stub to the interpreter.
        __ code()->shared_stub_to_interp_for(_method, call - __ begin());
      } else {
        // Emit stub for static call
        address stub = CompiledDirectCall::emit_to_interp_stub(masm, call);
        if (stub == nullptr) {
          ciEnv::current()->record_failure("CodeCache is full");
          return;
        }
      }
    }

    __ post_call_nop();

    // Only non uncommon_trap calls need to reinitialize ptrue.
    if (Compile::current()->max_vector_size() > 0 && uncommon_trap_request() == 0) {
      __ reinitialize_ptrue();
    }
  %}
 3693 
  // Dynamic (inline-cache) Java call; bails out the compilation if the
  // code cache is full.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    int method_index = resolved_method_index(masm);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
    __ post_call_nop();
    // Restore the predicate register after the call when SVE vectors
    // are in use.
    if (Compile::current()->max_vector_size() > 0) {
      __ reinitialize_ptrue();
    }
  %}

  enc_class aarch64_enc_call_epilog() %{
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find magic cookie on stack
      __ call_Unimplemented();
    }
  %}
 3713 
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blr
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target lives in the code cache: a (trampolined) direct call works.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == nullptr) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
      __ post_call_nop();
    } else {
      Label retaddr;
      // Make the anchor frame walkable: record the return pc in the
      // thread before making the indirect call.
      __ adr(rscratch2, retaddr);
      __ str(rscratch2, Address(rthread, JavaThread::last_Java_pc_offset()));
      __ lea(rscratch1, RuntimeAddress(entry));
      __ blr(rscratch1);
      __ bind(retaddr);
      __ post_call_nop();
    }
    // Restore the predicate register after the call when SVE vectors
    // are in use.
    if (Compile::current()->max_vector_size() > 0) {
      __ reinitialize_ptrue();
    }
  %}
 3742 
  // jump to the rethrow stub (reachable from anywhere via far_jump)
  enc_class aarch64_enc_rethrow() %{
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}

  // method return
  enc_class aarch64_enc_ret() %{
#ifdef ASSERT
    // With SVE in use, the all-true predicate must still be intact here.
    if (Compile::current()->max_vector_size() > 0) {
      __ verify_ptrue();
    }
#endif
    __ ret(lr);
  %}

  // tail call: jump straight to the target
  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}

  // tail jump for exception forwarding
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
 3769 
 3770 %}
 3771 
 3772 //----------FRAME--------------------------------------------------------------
 3773 // Definition of frame structure and management information.
 3774 //
 3775 //  S T A C K   L A Y O U T    Allocators stack-slot number
 3776 //                             |   (to get allocators register number
 3777 //  G  Owned by    |        |  v    add OptoReg::stack0())
 3778 //  r   CALLER     |        |
 3779 //  o     |        +--------+      pad to even-align allocators stack-slot
 3780 //  w     V        |  pad0  |        numbers; owned by CALLER
 3781 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
 3782 //  h     ^        |   in   |  5
 3783 //        |        |  args  |  4   Holes in incoming args owned by SELF
 3784 //  |     |        |        |  3
 3785 //  |     |        +--------+
 3786 //  V     |        | old out|      Empty on Intel, window on Sparc
 3787 //        |    old |preserve|      Must be even aligned.
 3788 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
 3789 //        |        |   in   |  3   area for Intel ret address
 3790 //     Owned by    |preserve|      Empty on Sparc.
 3791 //       SELF      +--------+
 3792 //        |        |  pad2  |  2   pad to align old SP
 3793 //        |        +--------+  1
 3794 //        |        | locks  |  0
 3795 //        |        +--------+----> OptoReg::stack0(), even aligned
 3796 //        |        |  pad1  | 11   pad to align new SP
 3797 //        |        +--------+
 3798 //        |        |        | 10
 3799 //        |        | spills |  9   spills
 3800 //        V        |        |  8   (pad0 slot for callee)
 3801 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
 3802 //        ^        |  out   |  7
 3803 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
 3804 //     Owned by    +--------+
 3805 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
 3806 //        |    new |preserve|      Must be even-aligned.
 3807 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
 3808 //        |        |        |
 3809 //
 3810 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
 3811 //         known from SELF's arguments and the Java calling convention.
 3812 //         Region 6-7 is determined per call site.
 3813 // Note 2: If the calling convention leaves holes in the incoming argument
 3814 //         area, those holes are owned by SELF.  Holes in the outgoing area
 3815 //         are owned by the CALLEE.  Holes should not be necessary in the
 3816 //         incoming area, as the Java calling convention is completely under
 3817 //         the control of the AD file.  Doubles can be sorted and packed to
 3818 //         avoid holes.  Holes in the outgoing arguments may be necessary for
 3819 //         varargs C calling conventions.
 3820 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
 3821 //         even aligned with pad0 as needed.
 3822 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
 3823 //           (the latter is true on Intel but is it false on AArch64?)
 3824 //         region 6-11 is even aligned; it may be padded out more so that
 3825 //         the region from SP to FP meets the minimum stack alignment.
 3826 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
 3827 //         alignment.  Region 11, pad1, may be dynamically extended so that
 3828 //         SP meets the minimum alignment.
 3829 
frame %{
  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or Method for I2C.
  inline_cache_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low half of the return register for each ideal register type,
    // indexed by ideal_reg; each entry is annotated with its opcode.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // High half; OptoReg::Bad where the type occupies a single slot.
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                      // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
 3901 
 3902 //----------ATTRIBUTES---------------------------------------------------------
 3903 //----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
// INSN_COST is the per-instruction base cost used throughout this file.
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
 3923 
 3924 //----------OPERANDS-----------------------------------------------------------
 3925 // Operand definitions must precede instruction definitions for correct parsing
 3926 // in the ADLC because operands constitute user defined types which are used in
 3927 // instruction definitions.
 3928 
 3929 //----------Simple Operands----------------------------------------------------
 3930 
 3931 // Integer operands 32 bit
 3932 // 32 bit immediate
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift values for add/sub extension shift
operand immI_gt_1()
%{
  predicate(n->get_int() > 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for add sub immediate
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Specific power-of-two / shift-amount constants matched by patterns
// elsewhere in this file.

// constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4045 
 4046 operand immI_56()
 4047 %{
 4048   predicate(n->get_int() == 56);
 4049   match(ConI);
 4050 
 4051   op_cost(0);
 4052   format %{ %}
 4053   interface(CONST_INTER);
 4054 %}
 4055 
 4056 operand immI_255()
 4057 %{
 4058   predicate(n->get_int() == 255);
 4059   match(ConI);
 4060 
 4061   op_cost(0);
 4062   format %{ %}
 4063   interface(CONST_INTER);
 4064 %}
 4065 
 4066 operand immI_65535()
 4067 %{
 4068   predicate(n->get_int() == 65535);
 4069   match(ConI);
 4070 
 4071   op_cost(0);
 4072   format %{ %}
 4073   interface(CONST_INTER);
 4074 %}
 4075 
 4076 operand immI_positive()
 4077 %{
 4078   predicate(n->get_int() > 0);
 4079   match(ConI);
 4080 
 4081   op_cost(0);
 4082   format %{ %}
 4083   interface(CONST_INTER);
 4084 %}
 4085 
 4086 // BoolTest condition for signed compare
 4087 operand immI_cmp_cond()
 4088 %{
 4089   predicate(!Matcher::is_unsigned_booltest_pred(n->get_int()));
 4090   match(ConI);
 4091 
 4092   op_cost(0);
 4093   format %{ %}
 4094   interface(CONST_INTER);
 4095 %}
 4096 
 4097 // BoolTest condition for unsigned compare
 4098 operand immI_cmpU_cond()
 4099 %{
 4100   predicate(Matcher::is_unsigned_booltest_pred(n->get_int()));
 4101   match(ConI);
 4102 
 4103   op_cost(0);
 4104   format %{ %}
 4105   interface(CONST_INTER);
 4106 %}
 4107 
// 64 bit constant 255 (0xff, low byte mask)
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 65535 (0xffff, low half-word mask)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 0xffffffff (low word mask)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Non-zero 64 bit mask of contiguous low-order one bits (2^k - 1),
// with the top two bits clear.
operand immL_bitmask()
%{
  predicate((n->get_long() != 0)
            && ((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Non-zero 32 bit mask of contiguous low-order one bits (2^k - 1),
// with the top two bits clear.
operand immI_bitmask()
%{
  predicate((n->get_int() != 0)
            && ((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Non-zero 64 bit mask of contiguous low-order one bits (2^k - 1)
// that also fits in the positive 32-bit int range (< 0x80000000).
operand immL_positive_bitmaskI()
%{
  predicate((n->get_long() != 0)
            && ((julong)n->get_long() < 0x80000000ULL)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4173 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 5 bit signed integer
operand immI5()
%{
  predicate(Assembler::is_simm(n->get_int(), 5));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 7 bit unsigned integer
operand immIU7()
%{
  predicate(Assembler::is_uimm(n->get_int(), 7));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for scaled or unscaled immediate loads and stores
// (the second argument of offset_ok_for_immed is the log2 of the access size)
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 1-byte immediate load/store (log2 size = 0)
operand immIOffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 2-byte immediate load/store (log2 size = 1)
operand immIOffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 4-byte immediate load/store (log2 size = 2)
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an 8-byte immediate load/store (log2 size = 3)
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 16-byte immediate load/store (log2 size = 4)
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4267 
// Long offset in the range [-256, 65520] for immediate loads and stores
operand immLOffset()
%{
  predicate(n->get_long() >= -256 && n->get_long() <= 65520);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 1-byte immediate load/store (log2 size = 0)
operand immLoffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 2-byte immediate load/store (log2 size = 1)
operand immLoffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 4-byte immediate load/store (log2 size = 2)
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for an 8-byte immediate load/store (log2 size = 3)
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 16-byte immediate load/store (log2 size = 4)
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 5 bit signed long integer
operand immL5()
%{
  predicate(Assembler::is_simm(n->get_long(), 5));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 7 bit unsigned long integer
operand immLU7()
%{
  predicate(Assembler::is_uimm(n->get_long(), 7));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4349 
// 8 bit signed value.
operand immI8()
%{
  predicate(n->get_int() <= 127 && n->get_int() >= -128);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 8 bit signed value (simm8), or #simm8 LSL 8.
operand immI8_shift8()
%{
  predicate((n->get_int() <= 127 && n->get_int() >= -128) ||
            (n->get_int() <= 32512 && n->get_int() >= -32768 && (n->get_int() & 0xff) == 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 8 bit signed value (simm8), or #simm8 LSL 8.
operand immL8_shift8()
%{
  predicate((n->get_long() <= 127 && n->get_long() >= -128) ||
            (n->get_long() <= 32512 && n->get_long() >= -32768 && (n->get_long() & 0xff) == 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 8 bit integer valid for vector add sub immediate
operand immBAddSubV()
%{
  predicate(n->get_int() <= 255 && n->get_int() >= -255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((int64_t)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for vector add sub immediate
operand immIAddSubV()
%{
  predicate(Assembler::operand_valid_for_sve_add_sub_immediate((int64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Unsigned integers valid for logical (bitwise) immediates

// 8 bit value valid as an SVE logical immediate on byte elements
operand immBLog()
%{
  predicate(Assembler::operand_valid_for_sve_logical_immediate(BitsPerByte, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 16 bit value valid as an SVE logical immediate on short elements
operand immSLog()
%{
  predicate(Assembler::operand_valid_for_sve_logical_immediate(BitsPerShort, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4448 
// Integer operands 64 bit
// 64 bit immediate
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for addv subv immediate
operand immLAddSubV()
%{
  predicate(Assembler::operand_valid_for_sve_add_sub_immediate(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (uint64_t)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4522 
// Pointer operands
// Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// nullptr Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
// Matches only when the pointer constant equals the card table's
// byte_map_base (and the GC is a card-table barrier set, excluding
// Shenandoah when that GC is built in).
operand immByteMapBase()
%{
  // Get base of card map
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            SHENANDOAHGC_ONLY(!BarrierSet::barrier_set()->is_a(BarrierSet::ShenandoahBarrierSet) &&)
            (CardTable::CardValue*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4570 
// Float and Double operands
// Double Immediate
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: encodable as a floating-point move immediate
// (see Assembler::operand_valid_for_float_immediate).
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Half Float (FP16) Immediate
operand immH()
%{
  match(ConH);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: encodable as a floating-point move immediate
// (checked after widening to double).
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4640 
// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow nullptr Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4671 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4693 
// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4705 
 4706 // Integer 64 bit Register not Special
 4707 operand iRegLNoSp()
 4708 %{
 4709   constraint(ALLOC_IN_RC(no_special_reg));
 4710   match(RegL);
 4711   match(iRegL_R0);
 4712   format %{ %}
 4713   interface(REG_INTER);
 4714 %}
 4715 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// This operand is not allowed to use rfp even if
// rfp is not used to hold the frame pointer.
operand iRegPNoSpNoRfp()
%{
  constraint(ALLOC_IN_RC(no_special_no_rfp_ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4760 
// Fixed-register pointer operands: each one constrains allocation to a
// single physical register, for instructions with fixed register usage.

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4844 
// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4912 
// Narrow (compressed) Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4934 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Generic vector class. This will be used for
// all vector operands, including NEON and SVE.
operand vReg()
%{
  constraint(ALLOC_IN_RC(dynamic));
  match(VecA);
  match(VecD);
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Scalable vector register (SVE)
operand vecA()
%{
  constraint(ALLOC_IN_RC(vectora_reg));
  match(VecA);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64-bit NEON vector register
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128-bit NEON vector register
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5002 
// Fixed-register double operands: each constrains allocation to a single
// physical FP/SIMD register, for instructions with fixed register usage.

operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V4()
%{
  constraint(ALLOC_IN_RC(v4_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V5()
%{
  constraint(ALLOC_IN_RC(v5_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V6()
%{
  constraint(ALLOC_IN_RC(v6_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V7()
%{
  constraint(ALLOC_IN_RC(v7_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V12()
%{
  constraint(ALLOC_IN_RC(v12_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V13()
%{
  constraint(ALLOC_IN_RC(v13_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5092 
// SVE predicate register
operand pReg()
%{
  constraint(ALLOC_IN_RC(pr_reg));
  match(RegVectMask);
  match(pRegGov);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// SVE governing predicate register (usable to govern predicated
// instructions, i.e. registers p0-p7)
operand pRegGov()
%{
  constraint(ALLOC_IN_RC(gov_pr));
  match(RegVectMask);
  match(pReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// SVE predicate register P0 only
operand pRegGov_P0()
%{
  constraint(ALLOC_IN_RC(p0_reg));
  match(RegVectMask);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// SVE predicate register P1 only
operand pRegGov_P1()
%{
  constraint(ALLOC_IN_RC(p1_reg));
  match(RegVectMask);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5130 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
 5170 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5193 
//----------Memory Operands----------------------------------------------------

// Register-indirect: [reg]
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// Base plus sign-extended 32-bit index, scaled by a shift: [reg, ireg sxtw scale]
// The predicate requires that every memory user of the AddP can encode
// the scaled-index addressing mode.
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// Base plus 64-bit index, scaled by a shift: [reg, lreg lsl scale]
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// Base plus sign-extended 32-bit index, unscaled: [reg, ireg sxtw]
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Base plus 64-bit index, unscaled: [reg, lreg]
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
 5267 
// Base-plus-constant-int-offset memory operands.  One operand exists
// per access size (1, 2, 4, 8 or 16 bytes); the immIOffsetN immediate
// types (defined earlier in this file) restrict the offset to values
// legal for a load/store of that size.  index(0xffffffff) is the ADLC
// convention for "no index register".
operand indOffI1(iRegP reg, immIOffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// As indOffI1, but with an offset legal for 2-byte accesses.
operand indOffI2(iRegP reg, immIOffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// As indOffI1, but with an offset legal for 4-byte accesses.
operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// As indOffI1, but with an offset legal for 8-byte accesses.
operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// As indOffI1, but with an offset legal for 16-byte accesses.
operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5337 
// Base-plus-constant-long-offset memory operands, the long-offset
// counterparts of indOffI1..indOffI16 above.  One operand per access
// size; the immLoffsetN immediate types restrict the offset range.
operand indOffL1(iRegP reg, immLoffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// As indOffL1, but with an offset legal for 2-byte accesses.
operand indOffL2(iRegP reg, immLoffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// As indOffL1, but with an offset legal for 4-byte accesses.
operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// As indOffL1, but with an offset legal for 8-byte accesses.
operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// As indOffL1, but with an offset legal for 16-byte accesses.
operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5407 
// Memory operand: a long value cast to a pointer (CastX2P) used
// directly as the base address, with no offset.
operand indirectX2P(iRegL reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(CastX2P reg);
  op_cost(0);
  format %{ "[$reg]\t# long -> ptr" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// As indirectX2P, but with a constant long offset added to the base.
operand indOffX2P(iRegL reg, immLOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (CastX2P reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# long -> ptr" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5435 
// Memory operands whose base is a narrow (compressed) oop.  All are
// guarded by CompressedOops::shift() == 0 so the DecodeN can be folded
// into the addressing mode instead of being materialized.
// NOTE(review): folding DecodeN into base($reg) presumably also relies
// on a null narrow-oop heap base — confirm against the matcher's use
// of these operands.
operand indirectN(iRegN reg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow base plus a sign-extended 32-bit index shifted by a constant
// scale.  The predicate also checks that the scaled-index form is
// legal for every memory use of this address.
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// Narrow base plus a 64-bit index shifted by a constant scale.
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// Narrow base plus a 32-bit int index widened to long (unscaled).
operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow base plus an unscaled 64-bit register index.
operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow base plus a constant int offset.
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Narrow base plus a constant long offset.
operand indOffLN(iRegN reg, immLOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5540 
 5541 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
//
// All stack-slot operands address [sp + slot offset]: the slot register
// supplies the displacement and there is no index register.
// NOTE(review): 0x1e appears to be this file's matcher encoding for the
// stack pointer; the "RSP" name in the comments is inherited from the
// x86 port — confirm against the register definitions at the top of
// this file.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding an int value.
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a float value.
operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a double value.
operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a long value.
operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
 5616 
// Operands for expressing Control Flow
// NOTE: Label is a predefined operand which should not be redefined in
//       the AD file. It is generically handled within the ADLC.

//----------Conditional Branch Operands----------------------------------------
// Comparison Op  - This is the operation of the comparison, and is limited to
//                  the following set of codes:
//                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.

// used for signed integral comparisons and fp comparisons

// The hex values are the AArch64 condition-code encodings
// (eq=0x0, ne=0x1, lt=0xb, ge=0xa, le=0xd, gt=0xc, vs=0x6, vc=0x7).
operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 5653 
// used for unsigned integral comparisons

// As cmpOp, but mapping less/greater to the unsigned AArch64
// condition codes (lo/hs/ls/hi).
operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 5672 
// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions

// Restricted form of cmpOp: the predicate only accepts eq/ne tests, so
// instructions matching this operand can use compare-and-branch
// (cbz/cbnz) or test-and-branch (tbz/tbnz) encodings.
operand cmpOpEqNe()
%{
  match(Bool);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 5695 
// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions

// Restricted form of cmpOp: the predicate only accepts lt/ge tests
// (sign-bit tests, expressible with tbz/tbnz).
operand cmpOpLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 5719 
// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions

// Restricted form of cmpOpU: the predicate accepts eq/ne/le/gt tests.
// Against zero, unsigned le/gt degenerate to eq/ne, so these can also
// use cbz/cbnz or tbz/tbnz encodings.
operand cmpOpUEqNeLeGt()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq ||
            n->as_Bool()->_test._test == BoolTest::ne ||
            n->as_Bool()->_test._test == BoolTest::le ||
            n->as_Bool()->_test._test == BoolTest::gt);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 5745 
// Special operand allowing long args to int ops to be truncated for free

// Matches a ConvL2I whose input is a long register; a 32-bit (w-form)
// instruction consuming this operand reads the low 32 bits directly,
// so no separate truncation instruction is needed.
operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}
 5758 
// As iRegL2I but for CastX2P: lets a long register be consumed
// directly as a pointer without an explicit conversion instruction.
operand iRegL2P(iRegL reg) %{

  op_cost(0);

  match(CastX2P reg);

  format %{ "l2p($reg)" %}

  interface(REG_INTER)
%}
 5769 
// Vector memory operand classes, one per access size in bytes; each
// admits only the offset operands legal for that access size.
opclass vmem2(indirect, indIndex, indOffI2, indOffL2);
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
 5774 
//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used as to simplify
// instruction definitions by not requiring the AD writer to specify
// separate instructions for every form of operand when the
// instruction accepts multiple operand types with the same basic
// encoding and format. The classic case of this is memory operands.

// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address

// memoryN admits only the constant-offset operands whose range is
// legal for an N-byte access; the register-indexed and narrow-oop
// forms are size-independent and appear in every class.
opclass memory1(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI1, indOffL1,
                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indirectX2P, indOffX2P);

opclass memory2(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI2, indOffL2,
                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indirectX2P, indOffX2P);

// memory4 and memory8 additionally admit the narrow-base constant
// offset forms (indOffIN/indOffLN), which memory1/memory2 do not.
opclass memory4(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI4, indOffL4,
                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P);

opclass memory8(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI8, indOffL8,
                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P);

// All of the memory operands. For the pipeline description.
opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex,
               indOffI1, indOffL1, indOffI2, indOffL2, indOffI4, indOffL4, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P);
 5801 
 5802 
 5803 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
 5804 // operations. it allows the src to be either an iRegI or a (ConvL2I
 5805 // iRegL). in the latter case the l2i normally planted for a ConvL2I
 5806 // can be elided because the 32-bit instruction will just employ the
 5807 // lower 32 bits anyway.
 5808 //
 5809 // n.b. this does not elide all L2I conversions. if the truncated
 5810 // value is consumed by more than one operation then the ConvL2I
 5811 // cannot be bundled into the consuming nodes so an l2i gets planted
 5812 // (actually a movw $dst $src) and the downstream instructions consume
 5813 // the result of the l2i as an iRegI input. That's a shame since the
 5814 // movw is actually redundant but its not too costly.
 5815 
 5816 opclass iRegIorL2I(iRegI, iRegL2I);
 5817 opclass iRegPorL2P(iRegP, iRegL2P);
 5818 
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architectures pipeline.

// For specific pipelines, eg A53, define the stages of that pipeline
//pipe_desc(ISS, EX1, EX2, WR);
// Map the named A53-style stages (issue, execute 1/2, writeback) onto
// the generic stages S0-S3 declared by pipe_desc further below.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
 5829 // Integer ALU reg operation
 5830 pipeline %{
 5831 
// Global pipeline attributes: instruction size/fetch geometry and the
// available nop for bundle padding.
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 4;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
 5844 
// We don't use an actual pipeline model so don't care about resources
// or description. we do use pipeline classes to introduce fixed
// latencies

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine

// INS0/INS1 are the two issue slots; INS01 means "either slot".
// Likewise ALU means "either of the two ALUs".
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);
 5865 
//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

// FP/NEON pipeline classes.  All model single instructions on the
// NEON_FP unit; most read sources in S1/S2 and write the result in S5.

// FP two-operand (dyadic) op, single precision.
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP two-operand (dyadic) op, double precision.
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP one-operand (unary) op, single precision.
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP one-operand (unary) op, double precision.
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> float conversion.
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> double conversion.
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> int: FP source, general-purpose destination.
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> long.
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> float: general-purpose source, FP destination.
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> float.
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> int.
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> long.
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> double.
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> double.
// NOTE(review): src is declared iRegIorL2I although the class name
// says l2d; confirm whether iRegL was intended (pipeline classes are
// scheduling-only, so this affects modelling, not correctness).
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP divide, single precision.  Uses INS0: issues only in slot 0.
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP divide, double precision.  Uses INS0: issues only in slot 0.
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP conditional select, single precision: reads the flags and both
// sources in S1, result ready in S3.
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP conditional select, double precision.
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP immediate move, single precision (no source operands).
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP immediate move, double precision.
pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP constant load, single precision.
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

// FP constant load, double precision.
pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
 6071 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);   // shifted operand is needed a stage earlier
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
// NOTE(review): dst is written in EX2 but the ALU resource is booked
// in EX1 — confirm this asymmetry is intended.
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
 6169 
//------- Compare operation -------------------------------

// Compare reg-reg: writes the flags register in EX2.
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-immediate.
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
 6196 
//------- Conditional instructions ------------------------
// All read the flags in EX1 and produce the result in EX2.

// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
 6234 
//------- Multiply pipeline operations --------------------
// All use the MAC unit with sources read at issue and the result
// written at writeback.

// Multiply reg-reg (32 bit)
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (32 bit)
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply reg-reg (64 bit)
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (64 bit)
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
 6287 
//------- Divide pipeline operations --------------------
// Divides use the DIV unit and can only issue in slot 0.

// 32-bit divide.  Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// 64-bit divide.  Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
 6313 
//------- Load pipeline operations ------------------------
// Loads use the LDST unit; the address is read at issue.

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg (register-addressed form)
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
 6347 
//------- Store pipeline operations -----------------------
// Stores use the LDST unit; the address is read at issue and the
// stored data in EX2.

// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg ("dst" here is the address register, read at issue)
// Eg. STR      x0, [sp, x1]
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
 6381 
 6382 //------- Store pipeline operations -----------------------
 6383 
 6384 // Branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;   // may use either issue slot
  BRANCH : EX1;   // branch unit used at EX1
%}
 6391 
 6392 // Conditional branch
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);  // condition flags read at EX1
  INS01  : ISS;        // may use either issue slot
  BRANCH : EX1;        // branch unit used at EX1
%}
 6400 
 6401 // Compare & Branch
 6402 // EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);  // compared register read at EX1
  INS01  : ISS;        // may use either issue slot
  BRANCH : EX1;        // branch unit used at EX1
%}
 6410 
 6411 //------- Synchronisation operations ----------------------
 6412 
 6413 // Any operation requiring serialization.
 6414 // EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16); // conservative fixed cost for ordered/atomic accesses
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
 6423 
 6424 // Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10); // expands to many instructions; 10 is an estimate
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
 6434 
 6435 // Empty pipeline class
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0); // zero-latency placeholder (used for MachNop below)
%}
 6441 
 6442 // Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2); // generic short-latency ALU-style cost
%}
 6448 
 6449 // Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16); // deliberately pessimistic latency for compares
%}
 6455 
 6456 // Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16); // deliberately pessimistic latency for memory ops
%}
 6462 
 6463 // Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100); // calls are modelled as very expensive
%}
 6469 
 6470 // Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty; // Nop consumes no pipeline resources
%}
 6474 
 6475 %}
 6476 //----------INSTRUCTIONS-------------------------------------------------------
 6477 //
 6478 // match      -- States which machine-independent subtree may be replaced
 6479 //               by this instruction.
 6480 // ins_cost   -- The estimated cost of this instruction is used by instruction
 6481 //               selection to identify a minimum cost tree of machine
 6482 //               instructions that matches a tree of machine-independent
 6483 //               instructions.
 6484 // format     -- A string providing the disassembly for this instruction.
 6485 //               The value of an instruction's operand may be inserted
 6486 //               by referring to it with a '$' prefix.
 6487 // opcode     -- Three instruction opcodes may be provided.  These are referred
 6488 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
 6490 //               indicate the type of machine instruction, while secondary
 6491 //               and tertiary are often used for prefix options or addressing
 6492 //               modes.
 6493 // ins_encode -- A list of encode classes with parameters. The encode class
 6494 //               name must have been defined in an 'enc_class' specification
 6495 //               in the encode section of the architecture description.
 6496 
 6497 // ============================================================================
 6498 // Memory (Load/Store) Instructions
 6499 
 6500 // Load Instructions
 6501 
 6502 // Load Byte (8 bit signed)
instruct loadB(iRegINoSp dst, memory1 mem)
%{
  match(Set dst (LoadB mem));
  // Plain load only; acquiring loads are matched by loadB_volatile (ldarsb).
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6515 
 6516 // Load Byte (8 bit signed) into long
instruct loadB2L(iRegLNoSp dst, memory1 mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  // The load is the input of the ConvI2L node, hence n->in(1).
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6529 
 6530 // Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory1 mem)
%{
  match(Set dst (LoadUB mem));
  // Plain load only; acquiring loads are matched by loadUB_volatile (ldarb).
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6543 
 6544 // Load Byte (8 bit unsigned) into long
instruct loadUB2L(iRegLNoSp dst, memory1 mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  // The load is the input of the ConvI2L node, hence n->in(1).
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  // ldrb zero-extends to 64 bits, so no separate widening step is needed.
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6557 
 6558 // Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory2 mem)
%{
  match(Set dst (LoadS mem));
  // Plain load only; acquiring loads are matched by loadS_volatile (ldarshw).
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6571 
 6572 // Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory2 mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  // The load is the input of the ConvI2L node, hence n->in(1).
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6585 
 6586 // Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory2 mem)
%{
  match(Set dst (LoadUS mem));
  // Plain load only; acquiring loads are matched by loadUS_volatile (ldarhw).
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6599 
 6600 // Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory2 mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  // The load is the input of the ConvI2L node, hence n->in(1).
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  // ldrh zero-extends to 64 bits, so no separate widening step is needed.
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6613 
 6614 // Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory4 mem)
%{
  match(Set dst (LoadI mem));
  // Plain load only; acquiring loads are matched by loadI_volatile (ldarw).
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6627 
 6628 // Load Integer (32 bit signed) into long
instruct loadI2L(iRegLNoSp dst, memory4 mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  // The load is the input of the ConvI2L node, hence n->in(1).
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  // ldrsw sign-extends the 32-bit value, folding the ConvI2L into the load.
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6641 
 6642 // Load Integer (32 bit unsigned) into long
instruct loadUI2L(iRegLNoSp dst, memory4 mem, immL_32bits mask)
%{
  // (LoadI & 0xFFFFFFFF) after widening == zero-extending 32-bit load.
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  // Walk AndL -> ConvI2L -> LoadI to reach the load node.
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6655 
 6656 // Load Long (64 bit signed)
 6657 instruct loadL(iRegLNoSp dst, memory8 mem)
 6658 %{
 6659   match(Set dst (LoadL mem));
 6660   predicate(!needs_acquiring_load(n));
 6661 
 6662   ins_cost(4 * INSN_COST);
 6663   format %{ "ldr  $dst, $mem\t# int" %}
 6664 
 6665   ins_encode(aarch64_enc_ldr(dst, mem));
 6666 
 6667   ins_pipe(iload_reg_mem);
 6668 %}
 6669 
 6670 // Load Range
instruct loadRange(iRegINoSp dst, memory4 mem)
%{
  // Array length load; no acquire variant is needed (lengths are immutable).
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6682 
 6683 // Load Pointer
instruct loadP(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadP mem));
  // Plain load with no GC barrier attached; barrier-carrying loads are
  // expanded elsewhere by the barrier set.
  predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6696 
 6697 // Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadN mem));
  // Plain load with no GC barrier attached.
  predicate(!needs_acquiring_load(n) && n->as_Load()->barrier_data() == 0);

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6710 
 6711 // Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6724 
 6725 // Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadNKlass mem));
  // With compact object headers the klass bits are shifted inside the mark
  // word; that case is handled by loadNKlassCompactHeaders below.
  predicate(!needs_acquiring_load(n) && !UseCompactObjectHeaders);

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6738 
// Compact-headers variant: the narrow klass lives in the upper bits of the
// loaded word, so shift it down after the load.
instruct loadNKlassCompactHeaders(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n) && UseCompactObjectHeaders);

  ins_cost(4 * INSN_COST);
  format %{
    "ldrw  $dst, $mem\t# compressed class ptr, shifted\n\t"
    "lsrw  $dst, $dst, markWord::klass_shift_at_offset"
  %}
  ins_encode %{
    // inlined aarch64_enc_ldrw
    loadStore(masm, &MacroAssembler::ldrw, $dst$$Register, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    // Extract the klass bits from the loaded word.
    __ lsrw($dst$$Register, $dst$$Register, markWord::klass_shift_at_offset);
  %}
  ins_pipe(iload_reg_mem);
%}
 6757 
 6758 // Load Float
instruct loadF(vRegF dst, memory4 mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
 6771 
 6772 // Load Double
instruct loadD(vRegD dst, memory8 mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
 6785 
 6786 
 6787 // Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  // movw-based immediate materialization.
  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}
 6799 
 6800 // Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}
 6812 
 6813 // Load Pointer Constant
 6814 
instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  // May expand to a multi-instruction mov sequence, hence the higher cost.
  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}
 6828 
 6829 // Load Null Pointer Constant
 6830 
instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# nullptr ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
 6842 
 6843 // Load Pointer Constant One
 6844 
 6845 instruct loadConP1(iRegPNoSp dst, immP_1 con)
 6846 %{
 6847   match(Set dst con);
 6848 
 6849   ins_cost(INSN_COST);
 6850   format %{ "mov  $dst, $con\t# nullptr ptr" %}
 6851 
 6852   ins_encode(aarch64_enc_mov_p1(dst, con));
 6853 
 6854   ins_pipe(ialu_imm);
 6855 %}
 6856 
 6857 // Load Byte Map Base Constant
 6858 
instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // Materialized pc-relatively with adr rather than a mov sequence.
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}
 6870 
 6871 // Load Narrow Pointer Constant
 6872 
instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  // May expand to a multi-instruction mov sequence, hence the higher cost.
  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}
 6884 
 6885 // Load Narrow Null Pointer Constant
 6886 
instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed nullptr ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}
 6898 
 6899 // Load Narrow Klass Constant
 6900 
instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
 6912 
 6913 // Load Packed Float Constant
 6914 
// Float constant encodable as an fmov immediate (no constant-pool access).
instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}
 6925 
 6926 // Load Float Constant
 6927 
// General float constant: loaded from the constant table.
instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}
 6943 
 6944 // Load Packed Double Constant
 6945 
// Double constant encodable as an fmov immediate (no constant-pool access).
instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
 6956 
 6957 // Load Double Constant
 6958 
 6959 instruct loadConD(vRegD dst, immD con) %{
 6960   match(Set dst con);
 6961 
 6962   ins_cost(INSN_COST * 5);
 6963   format %{
 6964     "ldrd $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
 6965   %}
 6966 
 6967   ins_encode %{
 6968     __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
 6969   %}
 6970 
 6971   ins_pipe(fp_load_constant_d);
 6972 %}
 6973 
 6974 // Load Half Float Constant
 6975 // The "ldr" instruction loads a 32-bit word from the constant pool into a
 6976 // 32-bit register but only the bottom half will be populated and the top
 6977 // 16 bits are zero.
instruct loadConH(vRegF dst, immH con) %{
  match(Set dst con);
  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: half float=$con\n\t"
  %}
  ins_encode %{
    // Loads a full 32-bit word; the upper 16 bits of the constant are zero.
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}
  ins_pipe(fp_load_constant_s);
%}
 6988 
 6989 // Store Instructions
 6990 
 6991 // Store Byte
instruct storeB(iRegIorL2I src, memory1 mem)
%{
  match(Set mem (StoreB mem src));
  // Plain store only; releasing stores are matched by storeB_volatile (stlrb).
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7004 
 7005 
 7006 instruct storeimmB0(immI0 zero, memory1 mem)
 7007 %{
 7008   match(Set mem (StoreB mem zero));
 7009   predicate(!needs_releasing_store(n));
 7010 
 7011   ins_cost(INSN_COST);
 7012   format %{ "strb rscractch2, $mem\t# byte" %}
 7013 
 7014   ins_encode(aarch64_enc_strb0(mem));
 7015 
 7016   ins_pipe(istore_mem);
 7017 %}
 7018 
 7019 // Store Char/Short
instruct storeC(iRegIorL2I src, memory2 mem)
%{
  match(Set mem (StoreC mem src));
  // Plain store only; releasing stores are matched by storeC_volatile (stlrh).
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7032 
// Store of constant zero short: uses zr directly.
instruct storeimmC0(immI0 zero, memory2 mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}
 7045 
 7046 // Store Integer
 7047 
instruct storeI(iRegIorL2I src, memory4 mem)
%{
  match(Set mem(StoreI mem src));
  // Plain store only; releasing stores are matched by storeI_volatile (stlrw).
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7060 
// Store of constant zero int: uses zr directly.
instruct storeimmI0(immI0 zero, memory4 mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
 7073 
 7074 // Store Long (64 bit signed)
 7075 instruct storeL(iRegL src, memory8 mem)
 7076 %{
 7077   match(Set mem (StoreL mem src));
 7078   predicate(!needs_releasing_store(n));
 7079 
 7080   ins_cost(INSN_COST);
 7081   format %{ "str  $src, $mem\t# int" %}
 7082 
 7083   ins_encode(aarch64_enc_str(src, mem));
 7084 
 7085   ins_pipe(istore_reg_mem);
 7086 %}
 7087 
 7088 // Store Long (64 bit signed)
 7089 instruct storeimmL0(immL0 zero, memory8 mem)
 7090 %{
 7091   match(Set mem (StoreL mem zero));
 7092   predicate(!needs_releasing_store(n));
 7093 
 7094   ins_cost(INSN_COST);
 7095   format %{ "str  zr, $mem\t# int" %}
 7096 
 7097   ins_encode(aarch64_enc_str0(mem));
 7098 
 7099   ins_pipe(istore_mem);
 7100 %}
 7101 
 7102 // Store Pointer
instruct storeP(iRegP src, memory8 mem)
%{
  match(Set mem (StoreP mem src));
  // Plain store with no GC barrier attached.
  predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7115 
 7116 // Store Pointer
// Store of constant null pointer: uses zr directly.
instruct storeimmP0(immP0 zero, memory8 mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
 7129 
 7130 // Store Compressed Pointer
instruct storeN(iRegN src, memory4 mem)
%{
  match(Set mem (StoreN mem src));
  // Plain store with no GC barrier attached.
  predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7143 
// Store of constant compressed null pointer: uses zr directly.
instruct storeImmN0(immN0 zero, memory4 mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
 7156 
 7157 // Store Float
instruct storeF(vRegF src, memory4 mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 7170 
 7171 // TODO
 7172 // implement storeImmF0 and storeFImmPacked
 7173 
 7174 // Store Double
instruct storeD(vRegD src, memory8 mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 7187 
 7188 // Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory4 mem)
%{
  // NOTE: predicate precedes match here, unlike the sibling store rules;
  // order of these clauses does not affect matching.
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7201 
 7202 // TODO
 7203 // implement storeImmD0 and storeDImmPacked
 7204 
 7205 // prefetch instructions
 7206 // Must be safe to execute with invalid address (cannot fault).
 7207 
instruct prefetchalloc( memory8 mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
 7218 
 7219 //  ---------------- volatile loads and stores ----------------
 7220 
 7221 // Load Byte (8 bit signed)
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  // Load-acquire form: no predicate needed; the higher cost steers the
  // matcher to the plain loadB rule when acquire semantics are not required.
  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}
 7233 
 7234 // Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  // Load-acquire form of loadB2L.
  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}
 7246 
 7247 // Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  // Load-acquire form of loadUB.
  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}
 7259 
 7260 // Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  // Load-acquire form of loadUB2L.
  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}
 7272 
 7273 // Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  // Load-acquire form of loadS.
  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}
 7285 
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  // Load-acquire form of loadUS.
  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}
 7297 
 7298 // Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  // Load-acquire form of loadUS2L.
  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
 7310 
// Load Short (16 bit signed) into long
 7312 instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7313 %{
 7314   match(Set dst (ConvI2L (LoadS mem)));
 7315 
 7316   ins_cost(VOLATILE_REF_COST);
 7317   format %{ "ldarh  $dst, $mem\t# short" %}
 7318 
 7319   ins_encode(aarch64_enc_ldarsh(dst, mem));
 7320 
 7321   ins_pipe(pipe_serial);
 7322 %}
 7323 
 7324 // Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  // Load-acquire form of loadI.
  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
 7336 
 7337 // Load Integer (32 bit unsigned) into long
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  // Load-acquire form of loadUI2L: ldarw zero-extends, absorbing the AndL.
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
 7349 
 7350 // Load Long (64 bit signed)
 7351 instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7352 %{
 7353   match(Set dst (LoadL mem));
 7354 
 7355   ins_cost(VOLATILE_REF_COST);
 7356   format %{ "ldar  $dst, $mem\t# int" %}
 7357 
 7358   ins_encode(aarch64_enc_ldar(dst, mem));
 7359 
 7360   ins_pipe(pipe_serial);
 7361 %}
 7362 
 7363 // Load Pointer
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));
  // Acquire form with no GC barrier attached.
  predicate(n->as_Load()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
 7376 
 7377 // Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));
  // Acquire form with no GC barrier attached.
  predicate(n->as_Load()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
 7390 
 7391 // Load Float
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  // Load-acquire form of loadF.
  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}
 7403 
 7404 // Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  // Load-acquire form of loadD.
  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
 7416 
 7417 // Store Byte
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  // Store-release form of storeB.
  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}
 7429 
instruct storeimmB0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem zero));

  // Store-release of constant zero byte.
  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb0(mem));

  ins_pipe(pipe_class_memory);
%}
 7441 
 7442 // Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  // Store-release form of storeC.
  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}
 7454 
instruct storeimmC0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem zero));

  // Store-release of constant zero short.
  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh0(mem));

  ins_pipe(pipe_class_memory);
%}
 7466 
 7467 // Store Integer
 7468 
instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  // Store-release form of storeI.
  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
 7480 
instruct storeimmI0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem zero));

  // Store-release of constant zero int.
  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw0(mem));

  ins_pipe(pipe_class_memory);
%}
 7492 
 7493 // Store Long (64 bit signed)
 7494 instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
 7495 %{
 7496   match(Set mem (StoreL mem src));
 7497 
 7498   ins_cost(VOLATILE_REF_COST);
 7499   format %{ "stlr  $src, $mem\t# int" %}
 7500 
 7501   ins_encode(aarch64_enc_stlr(src, mem));
 7502 
 7503   ins_pipe(pipe_class_memory);
 7504 %}
 7505 
 7506 instruct storeimmL0_volatile(immL0 zero, /* sync_memory*/indirect mem)
 7507 %{
 7508   match(Set mem (StoreL mem zero));
 7509 
 7510   ins_cost(VOLATILE_REF_COST);
 7511   format %{ "stlr  zr, $mem\t# int" %}
 7512 
 7513   ins_encode(aarch64_enc_stlr0(mem));
 7514 
 7515   ins_pipe(pipe_class_memory);
 7516 %}
 7517 
 7518 // Store Pointer
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));
  // Release form with no GC barrier attached.
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
 7531 
 7532 instruct storeimmP0_volatile(immP0 zero, /* sync_memory*/indirect mem)
 7533 %{
 7534   match(Set mem (StoreP mem zero));
 7535   predicate(n->as_Store()->barrier_data() == 0);
 7536 
 7537   ins_cost(VOLATILE_REF_COST);
 7538   format %{ "stlr  zr, $mem\t# ptr" %}
 7539 
 7540   ins_encode(aarch64_enc_stlr0(mem));
 7541 
 7542   ins_pipe(pipe_class_memory);
 7543 %}
 7544 
 7545 // Store Compressed Pointer
 7546 instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
 7547 %{
 7548   match(Set mem (StoreN mem src));
 7549   predicate(n->as_Store()->barrier_data() == 0);
 7550 
 7551   ins_cost(VOLATILE_REF_COST);
 7552   format %{ "stlrw  $src, $mem\t# compressed ptr" %}
 7553 
 7554   ins_encode(aarch64_enc_stlrw(src, mem));
 7555 
 7556   ins_pipe(pipe_class_memory);
 7557 %}
 7558 
 7559 instruct storeimmN0_volatile(immN0 zero, /* sync_memory*/indirect mem)
 7560 %{
 7561   match(Set mem (StoreN mem zero));
 7562   predicate(n->as_Store()->barrier_data() == 0);
 7563 
 7564   ins_cost(VOLATILE_REF_COST);
 7565   format %{ "stlrw  zr, $mem\t# compressed ptr" %}
 7566 
 7567   ins_encode(aarch64_enc_stlrw0(mem));
 7568 
 7569   ins_pipe(pipe_class_memory);
 7570 %}
 7571 
// Store Float
// Volatile float store: release store from an FP/SIMD register.
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
// Volatile double store: release store from an FP/SIMD register.
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}

//  ---------------- end of volatile loads and stores ----------------
 7602 
// Write back the data cache line containing $addr.  Only available
// when the CPU supports a data cache line flush instruction.  The
// asserts enforce that the address is base-register-only (no index,
// zero displacement), which is all cache_wb() accepts.
instruct cacheWB(indirect addr)
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWB addr);

  ins_cost(100);
  format %{"cache wb $addr" %}
  ins_encode %{
    assert($addr->index_position() < 0, "should be");
    assert($addr$$disp == 0, "should be");
    __ cache_wb(Address($addr$$base$$Register, 0));
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Ordering barrier emitted before a sequence of cache write-backs.
instruct cacheWBPreSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPreSync);

  ins_cost(100);
  format %{"cache wb presync" %}
  ins_encode %{
    __ cache_wbsync(true);
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Ordering barrier emitted after a sequence of cache write-backs.
instruct cacheWBPostSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPostSync);

  ins_cost(100);
  format %{"cache wb postsync" %}
  ins_encode %{
    __ cache_wbsync(false);
  %}
  ins_pipe(pipe_slow); // XXX
%}
 7643 
 7644 // ============================================================================
 7645 // BSWAP Instructions
 7646 
 7647 instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
 7648   match(Set dst (ReverseBytesI src));
 7649 
 7650   ins_cost(INSN_COST);
 7651   format %{ "revw  $dst, $src" %}
 7652 
 7653   ins_encode %{
 7654     __ revw(as_Register($dst$$reg), as_Register($src$$reg));
 7655   %}
 7656 
 7657   ins_pipe(ialu_reg);
 7658 %}
 7659 
 7660 instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
 7661   match(Set dst (ReverseBytesL src));
 7662 
 7663   ins_cost(INSN_COST);
 7664   format %{ "rev  $dst, $src" %}
 7665 
 7666   ins_encode %{
 7667     __ rev(as_Register($dst$$reg), as_Register($src$$reg));
 7668   %}
 7669 
 7670   ins_pipe(ialu_reg);
 7671 %}
 7672 
 7673 instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
 7674   match(Set dst (ReverseBytesUS src));
 7675 
 7676   ins_cost(INSN_COST);
 7677   format %{ "rev16w  $dst, $src" %}
 7678 
 7679   ins_encode %{
 7680     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
 7681   %}
 7682 
 7683   ins_pipe(ialu_reg);
 7684 %}
 7685 
 7686 instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
 7687   match(Set dst (ReverseBytesS src));
 7688 
 7689   ins_cost(INSN_COST);
 7690   format %{ "rev16w  $dst, $src\n\t"
 7691             "sbfmw $dst, $dst, #0, #15" %}
 7692 
 7693   ins_encode %{
 7694     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
 7695     __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
 7696   %}
 7697 
 7698   ins_pipe(ialu_reg);
 7699 %}
 7700 
 7701 // ============================================================================
 7702 // Zero Count Instructions
 7703 
 7704 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
 7705   match(Set dst (CountLeadingZerosI src));
 7706 
 7707   ins_cost(INSN_COST);
 7708   format %{ "clzw  $dst, $src" %}
 7709   ins_encode %{
 7710     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
 7711   %}
 7712 
 7713   ins_pipe(ialu_reg);
 7714 %}
 7715 
 7716 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
 7717   match(Set dst (CountLeadingZerosL src));
 7718 
 7719   ins_cost(INSN_COST);
 7720   format %{ "clz   $dst, $src" %}
 7721   ins_encode %{
 7722     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
 7723   %}
 7724 
 7725   ins_pipe(ialu_reg);
 7726 %}
 7727 
 7728 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
 7729   match(Set dst (CountTrailingZerosI src));
 7730 
 7731   ins_cost(INSN_COST * 2);
 7732   format %{ "rbitw  $dst, $src\n\t"
 7733             "clzw   $dst, $dst" %}
 7734   ins_encode %{
 7735     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
 7736     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
 7737   %}
 7738 
 7739   ins_pipe(ialu_reg);
 7740 %}
 7741 
 7742 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
 7743   match(Set dst (CountTrailingZerosL src));
 7744 
 7745   ins_cost(INSN_COST * 2);
 7746   format %{ "rbit   $dst, $src\n\t"
 7747             "clz    $dst, $dst" %}
 7748   ins_encode %{
 7749     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
 7750     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
 7751   %}
 7752 
 7753   ins_pipe(ialu_reg);
 7754 %}
 7755 
//---------- Population Count Instructions -------------------------------------
//
// There is no scalar popcount instruction; move the value into an
// FP/SIMD register, count set bits per byte (cnt), sum the byte
// counts (addv), and move the result back.

instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "fmovs  $tmp, $src\t# vector (1S)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ fmovs($tmp$$FloatRegister, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Memory-operand form: load the int straight into the SIMD register
// (ldrs) rather than via a general register.
instruct popCountI_mem(iRegINoSp dst, memory4 mem, vRegF tmp) %{
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(masm, &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Memory-operand form of the long popcount (ldrd into SIMD register).
instruct popCountL_mem(iRegINoSp dst, memory8 mem, vRegD tmp) %{
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(masm, &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
 7839 
 7840 // ============================================================================
 7841 // VerifyVectorAlignment Instruction
 7842 
 7843 instruct verify_vector_alignment(iRegP addr, immL_positive_bitmaskI mask, rFlagsReg cr) %{
 7844   match(Set addr (VerifyVectorAlignment addr mask));
 7845   effect(KILL cr);
 7846   format %{ "verify_vector_alignment $addr $mask \t! verify alignment" %}
 7847   ins_encode %{
 7848     Label Lskip;
 7849     // check if masked bits of addr are zero
 7850     __ tst($addr$$Register, $mask$$constant);
 7851     __ br(Assembler::EQ, Lskip);
 7852     __ stop("verify_vector_alignment found a misaligned vector memory access");
 7853     __ bind(Lskip);
 7854   %}
 7855   ins_pipe(pipe_slow);
 7856 %}
 7857 
 7858 // ============================================================================
 7859 // MemBar Instruction
 7860 
 7861 instruct load_fence() %{
 7862   match(LoadFence);
 7863   ins_cost(VOLATILE_REF_COST);
 7864 
 7865   format %{ "load_fence" %}
 7866 
 7867   ins_encode %{
 7868     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
 7869   %}
 7870   ins_pipe(pipe_serial);
 7871 %}
 7872 
 7873 instruct unnecessary_membar_acquire() %{
 7874   predicate(unnecessary_acquire(n));
 7875   match(MemBarAcquire);
 7876   ins_cost(0);
 7877 
 7878   format %{ "membar_acquire (elided)" %}
 7879 
 7880   ins_encode %{
 7881     __ block_comment("membar_acquire (elided)");
 7882   %}
 7883 
 7884   ins_pipe(pipe_class_empty);
 7885 %}
 7886 
 7887 instruct membar_acquire() %{
 7888   match(MemBarAcquire);
 7889   ins_cost(VOLATILE_REF_COST);
 7890 
 7891   format %{ "membar_acquire\n\t"
 7892             "dmb ishld" %}
 7893 
 7894   ins_encode %{
 7895     __ block_comment("membar_acquire");
 7896     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
 7897   %}
 7898 
 7899   ins_pipe(pipe_serial);
 7900 %}
 7901 
 7902 
 7903 instruct membar_acquire_lock() %{
 7904   match(MemBarAcquireLock);
 7905   ins_cost(VOLATILE_REF_COST);
 7906 
 7907   format %{ "membar_acquire_lock (elided)" %}
 7908 
 7909   ins_encode %{
 7910     __ block_comment("membar_acquire_lock (elided)");
 7911   %}
 7912 
 7913   ins_pipe(pipe_serial);
 7914 %}
 7915 
 7916 instruct store_fence() %{
 7917   match(StoreFence);
 7918   ins_cost(VOLATILE_REF_COST);
 7919 
 7920   format %{ "store_fence" %}
 7921 
 7922   ins_encode %{
 7923     __ membar(Assembler::LoadStore|Assembler::StoreStore);
 7924   %}
 7925   ins_pipe(pipe_serial);
 7926 %}
 7927 
// Release membar elided when unnecessary_release(n) holds; zero cost
// so it wins over the full membar_release rule when applicable.
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}

// Full release membar: StoreStore followed by LoadStore, emitted as
// two membar calls so they can be merged under AlwaysMergeDMB.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release\n\t"
            "dmb ishst\n\tdmb ishld" %}

  ins_encode %{
    __ block_comment("membar_release");
    // These will be merged if AlwaysMergeDMB is enabled.
    __ membar(Assembler::StoreStore);
    __ membar(Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}

// Store-store barrier (dmb ishst); also matches StoreStoreFence.
instruct membar_storestore() %{
  match(MemBarStoreStore);
  match(StoreStoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// Lock-release membar emits no barrier — only a block comment; the
// monitor-exit code itself presumably provides the needed ordering.
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Volatile membar elided when unnecessary_volatile(n) holds.
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Full volatile membar: StoreLoad barrier.  Priced at 100x the usual
// volatile cost to strongly discourage selection when an elided or
// ldar/stlr-based alternative is available.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile\n\t"
             "dmb ish"%}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
 8011 
 8012 // ============================================================================
 8013 // Cast/Convert Instructions
 8014 
 8015 instruct castX2P(iRegPNoSp dst, iRegL src) %{
 8016   match(Set dst (CastX2P src));
 8017 
 8018   ins_cost(INSN_COST);
 8019   format %{ "mov $dst, $src\t# long -> ptr" %}
 8020 
 8021   ins_encode %{
 8022     if ($dst$$reg != $src$$reg) {
 8023       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 8024     }
 8025   %}
 8026 
 8027   ins_pipe(ialu_reg);
 8028 %}
 8029 
 8030 instruct castP2X(iRegLNoSp dst, iRegP src) %{
 8031   match(Set dst (CastP2X src));
 8032 
 8033   ins_cost(INSN_COST);
 8034   format %{ "mov $dst, $src\t# ptr -> long" %}
 8035 
 8036   ins_encode %{
 8037     if ($dst$$reg != $src$$reg) {
 8038       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 8039     }
 8040   %}
 8041 
 8042   ins_pipe(ialu_reg);
 8043 %}
 8044 
// Convert oop into int for vectors alignment masking
// Truncating 32-bit move (movw) of the pointer's low word.
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
 8057 
 8058 // Convert compressed oop into int for vectors alignment masking
 8059 // in case of 32bit oops (heap < 4Gb).
 8060 instruct convN2I(iRegINoSp dst, iRegN src)
 8061 %{
 8062   predicate(CompressedOops::shift() == 0);
 8063   match(Set dst (ConvL2I (CastP2X (DecodeN src))));
 8064 
 8065   ins_cost(INSN_COST);
 8066   format %{ "mov dst, $src\t# compressed ptr -> int" %}
 8067   ins_encode %{
 8068     __ movw($dst$$Register, $src$$Register);
 8069   %}
 8070 
 8071   ins_pipe(ialu_reg);
 8072 %}
 8073 

// Convert oop pointer into compressed form
// May-be-null case: encode_heap_oop handles a possible null, and the
// flags are clobbered, hence KILL cr.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Known-not-null encode: no null check needed.
// NOTE(review): cr is declared here (and in the two decode rules
// below) without a matching effect(); presumably the macro assembler
// routines leave the flags untouched — worth confirming.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// May-be-null decode of a narrow oop.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Known-not-null (or constant) decode: no null check needed.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
 8128 
// n.b. AArch64 implementations of encode_klass_not_null and
// decode_klass_not_null do not modify the flags register so, unlike
// Intel, we don't kill CR as a side effect here

// Compress a klass pointer (never null).
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// Decompress a narrow klass pointer (never null).  The in-place
// (single-register) overload is used when dst and src coincide.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
 8166 
// CheckCastPP is a type-system-only node: no code is emitted.
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// CastPP is likewise a no-op at the machine level.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// CastII: no-op unless VerifyConstraintCasts requests runtime checks,
// in which case the _checked variant below is selected instead.
instruct castII(iRegI dst)
%{
  predicate(VerifyConstraintCasts == 0);
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// Verifying variant: emits a runtime range check of $dst against the
// node's int type; uses rscratch1 and clobbers flags (KILL cr).
instruct castII_checked(iRegI dst, rFlagsReg cr)
%{
  predicate(VerifyConstraintCasts > 0);
  match(Set dst (CastII dst));
  effect(KILL cr);

  format %{ "# castII_checked of $dst" %}
  ins_encode %{
    __ verify_int_in_range(_idx, bottom_type()->is_int(), $dst$$Register, rscratch1);
  %}
  ins_pipe(pipe_slow);
%}

// CastLL: no-op unless VerifyConstraintCasts requests runtime checks.
instruct castLL(iRegL dst)
%{
  predicate(VerifyConstraintCasts == 0);
  match(Set dst (CastLL dst));

  size(0);
  format %{ "# castLL of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// Verifying variant of CastLL (see castII_checked).
instruct castLL_checked(iRegL dst, rFlagsReg cr)
%{
  predicate(VerifyConstraintCasts > 0);
  match(Set dst (CastLL dst));
  effect(KILL cr);

  format %{ "# castLL_checked of $dst" %}
  ins_encode %{
    __ verify_long_in_range(_idx, bottom_type()->is_long(), $dst$$Register, rscratch1);
  %}
  ins_pipe(pipe_slow);
%}

// The remaining casts (half/float/double/vector/mask) are all
// type-system-only no-ops with empty encodings.
instruct castHH(vRegF dst)
%{
  match(Set dst (CastHH dst));
  size(0);
  format %{ "# castHH of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castFF(vRegF dst)
%{
  match(Set dst (CastFF dst));

  size(0);
  format %{ "# castFF of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castDD(vRegD dst)
%{
  match(Set dst (CastDD dst));

  size(0);
  format %{ "# castDD of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castVV(vReg dst)
%{
  match(Set dst (CastVV dst));

  size(0);
  format %{ "# castVV of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// CastVV on a predicate (governing) register — SVE mask registers.
instruct castVVMask(pRegGov dst)
%{
  match(Set dst (CastVV dst));

  size(0);
  format %{ "# castVV of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
 8290 
 8291 // ============================================================================
 8292 // Atomic operation instructions
 8293 //
 8294 
 8295 // standard CompareAndSwapX when we are using barriers
 8296 // these have higher priority than the rules selected by a predicate
 8297 
 8298 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
 8299 // can't match them
 8300 
 8301 instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 8302 
 8303   match(Set res (CompareAndSwapB mem (Binary oldval newval)));
 8304   ins_cost(2 * VOLATILE_REF_COST);
 8305 
 8306   effect(KILL cr);
 8307 
 8308   format %{
 8309     "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 8310     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8311   %}
 8312 
 8313   ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
 8314             aarch64_enc_cset_eq(res));
 8315 
 8316   ins_pipe(pipe_slow);
 8317 %}
 8318 
 8319 instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 8320 
 8321   match(Set res (CompareAndSwapS mem (Binary oldval newval)));
 8322   ins_cost(2 * VOLATILE_REF_COST);
 8323 
 8324   effect(KILL cr);
 8325 
 8326   format %{
 8327     "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 8328     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8329   %}
 8330 
 8331   ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
 8332             aarch64_enc_cset_eq(res));
 8333 
 8334   ins_pipe(pipe_slow);
 8335 %}
 8336 
 8337 instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 8338 
 8339   match(Set res (CompareAndSwapI mem (Binary oldval newval)));
 8340   ins_cost(2 * VOLATILE_REF_COST);
 8341 
 8342   effect(KILL cr);
 8343 
 8344  format %{
 8345     "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 8346     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8347  %}
 8348 
 8349  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
 8350             aarch64_enc_cset_eq(res));
 8351 
 8352   ins_pipe(pipe_slow);
 8353 %}
 8354 
// Long CAS (doubleword exchange).
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Pointer CAS; restricted to stores with no GC-barrier data.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  predicate(n->as_LoadStore()->barrier_data() == 0);
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Narrow-oop CAS (word exchange); no GC-barrier data.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  predicate(n->as_LoadStore()->barrier_data() == 0);
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8410 
// alternative CompareAndSwapX when we are eliding barriers
// These _acq variants use an acquiring load-exclusive (selected via
// needs_acquiring_load_exclusive) so the separate barrier can be
// elided; note the lower ins_cost than the barrier-using rules above.

instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8469 
// Acquiring long CAS (see the comment at compareAndSwapBAcq).
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring pointer CAS; also requires no GC-barrier data.
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring narrow-oop CAS; also requires no GC-barrier data.
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8526 
 8527 
 8528 // ---------------------------------------------------------------------
 8529 
 8530 // BEGIN This section of the file is automatically generated. Do not edit --------------
 8531 
 8532 // Sundry CAS operations.  Note that release is always true,
 8533 // regardless of the memory ordering of the CAS.  This is because we
 8534 // need the volatile case to be sequentially consistent but there is
 8535 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
 8536 // can't check the type of memory ordering here, so we always emit a
 8537 // STLXR.
 8538 
 8539 // This section is generated from cas.m4
 8540 
 8541 
 8542 // This pattern is generated automatically from cas.m4.
 8543 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated from cas.m4 — make any change there and
// regenerate; do not hand-edit these instructs.
// CompareAndExchange returns the value previously in memory via $res
// (hence TEMP_DEF res); the byte/short forms sign-extend the result to
// int (sxtbw/sxthw) as Java's sub-word CAS contract expects.  Release
// is always true per the section comment above.  NOTE(review): the
// "(byte, weak)" etc. text in these formats is cosmetic and arguably
// misleading — these are the strong variants (/*weak*/ false); if
// desired, fix the format string in cas.m4.
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Narrow-oop and pointer forms only match when no GC barrier data is
// attached; GC-specific variants handle the rest.
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 8647 
 8648 // This pattern is generated automatically from cas.m4.
 8649 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated from cas.m4 — make any change there and
// regenerate; do not hand-edit these instructs.
// Acquiring variants of CompareAndExchange: matched when
// needs_acquiring_load_exclusive(n) holds, and encoded with
// /*acquire*/ true.  Lower ins_cost than the relaxed forms so they are
// preferred whenever the predicate allows.
instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Oop-bearing forms additionally require barrier_data() == 0.
instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 8757 
 8758 // This pattern is generated automatically from cas.m4.
 8759 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated from cas.m4 — make any change there and
// regenerate; do not hand-edit these instructs.
// Weak CAS: may fail spuriously (/*weak*/ true), returns only a
// success flag.  cmpxchg is called with noreg as the result register
// (old value discarded); csetw then materializes the flags into $res,
// so $res holds 1 on success, 0 on failure, for every element size.
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Note: res is iRegINoSp (an int success flag), even for the long form.
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 8873 
 8874 // This pattern is generated automatically from cas.m4.
 8875 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated from cas.m4 — make any change there and
// regenerate; do not hand-edit these instructs.
// Acquiring variants of the weak CAS above: /*acquire*/ true, matched
// under needs_acquiring_load_exclusive(n), with lower ins_cost so they
// are preferred when the predicate holds.
instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Oop-bearing forms additionally require barrier_data() == 0.
instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 8993 
 8994 // END This section of the file is automatically generated. Do not edit --------------
 8995 // ---------------------------------------------------------------------
 8996 
// GetAndSet (atomic exchange) family: $prev receives the value that was
// in memory before $newv was stored.  The *Acq variants encode the
// acquiring forms (atomic_xchgal / atomic_xchgalw) and carry a lower
// ins_cost, so they win over the plain forms whenever
// needs_acquiring_load_exclusive(n) holds.  The oop-bearing N/P forms
// only match when no GC barrier data is attached (barrier_data() == 0).
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring variants below.
instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetI mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetL mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetN mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9082 
 9083 
// GetAndAdd (fetch-and-add) family, covering four axes:
//   - element size: long (atomic_add/atomic_addal) vs int (atomic_addw/
//     atomic_addalw);
//   - increment operand: register vs add/sub-encodable immediate
//     (immLAddSub / immIAddSub, the *i forms);
//   - result used vs discarded: the *_no_res forms match only when
//     result_not_used() and pass noreg as the destination;
//   - memory ordering: plain vs acquiring (*Acq, lower ins_cost so they
//     are preferred under needs_acquiring_load_exclusive(n)).
// The "+ 1" on the result-producing forms makes the no-result forms
// marginally cheaper, so they are selected when both could match.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring variants below (atomic_addal / atomic_addalw).
instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9255 
 9256 // Manifest a CmpU result in an integer register.
 9257 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpU3_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg flags)
%{
  match(Set dst (CmpU3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmpw $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpU3(reg)"
  %}
  ins_encode %{
    // dst = 1 if values differ, then negated to -1 when src1 <u src2 (LO),
    // leaving 0 for equal, 1 for src1 >u src2.
    __ cmpw($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}
 9277 
// CmpU3 against an add/sub-range immediate; the compare is expressed as
// subsw into zr so the immediate form of the instruction can be used.
instruct cmpU3_reg_imm(iRegINoSp dst, iRegI src1, immIAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpU3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subsw zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpU3(imm)"
  %}
  ins_encode %{
    __ subsw(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}
 9297 
 9298 // Manifest a CmpUL result in an integer register.
 9299 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpUL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpUL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmp $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpUL3(reg)"
  %}
  ins_encode %{
    // 64-bit unsigned compare; the int result is produced with the same
    // cset/cneg sequence as CmpU3 (LO = unsigned less-than).
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}
 9319 
// CmpUL3 against an add/sub-range long immediate, via subs into zr.
instruct cmpUL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpUL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subs zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpUL3(imm)"
  %}
  ins_encode %{
    __ subs(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}
 9339 
 9340 // Manifest a CmpL result in an integer register.
 9341 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmp $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lt\t# CmpL3(reg)"
  %}
  ins_encode %{
    // Signed variant of the three-way compare: LT (signed less-than)
    // selects the negation, unlike the LO used by CmpU3/CmpUL3.
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
 9361 
// CmpL3 against an add/sub-range long immediate, via subs into zr.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subs zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lt\t# CmpL3(imm)"
  %}
  ins_encode %{
    __ subs(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
 9381 
 9382 // ============================================================================
 9383 // Conditional Move Instructions
 9384 
 9385 // n.b. we have identical rules for both a signed compare op (cmpOp)
 9386 // and an unsigned compare op (cmpOpU). it would be nice if we could
 9387 // define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
 9389 // opclass does not live up to the COND_INTER interface of its
 9390 // component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
 9392 // which throws a ShouldNotHappen. So, we have to provide two flavours
 9393 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
 9394 
// Conditional move of int, signed condition. Note the operand swap:
// csel picks $src2 when the condition holds, $src1 otherwise, matching
// CMoveI's (false-value, true-value) ordering.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9410 
// Unsigned-condition twin of cmovI_reg_reg (see the note above the cmov
// rules about why cmpOp and cmpOpU need separate rules).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9426 
 9427 // special cases where one arg is zero
 9428 
 9429 // n.b. this is selected in preference to the rule above because it
 9430 // avoids loading constant 0 into a source register
 9431 
 9432 // TODO
 9433 // we ought only to be able to cull one of these variants as the ideal
 9434 // transforms ought always to order the zero consistently (to left/right?)
 9435 
// Int cmove with zero as the false value: use zr instead of
// materializing the constant 0 in a register.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9451 
// Unsigned-condition twin of cmovI_zero_reg.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9467 
// Int cmove with zero as the true value (zr supplied as csel's first
// source), avoiding a constant load.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9483 
// Unsigned-condition twin of cmovI_reg_zero.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9499 
 9500 // special case for creating a boolean 0 or 1
 9501 
 9502 // n.b. this is selected in preference to the rule above because it
 9503 // avoids loading constants 0 and 1 into a source register
 9504 
// Materialize cond ? 0 : 1 with a single csinc (zr, zr operands), so
// neither constant needs a register.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
 9523 
// Unsigned-condition twin of cmovI_reg_zero_one.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
 9542 
// Conditional move of long, signed condition (64-bit csel; operand
// ordering as in cmovI_reg_reg).
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9558 
// Unsigned-condition twin of cmovL_reg_reg.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9574 
 9575 // special cases where one arg is zero
 9576 
// Long cmove with zero as the true value (zr), avoiding a constant load.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9592 
// Unsigned-condition twin of cmovL_reg_zero.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9608 
// Long cmove with zero as the false value (zr), avoiding a constant load.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9624 
// Unsigned-condition twin of cmovL_zero_reg.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9640 
// Conditional move of pointer, signed condition (64-bit csel).
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9656 
// Unsigned-condition twin of cmovP_reg_reg.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9672 
 9673 // special cases where one arg is zero
 9674 
// Pointer cmove with null (zero) as the true value, using zr.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9690 
// Unsigned-condition twin of cmovP_reg_zero.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9706 
// Pointer cmove with null (zero) as the false value, using zr.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9722 
// Unsigned-condition twin of cmovP_zero_reg.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9738 
// Conditional move of compressed (narrow) oop, signed condition; 32-bit
// cselw since narrow oops are 32 bits.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9754 
 9755 instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
 9756   match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));
 9757 
 9758   ins_cost(INSN_COST * 2);
 9759   format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}
 9760 
 9761   ins_encode %{
 9762     __ cselw(as_Register($dst$$reg),
 9763              as_Register($src2$$reg),
 9764              as_Register($src1$$reg),
 9765              (Assembler::Condition)$cmp$$cmpcode);
 9766   %}
 9767 
 9768   ins_pipe(icond_reg_reg);
 9769 %}
 9770 
 9771 // special cases where one arg is zero
 9772 
// Narrow-oop cmove with null (zero) as the true value, using zr.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9788 
// Unsigned-condition twin of cmovN_reg_zero.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9804 
// Narrow-oop cmove with null (zero) as the false value, using zr.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9820 
// Unsigned-condition twin of cmovN_zero_reg.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9836 
// Conditional move of float via fcsel (single precision); src2 is taken
// when the condition holds, matching the CMove operand convention.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
 9854 
// Unsigned-condition twin of cmovF_reg.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
 9872 
 9873 instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
 9874 %{
 9875   match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));
 9876 
 9877   ins_cost(INSN_COST * 3);
 9878 
 9879   format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
 9880   ins_encode %{
 9881     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
 9882     __ fcseld(as_FloatRegister($dst$$reg),
 9883               as_FloatRegister($src2$$reg),
 9884               as_FloatRegister($src1$$reg),
 9885               cond);
 9886   %}
 9887 
 9888   ins_pipe(fp_cond_reg_reg_d);
 9889 %}
 9890 
 9891 instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
 9892 %{
 9893   match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));
 9894 
 9895   ins_cost(INSN_COST * 3);
 9896 
 9897   format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
 9898   ins_encode %{
 9899     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
 9900     __ fcseld(as_FloatRegister($dst$$reg),
 9901               as_FloatRegister($src2$$reg),
 9902               as_FloatRegister($src1$$reg),
 9903               cond);
 9904   %}
 9905 
 9906   ins_pipe(fp_cond_reg_reg_d);
 9907 %}
 9908 
 9909 // ============================================================================
 9910 // Arithmetic Instructions
 9911 //
 9912 
 9913 // Integer Addition
 9914 
 9915 // TODO
 9916 // these currently employ operations which do not set CR and hence are
 9917 // not flagged as killing CR but we would like to isolate the cases
 9918 // where we want to set flags from those where we don't. need to work
 9919 // out how to do that.
 9920 
// Integer add, register-register (32-bit addw).
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
 9935 
// Integer add with an add/sub-range immediate; the shared encoder class
// handles both add and sub, selected by opcode (0x0 = add).
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
 9949 
// Integer add of an immediate to the low word of a long (AddI of a
// ConvL2I input); addw's implicit 32-bit truncation makes the narrowing
// conversion free.
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
 9963 
 9964 // Pointer Addition
// Pointer plus long offset, register-register (64-bit add).
instruct addP_reg_reg(iRegPNoSp dst, iRegPorL2P src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
 9979 
// Pointer plus sign-extended int offset: the ConvI2L is folded into the
// add's sxtw extend, so no separate extend instruction is emitted.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegPorL2P src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}
 9994 
// Pointer plus shifted long index: folds the LShiftL into the address
// computation via lea with a scaled (lsl) register offset.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegPorL2P src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10009 
// Pointer plus sign-extended and shifted int index: folds both the
// ConvI2L and the LShiftL into lea's sxtw-scaled register offset.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegPorL2P src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10024 
// (long)(int)src << scale in a single sbfiz: inserts the sign-extended
// int at bit position (scale & 63); the field width is capped at 32 bits
// since only the low 32 bits of src are meaningful.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN2(32, (int)((-$scale$$constant) & 63)));
  %}

  ins_pipe(ialu_reg_shift);
%}
10039 
10040 // Pointer Immediate Addition
10041 // n.b. this needs to be more expensive than using an indirect memory
10042 // operand
instruct addP_reg_imm(iRegPNoSp dst, iRegPorL2P src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10056 
10057 // Long Addition
// Long add, register-register (64-bit add).
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10073 
// Long Immediate Addition. No constant pool entries required.
// Long add with an add/sub-range immediate (no constant pool entry
// needed); opcode 0x0 selects add in the shared encoder.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10088 
10089 // Integer Subtraction
// Integer subtract, register-register (32-bit subw).
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10104 
10105 // Immediate Subtraction
// Integer subtract with an add/sub-range immediate; opcode 0x1 selects
// sub in the shared encoder class.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10119 
10120 // Long Subtraction
// Long subtract, register-register (64-bit sub).
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10136 
// Long Immediate Subtraction. No constant pool entries required.
// Long subtract with an add/sub-range immediate; opcode 0x1 selects sub
// in the shared encoder class.
// Fix: the format string was missing the space between the mnemonic and
// the destination ("sub$dst"), garbling disassembly output.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10151 
10152 // Integer Negation (special case for sub)
10153 
// Integer negation: SubI of zero matched to a single negw.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10167 
10168 // Long Negation
10169 
// Long negation: SubL of zero matched to a single neg.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10183 
10184 // Integer Multiply
10185 
// Integer multiply (32-bit mulw).
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10200 
// Widening signed multiply: MulL of two sign-extended ints is matched
// to a single smull, avoiding the two explicit extensions.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10215 
10216 // Long Multiply
10217 
10218 instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
10219   match(Set dst (MulL src1 src2));
10220 
10221   ins_cost(INSN_COST * 5);
10222   format %{ "mul  $dst, $src1, $src2" %}
10223 
10224   ins_encode %{
10225     __ mul(as_Register($dst$$reg),
10226            as_Register($src1$$reg),
10227            as_Register($src2$$reg));
10228   %}
10229 
10230   ins_pipe(lmul_reg_reg);
10231 %}
10232 
// High 64 bits of the signed 128-bit product of two longs (SMULH).
// NOTE(review): cr is declared but not referenced by the match rule or
// the encoding — presumably intentional (matches upstream); confirm.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}

// High 64 bits of the unsigned 128-bit product of two longs (UMULH).
instruct umulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (UMulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "umulh   $dst, $src1, $src2\t# umulhi" %}

  ins_encode %{
    __ umulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10264 
// Combined Integer Multiply & Add/Sub

// dst = src3 + src1 * src2, fused into a single 32-bit multiply-add.
// The format shows maddw to match the instruction actually emitted
// (the encoding uses the 32-bit MADDW, not 64-bit MADD).
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}

// dst = src3 - src1 * src2, fused into a single 32-bit multiply-subtract.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}

// Combined Integer Multiply & Neg

// dst = (0 - src1) * src2, fused into a single 32-bit multiply-negate.
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
  match(Set dst (MulI (SubI zero src1) src2));

  ins_cost(INSN_COST * 3);
  format %{ "mnegw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mnegw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10315 
// Combined Long Multiply & Add/Sub

// dst = src3 + src1 * src2, fused into a single 64-bit multiply-add.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// dst = src3 - src1 * src2, fused into a single 64-bit multiply-subtract.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// Combined Long Multiply & Neg

// dst = (0 - src1) * src2, fused into a single 64-bit multiply-negate.
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mneg(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10366 
// Combine Integer Signed Multiply & Add/Sub/Neg Long

// dst = src3 + (long)src1 * (long)src2 with sign-extended int operands,
// fused into a single SMADDL.
instruct smaddL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (AddL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smaddl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smaddl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}

// dst = src3 - (long)src1 * (long)src2 with sign-extended int operands,
// fused into a single SMSUBL.
instruct smsubL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (SubL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smsubl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smsubl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}

// dst = (0 - (long)src1) * (long)src2 with sign-extended int operands,
// fused into a single SMNEGL.
instruct smnegL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero (ConvI2L src1)) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smnegl  $dst, $src1, $src2" %}

  ins_encode %{
    __ smnegl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10415 
// Combined Multiply-Add Shorts into Integer (dst = src1 * src2 + src3 * src4)

// Two-instruction sequence: the first product goes into rscratch1, then
// MADDW folds the second product and the accumulation into one step.
instruct muladdS2I(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3, iRegIorL2I src4) %{
  match(Set dst (MulAddS2I (Binary src1 src2) (Binary src3 src4)));

  ins_cost(INSN_COST * 5);
  format %{ "mulw  rscratch1, $src1, $src2\n\t"
            "maddw $dst, $src3, $src4, rscratch1" %}

  ins_encode %{
    __ mulw(rscratch1, as_Register($src1$$reg), as_Register($src2$$reg));
    __ maddw(as_Register($dst$$reg), as_Register($src3$$reg),
             as_Register($src4$$reg), rscratch1);
  %}

  ins_pipe(imac_reg_reg);
%}
10431 
// Integer Divide

// Signed 32-bit division via the shared aarch64_enc_divw encoding.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}

// Long Divide

// Signed 64-bit division via the shared aarch64_enc_div encoding.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10455 
// Integer Remainder

// Signed 32-bit remainder: AArch64 has no remainder instruction, so the
// aarch64_enc_modw encoding computes sdivw followed by msubw
// (dst = src1 - (src1 / src2) * src2).
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10468 
// Long Remainder

// Signed 64-bit remainder: sdiv followed by msub via the shared
// aarch64_enc_mod encoding (dst = src1 - (src1 / src2) * src2).
// Format uses "\n\t" between the two lines, consistent with modI,
// so multi-line disassembly output stays indented.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10481 
// Unsigned Integer Divide

// Unsigned 32-bit division (UDivI ideal node -> udivw).
instruct UdivI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (UDivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "udivw  $dst, $src1, $src2" %}

  ins_encode %{
    __ udivw($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(idiv_reg_reg);
%}

//  Unsigned Long Divide

// Unsigned 64-bit division (UDivL ideal node -> udiv).
instruct UdivL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (UDivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "udiv   $dst, $src1, $src2" %}

  ins_encode %{
    __ udiv($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(ldiv_reg_reg);
%}
10511 
// Unsigned Integer Remainder

// Unsigned 32-bit remainder: udivw into rscratch1, then msubw
// reconstructs dst = src1 - (src1 / src2) * src2.
instruct UmodI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (UModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "udivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode %{
    __ udivw(rscratch1, $src1$$Register, $src2$$Register);
    __ msubw($dst$$Register, rscratch1, $src2$$Register, $src1$$Register);
  %}

  ins_pipe(idiv_reg_reg);
%}
10528 
// Unsigned Long Remainder

// Unsigned 64-bit remainder: udiv into rscratch1, then msub
// reconstructs dst = src1 - (src1 / src2) * src2.
// Format uses "\n\t" between the two lines, consistent with
// UmodI_reg_reg, so multi-line disassembly output stays indented.
// NOTE(review): the name casing (UModL vs UmodI) is inconsistent, but
// renaming an instruct could break external references — left as-is.
instruct UModL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (UModL src1 src2));

  ins_cost(INSN_COST * 38);
  format %{ "udiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode %{
    __ udiv(rscratch1, $src1$$Register, $src2$$Register);
    __ msub($dst$$Register, rscratch1, $src2$$Register, $src1$$Register);
  %}

  ins_pipe(ldiv_reg_reg);
%}
10545 
// Integer Shifts
//
// Register-count forms use the variable-shift instructions (lslvw etc.);
// immediate forms mask the constant with 0x1f, matching Java's int shift
// semantics (count taken mod 32).

// Shift Left Register
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Arithmetic Register
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10643 
10644 // Combined Int Mask and Right Shift (using UBFM)
10645 // TODO
10646 
// Long Shifts
//
// Register-count forms use the variable-shift instructions (lslv etc.);
// immediate forms mask the constant with 0x3f, matching Java's long
// shift semantics (count taken mod 64).

// Shift Left Register
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// A special-case pattern for card table stores.
// Matches an unsigned right shift applied directly to a pointer cast to
// an integer (CastP2X), avoiding a separate move for the cast.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Arithmetic Register
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10760 
10761 // BEGIN This section of the file is automatically generated. Do not edit --------------
10762 // This section is generated from aarch64_ad.m4
10763 
10764 // This pattern is automatically generated from aarch64_ad.m4
10765 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10766 instruct regL_not_reg(iRegLNoSp dst,
10767                          iRegL src1, immL_M1 m1,
10768                          rFlagsReg cr) %{
10769   match(Set dst (XorL src1 m1));
10770   ins_cost(INSN_COST);
10771   format %{ "eon  $dst, $src1, zr" %}
10772 
10773   ins_encode %{
10774     __ eon(as_Register($dst$$reg),
10775               as_Register($src1$$reg),
10776               zr,
10777               Assembler::LSL, 0);
10778   %}
10779 
10780   ins_pipe(ialu_reg);
10781 %}
10782 
10783 // This pattern is automatically generated from aarch64_ad.m4
10784 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10785 instruct regI_not_reg(iRegINoSp dst,
10786                          iRegIorL2I src1, immI_M1 m1,
10787                          rFlagsReg cr) %{
10788   match(Set dst (XorI src1 m1));
10789   ins_cost(INSN_COST);
10790   format %{ "eonw  $dst, $src1, zr" %}
10791 
10792   ins_encode %{
10793     __ eonw(as_Register($dst$$reg),
10794               as_Register($src1$$reg),
10795               zr,
10796               Assembler::LSL, 0);
10797   %}
10798 
10799   ins_pipe(ialu_reg);
10800 %}
10801 
10802 // This pattern is automatically generated from aarch64_ad.m4
10803 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10804 instruct NegI_reg_URShift_reg(iRegINoSp dst,
10805                               immI0 zero, iRegIorL2I src1, immI src2) %{
10806   match(Set dst (SubI zero (URShiftI src1 src2)));
10807 
10808   ins_cost(1.9 * INSN_COST);
10809   format %{ "negw  $dst, $src1, LSR $src2" %}
10810 
10811   ins_encode %{
10812     __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
10813             Assembler::LSR, $src2$$constant & 0x1f);
10814   %}
10815 
10816   ins_pipe(ialu_reg_shift);
10817 %}
10818 
10819 // This pattern is automatically generated from aarch64_ad.m4
10820 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10821 instruct NegI_reg_RShift_reg(iRegINoSp dst,
10822                               immI0 zero, iRegIorL2I src1, immI src2) %{
10823   match(Set dst (SubI zero (RShiftI src1 src2)));
10824 
10825   ins_cost(1.9 * INSN_COST);
10826   format %{ "negw  $dst, $src1, ASR $src2" %}
10827 
10828   ins_encode %{
10829     __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
10830             Assembler::ASR, $src2$$constant & 0x1f);
10831   %}
10832 
10833   ins_pipe(ialu_reg_shift);
10834 %}
10835 
10836 // This pattern is automatically generated from aarch64_ad.m4
10837 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10838 instruct NegI_reg_LShift_reg(iRegINoSp dst,
10839                               immI0 zero, iRegIorL2I src1, immI src2) %{
10840   match(Set dst (SubI zero (LShiftI src1 src2)));
10841 
10842   ins_cost(1.9 * INSN_COST);
10843   format %{ "negw  $dst, $src1, LSL $src2" %}
10844 
10845   ins_encode %{
10846     __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
10847             Assembler::LSL, $src2$$constant & 0x1f);
10848   %}
10849 
10850   ins_pipe(ialu_reg_shift);
10851 %}
10852 
10853 // This pattern is automatically generated from aarch64_ad.m4
10854 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10855 instruct NegL_reg_URShift_reg(iRegLNoSp dst,
10856                               immL0 zero, iRegL src1, immI src2) %{
10857   match(Set dst (SubL zero (URShiftL src1 src2)));
10858 
10859   ins_cost(1.9 * INSN_COST);
10860   format %{ "neg  $dst, $src1, LSR $src2" %}
10861 
10862   ins_encode %{
10863     __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
10864             Assembler::LSR, $src2$$constant & 0x3f);
10865   %}
10866 
10867   ins_pipe(ialu_reg_shift);
10868 %}
10869 
10870 // This pattern is automatically generated from aarch64_ad.m4
10871 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10872 instruct NegL_reg_RShift_reg(iRegLNoSp dst,
10873                               immL0 zero, iRegL src1, immI src2) %{
10874   match(Set dst (SubL zero (RShiftL src1 src2)));
10875 
10876   ins_cost(1.9 * INSN_COST);
10877   format %{ "neg  $dst, $src1, ASR $src2" %}
10878 
10879   ins_encode %{
10880     __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
10881             Assembler::ASR, $src2$$constant & 0x3f);
10882   %}
10883 
10884   ins_pipe(ialu_reg_shift);
10885 %}
10886 
10887 // This pattern is automatically generated from aarch64_ad.m4
10888 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10889 instruct NegL_reg_LShift_reg(iRegLNoSp dst,
10890                               immL0 zero, iRegL src1, immI src2) %{
10891   match(Set dst (SubL zero (LShiftL src1 src2)));
10892 
10893   ins_cost(1.9 * INSN_COST);
10894   format %{ "neg  $dst, $src1, LSL $src2" %}
10895 
10896   ins_encode %{
10897     __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
10898             Assembler::LSL, $src2$$constant & 0x3f);
10899   %}
10900 
10901   ins_pipe(ialu_reg_shift);
10902 %}
10903 
10904 // This pattern is automatically generated from aarch64_ad.m4
10905 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10906 instruct AndI_reg_not_reg(iRegINoSp dst,
10907                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
10908   match(Set dst (AndI src1 (XorI src2 m1)));
10909   ins_cost(INSN_COST);
10910   format %{ "bicw  $dst, $src1, $src2" %}
10911 
10912   ins_encode %{
10913     __ bicw(as_Register($dst$$reg),
10914               as_Register($src1$$reg),
10915               as_Register($src2$$reg),
10916               Assembler::LSL, 0);
10917   %}
10918 
10919   ins_pipe(ialu_reg_reg);
10920 %}
10921 
10922 // This pattern is automatically generated from aarch64_ad.m4
10923 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10924 instruct AndL_reg_not_reg(iRegLNoSp dst,
10925                          iRegL src1, iRegL src2, immL_M1 m1) %{
10926   match(Set dst (AndL src1 (XorL src2 m1)));
10927   ins_cost(INSN_COST);
10928   format %{ "bic  $dst, $src1, $src2" %}
10929 
10930   ins_encode %{
10931     __ bic(as_Register($dst$$reg),
10932               as_Register($src1$$reg),
10933               as_Register($src2$$reg),
10934               Assembler::LSL, 0);
10935   %}
10936 
10937   ins_pipe(ialu_reg_reg);
10938 %}
10939 
10940 // This pattern is automatically generated from aarch64_ad.m4
10941 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10942 instruct OrI_reg_not_reg(iRegINoSp dst,
10943                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
10944   match(Set dst (OrI src1 (XorI src2 m1)));
10945   ins_cost(INSN_COST);
10946   format %{ "ornw  $dst, $src1, $src2" %}
10947 
10948   ins_encode %{
10949     __ ornw(as_Register($dst$$reg),
10950               as_Register($src1$$reg),
10951               as_Register($src2$$reg),
10952               Assembler::LSL, 0);
10953   %}
10954 
10955   ins_pipe(ialu_reg_reg);
10956 %}
10957 
10958 // This pattern is automatically generated from aarch64_ad.m4
10959 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10960 instruct OrL_reg_not_reg(iRegLNoSp dst,
10961                          iRegL src1, iRegL src2, immL_M1 m1) %{
10962   match(Set dst (OrL src1 (XorL src2 m1)));
10963   ins_cost(INSN_COST);
10964   format %{ "orn  $dst, $src1, $src2" %}
10965 
10966   ins_encode %{
10967     __ orn(as_Register($dst$$reg),
10968               as_Register($src1$$reg),
10969               as_Register($src2$$reg),
10970               Assembler::LSL, 0);
10971   %}
10972 
10973   ins_pipe(ialu_reg_reg);
10974 %}
10975 
10976 // This pattern is automatically generated from aarch64_ad.m4
10977 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10978 instruct XorI_reg_not_reg(iRegINoSp dst,
10979                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
10980   match(Set dst (XorI m1 (XorI src2 src1)));
10981   ins_cost(INSN_COST);
10982   format %{ "eonw  $dst, $src1, $src2" %}
10983 
10984   ins_encode %{
10985     __ eonw(as_Register($dst$$reg),
10986               as_Register($src1$$reg),
10987               as_Register($src2$$reg),
10988               Assembler::LSL, 0);
10989   %}
10990 
10991   ins_pipe(ialu_reg_reg);
10992 %}
10993 
10994 // This pattern is automatically generated from aarch64_ad.m4
10995 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10996 instruct XorL_reg_not_reg(iRegLNoSp dst,
10997                          iRegL src1, iRegL src2, immL_M1 m1) %{
10998   match(Set dst (XorL m1 (XorL src2 src1)));
10999   ins_cost(INSN_COST);
11000   format %{ "eon  $dst, $src1, $src2" %}
11001 
11002   ins_encode %{
11003     __ eon(as_Register($dst$$reg),
11004               as_Register($src1$$reg),
11005               as_Register($src2$$reg),
11006               Assembler::LSL, 0);
11007   %}
11008 
11009   ins_pipe(ialu_reg_reg);
11010 %}
11011 
11012 // This pattern is automatically generated from aarch64_ad.m4
11013 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11014 // val & (-1 ^ (val >>> shift)) ==> bicw
11015 instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
11016                          iRegIorL2I src1, iRegIorL2I src2,
11017                          immI src3, immI_M1 src4) %{
11018   match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
11019   ins_cost(1.9 * INSN_COST);
11020   format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}
11021 
11022   ins_encode %{
11023     __ bicw(as_Register($dst$$reg),
11024               as_Register($src1$$reg),
11025               as_Register($src2$$reg),
11026               Assembler::LSR,
11027               $src3$$constant & 0x1f);
11028   %}
11029 
11030   ins_pipe(ialu_reg_reg_shift);
11031 %}
11032 
11033 // This pattern is automatically generated from aarch64_ad.m4
11034 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11035 // val & (-1 ^ (val >>> shift)) ==> bic
11036 instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
11037                          iRegL src1, iRegL src2,
11038                          immI src3, immL_M1 src4) %{
11039   match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
11040   ins_cost(1.9 * INSN_COST);
11041   format %{ "bic  $dst, $src1, $src2, LSR $src3" %}
11042 
11043   ins_encode %{
11044     __ bic(as_Register($dst$$reg),
11045               as_Register($src1$$reg),
11046               as_Register($src2$$reg),
11047               Assembler::LSR,
11048               $src3$$constant & 0x3f);
11049   %}
11050 
11051   ins_pipe(ialu_reg_reg_shift);
11052 %}
11053 
11054 // This pattern is automatically generated from aarch64_ad.m4
11055 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11056 // val & (-1 ^ (val >> shift)) ==> bicw
11057 instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
11058                          iRegIorL2I src1, iRegIorL2I src2,
11059                          immI src3, immI_M1 src4) %{
11060   match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
11061   ins_cost(1.9 * INSN_COST);
11062   format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}
11063 
11064   ins_encode %{
11065     __ bicw(as_Register($dst$$reg),
11066               as_Register($src1$$reg),
11067               as_Register($src2$$reg),
11068               Assembler::ASR,
11069               $src3$$constant & 0x1f);
11070   %}
11071 
11072   ins_pipe(ialu_reg_reg_shift);
11073 %}
11074 
11075 // This pattern is automatically generated from aarch64_ad.m4
11076 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11077 // val & (-1 ^ (val >> shift)) ==> bic
11078 instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
11079                          iRegL src1, iRegL src2,
11080                          immI src3, immL_M1 src4) %{
11081   match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
11082   ins_cost(1.9 * INSN_COST);
11083   format %{ "bic  $dst, $src1, $src2, ASR $src3" %}
11084 
11085   ins_encode %{
11086     __ bic(as_Register($dst$$reg),
11087               as_Register($src1$$reg),
11088               as_Register($src2$$reg),
11089               Assembler::ASR,
11090               $src3$$constant & 0x3f);
11091   %}
11092 
11093   ins_pipe(ialu_reg_reg_shift);
11094 %}
11095 
11096 // This pattern is automatically generated from aarch64_ad.m4
11097 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11098 // val & (-1 ^ (val ror shift)) ==> bicw
11099 instruct AndI_reg_RotateRight_not_reg(iRegINoSp dst,
11100                          iRegIorL2I src1, iRegIorL2I src2,
11101                          immI src3, immI_M1 src4) %{
11102   match(Set dst (AndI src1 (XorI(RotateRight src2 src3) src4)));
11103   ins_cost(1.9 * INSN_COST);
11104   format %{ "bicw  $dst, $src1, $src2, ROR $src3" %}
11105 
11106   ins_encode %{
11107     __ bicw(as_Register($dst$$reg),
11108               as_Register($src1$$reg),
11109               as_Register($src2$$reg),
11110               Assembler::ROR,
11111               $src3$$constant & 0x1f);
11112   %}
11113 
11114   ins_pipe(ialu_reg_reg_shift);
11115 %}
11116 
11117 // This pattern is automatically generated from aarch64_ad.m4
11118 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11119 // val & (-1 ^ (val ror shift)) ==> bic
11120 instruct AndL_reg_RotateRight_not_reg(iRegLNoSp dst,
11121                          iRegL src1, iRegL src2,
11122                          immI src3, immL_M1 src4) %{
11123   match(Set dst (AndL src1 (XorL(RotateRight src2 src3) src4)));
11124   ins_cost(1.9 * INSN_COST);
11125   format %{ "bic  $dst, $src1, $src2, ROR $src3" %}
11126 
11127   ins_encode %{
11128     __ bic(as_Register($dst$$reg),
11129               as_Register($src1$$reg),
11130               as_Register($src2$$reg),
11131               Assembler::ROR,
11132               $src3$$constant & 0x3f);
11133   %}
11134 
11135   ins_pipe(ialu_reg_reg_shift);
11136 %}
11137 
11138 // This pattern is automatically generated from aarch64_ad.m4
11139 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11140 // val & (-1 ^ (val << shift)) ==> bicw
11141 instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
11142                          iRegIorL2I src1, iRegIorL2I src2,
11143                          immI src3, immI_M1 src4) %{
11144   match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
11145   ins_cost(1.9 * INSN_COST);
11146   format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}
11147 
11148   ins_encode %{
11149     __ bicw(as_Register($dst$$reg),
11150               as_Register($src1$$reg),
11151               as_Register($src2$$reg),
11152               Assembler::LSL,
11153               $src3$$constant & 0x1f);
11154   %}
11155 
11156   ins_pipe(ialu_reg_reg_shift);
11157 %}
11158 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val << shift)) ==> bic
// src4 is immL_M1 (-1), so the XorL is a bitwise NOT of the shifted
// value; folds to one 64-bit BIC (AND-NOT) with an LSL-shifted register
// operand.  Shift amount is masked to 0..63.
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11179 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val >>> shift)) ==> eonw
// src4 == -1 makes the outer XorI a bitwise NOT of the inner XOR, i.e.
// ~(src1 ^ (src2 >>> src3)); folds to one EONW (XOR-NOT) with an
// LSR-shifted register operand.  Shift amount is masked to 0..31.
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11200 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val >>> shift)) ==> eon
// src4 == -1 makes the outer XorL a bitwise NOT, i.e.
// ~(src1 ^ (src2 >>> src3)); folds to one 64-bit EON (XOR-NOT) with an
// LSR-shifted register operand.  Shift amount is masked to 0..63.
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11221 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val >> shift)) ==> eonw
// src4 == -1 makes the outer XorI a bitwise NOT, i.e.
// ~(src1 ^ (src2 >> src3)); folds to one EONW with an ASR-shifted
// register operand.  Shift amount is masked to 0..31.
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11242 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val >> shift)) ==> eon
// src4 == -1 makes the outer XorL a bitwise NOT, i.e.
// ~(src1 ^ (src2 >> src3)); folds to one 64-bit EON with an ASR-shifted
// register operand.  Shift amount is masked to 0..63.
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11263 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val ror shift)) ==> eonw
// src4 == -1 makes the outer XorI a bitwise NOT, i.e.
// ~(src1 ^ (src2 ror src3)); folds to one EONW with a ROR-shifted
// register operand.  Rotate amount is masked to 0..31.
instruct XorI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (XorI src4 (XorI(RotateRight src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11284 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val ror shift)) ==> eon
// src4 == -1 makes the outer XorL a bitwise NOT, i.e.
// ~(src1 ^ (src2 ror src3)); folds to one 64-bit EON with a ROR-shifted
// register operand.  Rotate amount is masked to 0..63.
instruct XorL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (XorL src4 (XorL(RotateRight src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11305 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val << shift)) ==> eonw
// src4 == -1 makes the outer XorI a bitwise NOT, i.e.
// ~(src1 ^ (src2 << src3)); folds to one EONW with an LSL-shifted
// register operand.  Shift amount is masked to 0..31.
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11326 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val << shift)) ==> eon
// src4 == -1 makes the outer XorL a bitwise NOT, i.e.
// ~(src1 ^ (src2 << src3)); folds to one 64-bit EON with an LSL-shifted
// register operand.  Shift amount is masked to 0..63.
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11347 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val >>> shift)) ==> ornw
// src4 == -1 makes the inner XorI a bitwise NOT of the shifted value,
// i.e. src1 | ~(src2 >>> src3); folds to one ORNW (OR-NOT) with an
// LSR-shifted register operand.  Shift amount is masked to 0..31.
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11368 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val >>> shift)) ==> orn
// src4 == -1 makes the inner XorL a bitwise NOT, i.e.
// src1 | ~(src2 >>> src3); folds to one 64-bit ORN (OR-NOT) with an
// LSR-shifted register operand.  Shift amount is masked to 0..63.
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11389 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val >> shift)) ==> ornw
// src4 == -1 makes the inner XorI a bitwise NOT, i.e.
// src1 | ~(src2 >> src3); folds to one ORNW with an ASR-shifted
// register operand.  Shift amount is masked to 0..31.
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11410 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val >> shift)) ==> orn
// src4 == -1 makes the inner XorL a bitwise NOT, i.e.
// src1 | ~(src2 >> src3); folds to one 64-bit ORN with an ASR-shifted
// register operand.  Shift amount is masked to 0..63.
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11431 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val ror shift)) ==> ornw
// src4 == -1 makes the inner XorI a bitwise NOT, i.e.
// src1 | ~(src2 ror src3); folds to one ORNW with a ROR-shifted
// register operand.  Rotate amount is masked to 0..31.
instruct OrI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11452 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val ror shift)) ==> orn
// src4 == -1 makes the inner XorL a bitwise NOT, i.e.
// src1 | ~(src2 ror src3); folds to one 64-bit ORN with a ROR-shifted
// register operand.  Rotate amount is masked to 0..63.
instruct OrL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11473 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val << shift)) ==> ornw
// src4 == -1 makes the inner XorI a bitwise NOT, i.e.
// src1 | ~(src2 << src3); folds to one ORNW with an LSL-shifted
// register operand.  Shift amount is masked to 0..31.
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11494 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val << shift)) ==> orn
// src4 == -1 makes the inner XorL a bitwise NOT, i.e.
// src1 | ~(src2 << src3); folds to one 64-bit ORN with an LSL-shifted
// register operand.  Shift amount is masked to 0..63.
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11515 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 >>> src3) ==> andw with an LSR-shifted register operand.
// Shift amount is masked to 0..31.
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11536 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 >>> src3) ==> 64-bit AND with an LSR-shifted register
// operand.  Shift amount is masked to 0..63.
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11557 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 >> src3) ==> andw with an ASR-shifted register operand.
// Shift amount is masked to 0..31.
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11578 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 >> src3) ==> 64-bit AND with an ASR-shifted register
// operand.  Shift amount is masked to 0..63.
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11599 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 << src3) ==> andw with an LSL-shifted register operand.
// Shift amount is masked to 0..31.
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11620 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 << src3) ==> 64-bit AND with an LSL-shifted register
// operand.  Shift amount is masked to 0..63.
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11641 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 ror src3) ==> andw with a ROR-shifted register operand.
// Rotate amount is masked to 0..31.
instruct AndI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11662 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 ror src3) ==> 64-bit AND with a ROR-shifted register
// operand.  Rotate amount is masked to 0..63.
instruct AndL_reg_RotateRight_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11683 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 >>> src3) ==> eorw with an LSR-shifted register operand.
// Shift amount is masked to 0..31.
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11704 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 >>> src3) ==> 64-bit EOR with an LSR-shifted register
// operand.  Shift amount is masked to 0..63.
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11725 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 >> src3) ==> eorw with an ASR-shifted register operand.
// Shift amount is masked to 0..31.
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11746 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 >> src3) ==> 64-bit EOR with an ASR-shifted register
// operand.  Shift amount is masked to 0..63.
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11767 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 << src3) ==> eorw with an LSL-shifted register operand.
// Shift amount is masked to 0..31.
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11788 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 << src3) ==> 64-bit EOR with an LSL-shifted register
// operand.  Shift amount is masked to 0..63.
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11809 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 ror src3) ==> eorw with a ROR-shifted register operand.
// Rotate amount is masked to 0..31.
instruct XorI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11830 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 ror src3) ==> 64-bit EOR with a ROR-shifted register
// operand.  Rotate amount is masked to 0..63.
instruct XorL_reg_RotateRight_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11851 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | (src2 >>> src3) ==> orrw with an LSR-shifted register operand.
// Shift amount is masked to 0..31.
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11872 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | (src2 >>> src3) ==> 64-bit ORR with an LSR-shifted register
// operand.  Shift amount is masked to 0..63.
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11893 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | (src2 >> src3) ==> orrw with an ASR-shifted register operand.
// Shift amount is masked to 0..31.
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11914 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | (src2 >> src3) ==> 64-bit ORR with an ASR-shifted register
// operand.  Shift amount is masked to 0..63.
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11935 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | (src2 << src3) ==> orrw with an LSL-shifted register operand.
// Shift amount is masked to 0..31.
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11956 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | (src2 << src3) ==> 64-bit ORR with an LSL-shifted register
// operand.  Shift amount is masked to 0..63.
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11977 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | (src2 ror src3) ==> orrw with a ROR-shifted register operand.
// Rotate amount is masked to 0..31.
instruct OrI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11998 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | (src2 ror src3) ==> 64-bit ORR with a ROR-shifted register
// operand.  Rotate amount is masked to 0..63.
instruct OrL_reg_RotateRight_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12019 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 + (src2 >>> src3) ==> addw with an LSR-shifted register operand.
// Shift amount is masked to 0..31.
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12040 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long add with a logical-shift-right of the second operand folded into a
// single ADD (shifted-register, LSR); shift masked to 0..63.
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12061 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Int add with an arithmetic-shift-right of the second operand folded into a
// single ADDW (shifted-register, ASR); shift masked to 0..31.
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12082 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long add with an arithmetic-shift-right of the second operand folded into a
// single ADD (shifted-register, ASR); shift masked to 0..63.
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12103 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Int add with a left-shift of the second operand folded into a single ADDW
// (shifted-register, LSL); shift masked to 0..31.
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12124 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long add with a left-shift of the second operand folded into a single ADD
// (shifted-register, LSL); shift masked to 0..63.
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12145 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Int subtract with a logical-shift-right of the subtrahend folded into a
// single SUBW (shifted-register, LSR); shift masked to 0..31.
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12166 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long subtract with a logical-shift-right of the subtrahend folded into a
// single SUB (shifted-register, LSR); shift masked to 0..63.
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12187 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Int subtract with an arithmetic-shift-right of the subtrahend folded into a
// single SUBW (shifted-register, ASR); shift masked to 0..31.
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12208 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long subtract with an arithmetic-shift-right of the subtrahend folded into
// a single SUB (shifted-register, ASR); shift masked to 0..63.
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12229 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Int subtract with a left-shift of the subtrahend folded into a single SUBW
// (shifted-register, LSL); shift masked to 0..31.
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12250 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long subtract with a left-shift of the subtrahend folded into a single SUB
// (shifted-register, LSL); shift masked to 0..63.
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12271 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    // Map (src << lshift) >> rshift onto SBFM's (immr, imms) operands:
    // r = net right rotate, s = index of the field's sign bit.
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12294 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    // 32-bit variant of sbfmL: same (immr, imms) mapping, mod 32.
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12317 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    // Unsigned counterpart of sbfmL: UBFM zero-extends the extracted field.
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12340 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    // 32-bit variant of ubfmL: same (immr, imms) mapping, mod 32.
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12363 
12364 // Bitfield extract with shift & mask
12365 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src >>> rshift) & mask, with mask = 2^width - 1, becomes a single UBFXW
// bitfield extract of 'width' bits starting at 'rshift'.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12385 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit version of ubfxwI: (src >>> rshift) & mask becomes a single UBFX.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));
  // Make sure we are not going to exceed what ubfx can do.
  predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12405 
12406 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The ConvI2L is absorbed: UBFX into an X register already zero-extends.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12429 
12430 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// (src & mask) << lshift becomes a single UBFIZW (insert 'width' bits at 'lshift').
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12452 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// 64-bit version of ubfizwI.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  predicate((exact_log2_long(n->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12474 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// ConvI2L of ((src & mask) << lshift): the long extension is absorbed by UBFIZW.
instruct ubfizwIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (LShiftI (AndI src mask) lshift)));
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12496 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// ConvL2I of ((src & mask) << lshift): predicate keeps the field within 32 bits.
instruct ubfizLConvL2I(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
%{
  match(Set dst (ConvL2I (LShiftL (AndL src mask) lshift)));
  predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12518 
12519 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12540 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// If there is a convert L to I block between and AndL and a LShiftI, we can also match ubfiz
instruct ubfizLConvL2Ix(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
%{
  match(Set dst (LShiftI (ConvL2I (AndL src mask)) lshift));
  predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 31)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12561 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Can skip int2long conversions after AND with small bitmask
// ConvI2L(src & msk) with msk = 2^k - 1: UBFIZ at position 0 both masks
// and zero-extends in one instruction.
instruct ubfizIConvI2LAndI(iRegLNoSp dst, iRegI src, immI_bitmask msk)
%{
  match(Set dst (ConvI2L (AndI src msk)));
  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, 0, exact_log2($msk + 1) " %}
  ins_encode %{
    __ ubfiz(as_Register($dst$$reg), as_Register($src$$reg), 0, exact_log2($msk$$constant + 1));
  %}
  ins_pipe(ialu_reg_shift);
%}
12576 
12577 
12578 // Rotations
12579 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fuses (src1 << lshift) | (src2 >>> rshift) into one EXTR; the predicate
// requires lshift + rshift == 0 (mod 64), i.e. the two fields tile 64 bits.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12596 
12597 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit version of extrOrL: OrI of complementary shifts becomes one EXTRW.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12614 
12615 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Same as extrOrL but matching AddL: with complementary shifts the two
// fields cannot overlap, so add and or produce identical bits.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12632 
12633 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit version of extrAddL: AddI of complementary shifts becomes one EXTRW.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12650 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate right by immediate: EXTRW with the same source register as both
// operands is the canonical AArch64 ROR-immediate idiom.
instruct rorI_imm(iRegINoSp dst, iRegI src, immI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ extrw(as_Register($dst$$reg), as_Register($src$$reg), as_Register($src$$reg),
               $shift$$constant & 0x1f);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12666 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit rotate right by immediate: EXTR with src repeated (ROR idiom).
instruct rorL_imm(iRegLNoSp dst, iRegL src, immI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ extr(as_Register($dst$$reg), as_Register($src$$reg), as_Register($src$$reg),
               $shift$$constant & 0x3f);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12682 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate right by a register amount: single RORVW instruction.
instruct rorI_reg(iRegINoSp dst, iRegI src, iRegI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg), as_Register($shift$$reg));
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12697 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit rotate right by a register amount: single RORV instruction.
instruct rorL_reg(iRegLNoSp dst, iRegL src, iRegI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ rorv(as_Register($dst$$reg), as_Register($src$$reg), as_Register($shift$$reg));
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12712 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate left by a register amount: no ROLV exists, so negate the shift
// (rol(x, s) == ror(x, -s)) and use RORVW; clobbers rscratch1.
instruct rolI_reg(iRegINoSp dst, iRegI src, iRegI shift)
%{
  match(Set dst (RotateLeft src shift));

  ins_cost(INSN_COST);
  format %{ "rol    $dst, $src, $shift" %}

  ins_encode %{
     __ subw(rscratch1, zr, as_Register($shift$$reg));
     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg), rscratch1);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12728 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit rotate left: negate the shift and use RORV; clobbers rscratch1.
instruct rolL_reg(iRegLNoSp dst, iRegL src, iRegI shift)
%{
  match(Set dst (RotateLeft src shift));

  ins_cost(INSN_COST);
  format %{ "rol    $dst, $src, $shift" %}

  ins_encode %{
     __ subw(rscratch1, zr, as_Register($shift$$reg));
     __ rorv(as_Register($dst$$reg), as_Register($src$$reg), rscratch1);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12744 
12745 
12746 // Add/subtract (extended)
12747 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long add of a sign-extended int: ADD with the sxtw extended-register form.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12762 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long subtract of a sign-extended int: SUB with the sxtw extended-register form.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12777 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 << 16) >> 16 is the sign-extend-short idiom: fold into ADD with sxth.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}
12792 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 << 24) >> 24 is the sign-extend-byte idiom: fold into ADD with sxtb.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12807 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 << 24) >>> 24 is the zero-extend-byte idiom: fold into ADD with uxtb.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12822 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long (src2 << 48) >> 48 sign-extend-short idiom: fold into ADD with sxth.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}
12837 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long (src2 << 32) >> 32 sign-extend-word idiom: fold into ADD with sxtw.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12852 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long (src2 << 56) >> 56 sign-extend-byte idiom: fold into ADD with sxtb.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12867 
12868 // This pattern is automatically generated from aarch64_ad.m4
12869 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12870 instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
12871 %{
12872   match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
12873   ins_cost(INSN_COST);
12874   format %{ "add  $dst, $src1, $src2, uxtb" %}
12875 
12876    ins_encode %{
12877      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12878             as_Register($src2$$reg), ext::uxtb);
12879    %}
12880   ins_pipe(ialu_reg_reg);
12881 %}
12882 
// ---- Extended-register ADD via AND-mask ---------------------------------
// An AND with 0xff / 0xffff / 0xffffffff is a zero-extension, so the add of
// the masked value is folded into the extended-register ADD (uxtb/uxth/uxtw).

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src2 & 0xff -> addw ..., uxtb (32-bit form)
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src2 & 0xffff -> addw ..., uxth (32-bit form)
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src2 & 0xffL -> add ..., uxtb (64-bit form)
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src2 & 0xffffL -> add ..., uxth (64-bit form)
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src2 & 0xffffffffL -> add ..., uxtw (64-bit form)
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12957 
// ---- Extended-register SUB via AND-mask ---------------------------------
// Mirror of the AddExt*_and patterns above, for subtraction of a
// zero-extended (masked) src2.

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 - (src2 & 0xff) -> subw ..., uxtb (32-bit form)
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 - (src2 & 0xffff) -> subw ..., uxth (32-bit form)
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 - (src2 & 0xffL) -> sub ..., uxtb (64-bit form)
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 - (src2 & 0xffffL) -> sub ..., uxth (64-bit form)
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 - (src2 & 0xffffffffL) -> sub ..., uxtw (64-bit form)
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
13032 
13033 
// ---- Extended-register long ADD/SUB with extra left shift ---------------
// ((src2 << k) >> k) << lshift2 : the inner pair is a sign-extension, and the
// outer shift becomes the shift amount of the extended-register ADD/SUB
// (operand type immIExt presumably restricts lshift2 to the range legal for
// the extended-register encoding -- see its definition).

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// add dst, src1, sxtb(src2) << lshift2
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// add dst, src1, sxth(src2) << lshift2
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// add dst, src1, sxtw(src2) << lshift2
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// sub dst, src1, sxtb(src2) << lshift2
instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// sub dst, src1, sxth(src2) << lshift2
instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// sub dst, src1, sxtw(src2) << lshift2
instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13123 
// ---- Extended-register int ADD/SUB with extra left shift ----------------
// 32-bit counterparts of the long patterns above; note the w-form opcodes.

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// addw dst, src1, sxtb(src2) << lshift2
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// addw dst, src1, sxth(src2) << lshift2
instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// subw dst, src1, sxtb(src2) << lshift2
instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// subw dst, src1, sxth(src2) << lshift2
instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13183 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long add of a shifted int-to-long conversion: ConvI2L is a word
// sign-extension, folded as add dst, src1, sxtw(src2) << lshift.
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long subtract of a shifted int-to-long conversion:
// sub dst, src1, sxtw(src2) << lshift.
instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13213 
// ---- Extended-register long ADD/SUB of a masked-then-shifted value ------
// (src2 & mask) << lshift : the mask is a zero-extension (uxtb/uxth/uxtw)
// and the left shift becomes the extended-register shift amount.

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// add dst, src1, uxtb(src2) << lshift
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// add dst, src1, uxth(src2) << lshift
instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// add dst, src1, uxtw(src2) << lshift
instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// sub dst, src1, uxtb(src2) << lshift
instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// sub dst, src1, uxth(src2) << lshift
instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// sub dst, src1, uxtw(src2) << lshift
instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13303 
// ---- Extended-register int ADD/SUB of a masked-then-shifted value -------
// 32-bit counterparts of the long *_and_shift patterns above (w-form opcodes).

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// addw dst, src1, uxtb(src2) << lshift
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// addw dst, src1, uxth(src2) << lshift
instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// subw dst, src1, uxtb(src2) << lshift
instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// subw dst, src1, uxth(src2) << lshift
instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13363 
// ---- Conditional-select helpers for the MinI/MaxI expansions below ------
// These instructs have no match rule; they exist only as building blocks for
// the min/max expand rules, which first set the flags with compI_reg_imm0.

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = LT ? src1 : src2
instruct cmovI_reg_reg_lt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE src2, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 lt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             $src2$$Register,
             Assembler::LT);
  %}
  ins_pipe(icond_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = GT ? src1 : src2
instruct cmovI_reg_reg_gt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE src2, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 gt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             $src2$$Register,
             Assembler::GT);
  %}
  ins_pipe(icond_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = LT ? src1 : 0
instruct cmovI_reg_imm0_lt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, zr lt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = GT ? src1 : 0
instruct cmovI_reg_imm0_gt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, zr gt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = LE ? src1 : 1  (csinc with zr: zr + 1 on the not-taken side)
instruct cmovI_reg_imm1_le(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, $src1, zr le\t"  %}

  ins_encode %{
    __ csincw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LE);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = GT ? src1 : 1
instruct cmovI_reg_imm1_gt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, $src1, zr gt\t"  %}

  ins_encode %{
    __ csincw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = LT ? src1 : -1  (csinv with zr: ~zr on the not-taken side)
instruct cmovI_reg_immM1_lt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csinvw $dst, $src1, zr lt\t"  %}

  ins_encode %{
    __ csinvw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = GE ? src1 : -1
instruct cmovI_reg_immM1_ge(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csinvw $dst, $src1, zr ge\t"  %}

  ins_encode %{
    __ csinvw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GE);
  %}
  ins_pipe(icond_reg);
%}
13499 
// ---- MinI against the special constants 0, 1, -1 ------------------------
// Each expands to a compare of src against zero followed by one conditional
// select; the commuted (imm first) form is provided for each constant.

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(src, 0): src < 0 ? src : 0
instruct minI_reg_imm0(iRegINoSp dst, iRegIorL2I src, immI0 imm)
%{
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(0, src): commuted form of the above
instruct minI_imm0_reg(iRegINoSp dst, immI0 imm, iRegIorL2I src)
%{
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(src, 1): src <= 0 ? src : 1 (csinc against zero-compare)
instruct minI_reg_imm1(iRegINoSp dst, iRegIorL2I src, immI_1 imm)
%{
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_le(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(1, src): commuted form of the above
instruct minI_imm1_reg(iRegINoSp dst, immI_1 imm, iRegIorL2I src)
%{
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_le(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(src, -1): src < 0 (hence <= -1) ? src : -1 (csinv against zero-compare)
instruct minI_reg_immM1(iRegINoSp dst, iRegIorL2I src, immI_M1 imm)
%{
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(-1, src): commuted form of the above
instruct minI_immM1_reg(iRegINoSp dst, immI_M1 imm, iRegIorL2I src)
%{
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_lt(dst, src, cr);
  %}
%}
13577 
// ---- MaxI against the special constants 0, 1, -1 ------------------------
// Mirror of the MinI patterns above, using the opposite condition codes.

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(src, 0): src > 0 ? src : 0
instruct maxI_reg_imm0(iRegINoSp dst, iRegIorL2I src, immI0 imm)
%{
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(0, src): commuted form of the above
instruct maxI_imm0_reg(iRegINoSp dst, immI0 imm, iRegIorL2I src)
%{
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(src, 1): src > 0 ? src : 1 (csinc against zero-compare)
instruct maxI_reg_imm1(iRegINoSp dst, iRegIorL2I src, immI_1 imm)
%{
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(1, src): commuted form of the above
instruct maxI_imm1_reg(iRegINoSp dst, immI_1 imm, iRegIorL2I src)
%{
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(src, -1): src >= 0 ? src : -1 (csinv against zero-compare)
instruct maxI_reg_immM1(iRegINoSp dst, iRegIorL2I src, immI_M1 imm)
%{
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_ge(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(-1, src): commuted form of the above
instruct maxI_immM1_reg(iRegINoSp dst, immI_M1 imm, iRegIorL2I src)
%{
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_ge(dst, src, cr);
  %}
%}
13655 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Bit-order reversal of a 32-bit value via RBIT (w-form).
instruct bits_reverse_I(iRegINoSp dst, iRegIorL2I src)
%{
  match(Set dst (ReverseI src));
  ins_cost(INSN_COST);
  format %{ "rbitw  $dst, $src" %}
  ins_encode %{
    __ rbitw($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Bit-order reversal of a 64-bit value via RBIT.
instruct bits_reverse_L(iRegLNoSp dst, iRegL src)
%{
  match(Set dst (ReverseL src));
  ins_cost(INSN_COST);
  format %{ "rbit  $dst, $src" %}
  ins_encode %{
    __ rbit($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
13681 
13682 
13683 // END This section of the file is automatically generated. Do not edit --------------
13684 
13685 
13686 // ============================================================================
13687 // Floating Point Arithmetic Instructions
13688 
// Half-precision (FP16) scalar add.
instruct addHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddHF src1 src2));
  format %{ "faddh $dst, $src1, $src2" %}
  ins_encode %{
    __ faddh($dst$$FloatRegister,
             $src1$$FloatRegister,
             $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}

// Single-precision scalar add.
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision scalar add.
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13729 
// Half-precision (FP16) scalar subtract.
instruct subHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubHF src1 src2));
  format %{ "fsubh $dst, $src1, $src2" %}
  ins_encode %{
    __ fsubh($dst$$FloatRegister,
             $src1$$FloatRegister,
             $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}

// Single-precision scalar subtract.
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision scalar subtract.
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13770 
// Half-precision (FP16) scalar multiply.
instruct mulHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulHF src1 src2));
  format %{ "fmulh $dst, $src1, $src2" %}
  ins_encode %{
    __ fmulh($dst$$FloatRegister,
             $src1$$FloatRegister,
             $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}

// Single-precision scalar multiply.
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision scalar multiply.
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13811 
// src1 * src2 + src3 (half-precision float)
// Fused multiply-add: single rounding between multiply and add.
instruct maddHF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  match(Set dst (FmaHF src3 (Binary src1 src2)));
  format %{ "fmaddh $dst, $src1, $src2, $src3" %}
  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmaddh($dst$$FloatRegister,
              $src1$$FloatRegister,
              $src2$$FloatRegister,
              $src3$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// src1 * src2 + src3
// Fused multiply-add, single precision.
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 + src3
// Fused multiply-add, double precision.
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13859 
// src1 * (-src2) + src3
// "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3"
// fmsub computes src3 - src1*src2, i.e. src1 * (-src2) + src3, fused.
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * (-src2) + src3
// "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3"
// Double-precision counterpart of msubF_reg_reg.
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13895 
// src1 * (-src2) - src3
// "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
// fnmadd computes -(src1*src2) - src3, fused.
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * (-src2) - src3
// "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
// Double-precision counterpart of mnaddF_reg_reg.
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13931 
// src1 * src2 - src3
// fnmsub computes src1*src2 - src3, fused.
// NOTE(review): the 'zero' operand is not referenced by the match rule or the
// encoding; it looks vestigial -- confirm before removing.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
// NOTE(review): same apparently-unused 'zero' operand as mnsubF_reg_reg.
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    // n.b. insn name should be fnmsubd
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13966 
// Math.max(HH)H (half-precision float)
instruct maxHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxHF src1 src2));
  format %{ "fmaxh $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxh($dst$$FloatRegister,
             $src1$$FloatRegister,
             $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.min(HH)H (half-precision float)
instruct minHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinHF src1 src2));
  format %{ "fminh $dst, $src1, $src2" %}
  ins_encode %{
    __ fminh($dst$$FloatRegister,
             $src1$$FloatRegister,
             $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.max(FF)F
// Single instruction: hardware fmax handles the NaN and +/-0.0 ordering
// cases directly -- presumably matching Java Math.max semantics; confirm
// against the Arm ARM FMAX description.
instruct maxF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxF src1 src2));

  format %{ "fmaxs   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.min(FF)F
instruct minF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinF src1 src2));

  format %{ "fmins   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.max(DD)D
instruct maxD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MaxD src1 src2));

  format %{ "fmaxd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Math.min(DD)D
instruct minD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MinD src1 src2));

  format %{ "fmind   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
14046 
// Half-precision (FP16) scalar divide.
instruct divHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivHF src1  src2));
  format %{ "fdivh $dst, $src1, $src2" %}
  ins_encode %{
    __ fdivh($dst$$FloatRegister,
             $src1$$FloatRegister,
             $src2$$FloatRegister);
  %}
  ins_pipe(fp_div_s);
%}

// Single-precision scalar divide.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// Double-precision scalar divide (higher cost than single precision).
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
14087 
// Single-precision negate (flips the sign bit only; encoded insn is fnegs,
// though the format prints "fneg").
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  format %{ "fneg   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double-precision negate.
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
14115 
// Integer abs: compare with zero, then conditional negate when LT.
// The cnegw negation wraps, so AbsI(Integer.MIN_VALUE) yields MIN_VALUE,
// which matches Java's Math.abs(int) contract. Clobbers the flags (KILL cr).
instruct absI_reg(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (AbsI src));

  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $src, zr\n\t"
            "cnegw $dst, $src, Assembler::LT\t# int abs"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cnegw(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}

// Long abs: same compare/conditional-negate scheme as absI_reg, 64-bit.
instruct absL_reg(iRegLNoSp dst, iRegL src, rFlagsReg cr)
%{
  match(Set dst (AbsL src));

  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmp  $src, zr\n\t"
            "cneg $dst, $src, Assembler::LT\t# long abs"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cneg(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
14149 
// Single-precision absolute value (clears the sign bit).
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double-precision absolute value.
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}

// |src1 - src2|, single precision: fuses AbsF(SubF(...)) into one fabd.
instruct absdF_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AbsF (SubF src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "fabds   $dst, $src1, $src2" %}
  ins_encode %{
    __ fabds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// |src1 - src2|, double precision.
instruct absdD_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AbsD (SubD src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "fabdd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fabdd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
14203 
// Double-precision square root.
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Fixed: was fp_div_s -- the pipeline classes of sqrtD_reg and sqrtF_reg
  // were swapped; the double-precision op belongs in the double divide pipe.
  ins_pipe(fp_div_d);
%}

// Single-precision square root.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (SqrtF src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Fixed: was fp_div_d (see note on sqrtD_reg in this same change).
  ins_pipe(fp_div_s);
%}
14229 
// Half-precision (FP16) square root.
instruct sqrtHF_reg(vRegF dst, vRegF src) %{
  match(Set dst (SqrtHF src));
  format %{ "fsqrth $dst, $src" %}
  ins_encode %{
    __ fsqrth($dst$$FloatRegister,
              $src$$FloatRegister);
  %}
  ins_pipe(fp_div_s);
%}
14239 
// Math.rint, floor, ceil
// RoundDoubleMode with a constant mode selects the matching round-to-integral
// instruction: frintn (ties-to-even), frintm (toward -inf), frintp (toward +inf).
instruct roundD_reg(vRegD dst, vRegD src, immI rmode) %{
  match(Set dst (RoundDoubleMode src rmode));
  format %{ "frint  $dst, $src, $rmode" %}
  ins_encode %{
    switch ($rmode$$constant) {
      case RoundDoubleModeNode::rmode_rint:
        __ frintnd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_floor:
        __ frintmd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_ceil:
        __ frintpd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      default:
        // An unexpected mode is a compiler bug; fail fast rather than
        // silently emitting no instruction.
        ShouldNotReachHere();
    }
  %}
  ins_pipe(fp_uop_d);
%}
14262 
// CopySignD: magnitude of src1 with the sign of src2.
// 'zero' is expected to hold +0.0: fnegd(dst, zero) produces -0.0, i.e. a
// sign-bit-only mask, then bsl takes the sign bit from src2 and all other
// bits from src1.
instruct copySignD_reg(vRegD dst, vRegD src1, vRegD src2, vRegD zero) %{
  match(Set dst (CopySignD src1 (Binary src2 zero)));
  effect(TEMP_DEF dst, USE src1, USE src2, USE zero);
  format %{ "CopySignD  $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg),
                  zero = as_FloatRegister($zero$$reg);
    __ fnegd(dst, zero);
    __ bsl(dst, __ T8B, src2, src1);
  %}
  ins_pipe(fp_uop_d);
%}

// CopySignF: same scheme as copySignD_reg, but the 0x80000000 sign mask is
// materialized directly with movi instead of negating a zero operand.
instruct copySignF_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (CopySignF src1 src2));
  effect(TEMP_DEF dst, USE src1, USE src2);
  format %{ "CopySignF  $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg);
    __ movi(dst, __ T2S, 0x80, 24);
    __ bsl(dst, __ T8B, src2, src1);
  %}
  ins_pipe(fp_uop_d);
%}
14291 
// Math.signum(double): returns +-1.0 for nonzero finite input, and the input
// itself for +-0.0 and NaN. 'zero' and 'one' are expected to hold 0.0 and 1.0.
instruct signumD_reg(vRegD dst, vRegD src, vRegD zero, vRegD one) %{
  match(Set dst (SignumD src (Binary zero one)));
  effect(TEMP_DEF dst, USE src, USE zero, USE one);
  format %{ "signumD  $dst, $src" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg),
                  dst = as_FloatRegister($dst$$reg),
                  zero = as_FloatRegister($zero$$reg),
                  one = as_FloatRegister($one$$reg);
    __ facgtd(dst, src, zero); // dst=0 for +-0.0 and NaN. 0xFFF..F otherwise
    __ ushrd(dst, dst, 1);     // dst=0 for +-0.0 and NaN. 0x7FF..F otherwise
    // Bit selection instruction gets bit from "one" for each enabled bit in
    // "dst", otherwise gets a bit from "src". For "src" that contains +-0.0 or
    // NaN the whole "src" will be copied because "dst" is zero. For all other
    // "src" values dst is 0x7FF..F, which means only the sign bit is copied
    // from "src", and all other bits are copied from 1.0.
    __ bsl(dst, __ T8B, one, src);
  %}
  ins_pipe(fp_uop_d);
%}

// Math.signum(float): single-precision counterpart of signumD_reg.
instruct signumF_reg(vRegF dst, vRegF src, vRegF zero, vRegF one) %{
  match(Set dst (SignumF src (Binary zero one)));
  effect(TEMP_DEF dst, USE src, USE zero, USE one);
  format %{ "signumF  $dst, $src" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg),
                  dst = as_FloatRegister($dst$$reg),
                  zero = as_FloatRegister($zero$$reg),
                  one = as_FloatRegister($one$$reg);
    __ facgts(dst, src, zero);    // dst=0 for +-0.0 and NaN. 0xFFF..F otherwise
    __ ushr(dst, __ T2S, dst, 1); // dst=0 for +-0.0 and NaN. 0x7FF..F otherwise
    // Bit selection instruction gets bit from "one" for each enabled bit in
    // "dst", otherwise gets a bit from "src". For "src" that contains +-0.0 or
    // NaN the whole "src" will be copied because "dst" is zero. For all other
    // "src" values dst is 0x7FF..F, which means only the sign bit is copied
    // from "src", and all other bits are copied from 1.0.
    __ bsl(dst, __ T8B, one, src);
  %}
  ins_pipe(fp_uop_d);
%}
14333 
// Thread.onSpinWait() intrinsic: spin_wait() emits the platform-configured
// pause sequence (exact instruction selection is decided in the macro
// assembler, not here).
instruct onspinwait() %{
  match(OnSpinWait);
  ins_cost(INSN_COST);

  format %{ "onspinwait" %}

  ins_encode %{
    __ spin_wait();
  %}
  ins_pipe(pipe_class_empty);
%}
14345 
14346 // ============================================================================
14347 // Logical Instructions
14348 
14349 // Integer Logical Instructions
14350 
14351 // And Instructions
14352 
14353 
// 32-bit bitwise AND, register-register.
// NOTE(review): 'cr' is declared but not referenced by the match rule or an
// effect(); presumably reserved for a flag-setting (ands) variant -- confirm.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit bitwise AND with a logical immediate.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Fixed: format previously printed "andsw", but the encoding emits the
  // non-flag-setting andw instruction.
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14383 
14384 // Or Instructions
14385 
// 32-bit bitwise OR, register-register.
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit bitwise OR with a logical immediate.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14415 
14416 // Xor Instructions
14417 
// 32-bit bitwise XOR, register-register.
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit bitwise XOR with a logical immediate.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14447 
14448 // Long Logical Instructions
14449 // TODO
14450 
// 64-bit bitwise AND, register-register.
// NOTE(review): 'cr' is declared but unused, mirroring andI_reg_reg -- confirm.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Fixed: format comment said "# int" for a long operation.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 64-bit bitwise AND with a logical immediate.
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Fixed: format comment said "# int" for a long operation.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14480 
14481 // Or Instructions
14482 
// 64-bit bitwise OR, register-register.
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  // Fixed: format comment said "# int" for a long operation.
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 64-bit bitwise OR with a logical immediate.
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  // Fixed: format comment said "# int" for a long operation.
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14512 
14513 // Xor Instructions
14514 
// 64-bit bitwise XOR, register-register.
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  // Fixed: format comment said "# int" for a long operation.
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 64-bit bitwise XOR with a logical immediate.
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  // Fixed: format comment said "# int" for a long operation.
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14544 
// Sign-extend int to long: sbfm with imms=31 is the canonical sxtw encoding.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// Unsigned int-to-long: the (ConvI2L src) & 0xFFFFFFFF shape collapses to a
// single zero-extend (ubfm 0,31 == uxtw).
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14570 
// Truncate long to int: a 32-bit register move keeps the low word and
// zeroes the upper half.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
14583 
// Narrow double to float (fcvt from double source).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// Widen float to double (fcvt from single source; exact, no rounding).
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}
14609 
// Float to int: fcvtzs rounds toward zero and saturates on overflow
// (NaN converts to 0) -- presumably relied on for Java (int) cast
// semantics; confirm against the Arm ARM FCVTZS description.
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// Float to long: 64-bit variant of convF2I_reg_reg.
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}
14635 
// Float to half-float bits: the macro-assembler helper converts in $tmp and
// moves the 16-bit result into the integer register $dst.
instruct convF2HF_reg_reg(iRegINoSp dst, vRegF src, vRegF tmp) %{
  match(Set dst (ConvF2HF src));
  format %{ "fcvt $tmp, $src\t# convert single to half precision\n\t"
            "smov $dst, $tmp\t# move result from $tmp to $dst"
  %}
  effect(TEMP tmp);
  ins_encode %{
      __ flt_to_flt16($dst$$Register, $src$$FloatRegister, $tmp$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}

// Half-float bits to float: inverse of convF2HF_reg_reg, staging the
// 16-bit value through $tmp before widening.
instruct convHF2F_reg_reg(vRegF dst, iRegINoSp src, vRegF tmp) %{
  match(Set dst (ConvHF2F src));
  format %{ "mov $tmp, $src\t# move source from $src to $tmp\n\t"
            "fcvt $dst, $tmp\t# convert half to single precision"
  %}
  effect(TEMP tmp);
  ins_encode %{
      __ flt16_to_flt($dst$$FloatRegister, $src$$Register, $tmp$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
14659 
// Signed int to float (scvtf, 32-bit source).
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// Signed long to float (scvtf, 64-bit source; may round).
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}
14685 
14686 instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
14687   match(Set dst (ConvD2I src));
14688 
14689   ins_cost(INSN_COST * 5);
14690   format %{ "fcvtzdw  $dst, $src \t// d2i" %}
14691 
14692   ins_encode %{
14693     __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
14694   %}
14695 
14696   ins_pipe(fp_d2i);
14697 %}
14698 
14699 instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
14700   match(Set dst (ConvD2L src));
14701 
14702   ins_cost(INSN_COST * 5);
14703   format %{ "fcvtzd  $dst, $src \t// d2l" %}
14704 
14705   ins_encode %{
14706     __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
14707   %}
14708 
14709   ins_pipe(fp_d2l);
14710 %}
14711 
14712 instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
14713   match(Set dst (ConvI2D src));
14714 
14715   ins_cost(INSN_COST * 5);
14716   format %{ "scvtfwd  $dst, $src \t// i2d" %}
14717 
14718   ins_encode %{
14719     __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
14720   %}
14721 
14722   ins_pipe(fp_i2d);
14723 %}
14724 
14725 instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
14726   match(Set dst (ConvL2D src));
14727 
14728   ins_cost(INSN_COST * 5);
14729   format %{ "scvtfd  $dst, $src \t// l2d" %}
14730 
14731   ins_encode %{
14732     __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
14733   %}
14734 
14735   ins_pipe(fp_l2d);
14736 %}
14737 
// Java-semantics Math.round for double (RoundD): delegates to the
// java_round_double macro; needs an FP temp and clobbers flags, and dst must
// not overlap the inputs (TEMP_DEF).
instruct round_double_reg(iRegLNoSp dst, vRegD src, vRegD ftmp, rFlagsReg cr)
%{
  match(Set dst (RoundD src));
  effect(TEMP_DEF dst, TEMP ftmp, KILL cr);
  format %{ "java_round_double $dst,$src"%}
  ins_encode %{
    __ java_round_double($dst$$Register, as_FloatRegister($src$$reg),
                         as_FloatRegister($ftmp$$reg));
  %}
  ins_pipe(pipe_slow);
%}

// Java-semantics Math.round for float (RoundF); same structure as the
// double variant above.
instruct round_float_reg(iRegINoSp dst, vRegF src, vRegF ftmp, rFlagsReg cr)
%{
  match(Set dst (RoundF src));
  effect(TEMP_DEF dst, TEMP ftmp, KILL cr);
  format %{ "java_round_float $dst,$src"%}
  ins_encode %{
    __ java_round_float($dst$$Register, as_FloatRegister($src$$reg),
                        as_FloatRegister($ftmp$$reg));
  %}
  ins_pipe(pipe_slow);
%}
14761 
// stack <-> reg and reg <-> reg shuffles with no conversion
// These move raw bit patterns between integer/FP registers and stack slots;
// the value is reinterpreted, never numerically converted.

// Load the 32 raw bits of a float stack slot into an int register.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load the 32 raw bits of an int stack slot into a float register.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Load the 64 raw bits of a double stack slot into a long register.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load the 64 raw bits of a long stack slot into a double register.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14835 
// Store the 32 raw bits of a float register to an int stack slot.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Store the 32 raw bits of an int register to a float stack slot.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
14871 
// Store the 64 raw bits of a double register to a long stack slot
// (bit-pattern move, no conversion).
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Fixed operand order in the format string: the encoding emits
  // "strd src, [sp, #dst_disp]", so the disassembly text must read
  // "$src, $dst" — matching MoveF2I_reg_stack / MoveI2F_reg_stack /
  // MoveL2D_reg_stack (it previously printed "$dst, $src").
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14889 
// Store the 64 raw bits of a long register to a double stack slot.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
14907 
// Register-to-register bit moves between FP and integer files via fmov.

// Move float bits FP -> int register.
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// Move int bits int -> FP register.
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// Move double bits FP -> long register.
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// Move long bits long -> FP register.
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
14979 
14980 // ============================================================================
14981 // clearing of an array
14982 
14983 instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
14984 %{
14985   match(Set dummy (ClearArray cnt base));
14986   effect(USE_KILL cnt, USE_KILL base, KILL cr);
14987 
14988   ins_cost(4 * INSN_COST);
14989   format %{ "ClearArray $cnt, $base" %}
14990 
14991   ins_encode %{
14992     address tpc = __ zero_words($base$$Register, $cnt$$Register);
14993     if (tpc == nullptr) {
14994       ciEnv::current()->record_failure("CodeCache is full");
14995       return;
14996     }
14997   %}
14998 
14999   ins_pipe(pipe_class_memory);
15000 %}
15001 
15002 instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, iRegL_R11 temp, Universe dummy, rFlagsReg cr)
15003 %{
15004   predicate((uint64_t)n->in(2)->get_long()
15005             < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
15006   match(Set dummy (ClearArray cnt base));
15007   effect(TEMP temp, USE_KILL base, KILL cr);
15008 
15009   ins_cost(4 * INSN_COST);
15010   format %{ "ClearArray $cnt, $base" %}
15011 
15012   ins_encode %{
15013     address tpc = __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
15014     if (tpc == nullptr) {
15015       ciEnv::current()->record_failure("CodeCache is full");
15016       return;
15017     }
15018   %}
15019 
15020   ins_pipe(pipe_class_memory);
15021 %}
15022 
15023 // ============================================================================
15024 // Overflow Math Instructions
15025 
15026 instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
15027 %{
15028   match(Set cr (OverflowAddI op1 op2));
15029 
15030   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
15031   ins_cost(INSN_COST);
15032   ins_encode %{
15033     __ cmnw($op1$$Register, $op2$$Register);
15034   %}
15035 
15036   ins_pipe(icmp_reg_reg);
15037 %}
15038 
15039 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
15040 %{
15041   match(Set cr (OverflowAddI op1 op2));
15042 
15043   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
15044   ins_cost(INSN_COST);
15045   ins_encode %{
15046     __ cmnw($op1$$Register, $op2$$constant);
15047   %}
15048 
15049   ins_pipe(icmp_reg_imm);
15050 %}
15051 
15052 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
15053 %{
15054   match(Set cr (OverflowAddL op1 op2));
15055 
15056   format %{ "cmn   $op1, $op2\t# overflow check long" %}
15057   ins_cost(INSN_COST);
15058   ins_encode %{
15059     __ cmn($op1$$Register, $op2$$Register);
15060   %}
15061 
15062   ins_pipe(icmp_reg_reg);
15063 %}
15064 
15065 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
15066 %{
15067   match(Set cr (OverflowAddL op1 op2));
15068 
15069   format %{ "adds  zr, $op1, $op2\t# overflow check long" %}
15070   ins_cost(INSN_COST);
15071   ins_encode %{
15072     __ adds(zr, $op1$$Register, $op2$$constant);
15073   %}
15074 
15075   ins_pipe(icmp_reg_imm);
15076 %}
15077 
15078 instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
15079 %{
15080   match(Set cr (OverflowSubI op1 op2));
15081 
15082   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
15083   ins_cost(INSN_COST);
15084   ins_encode %{
15085     __ cmpw($op1$$Register, $op2$$Register);
15086   %}
15087 
15088   ins_pipe(icmp_reg_reg);
15089 %}
15090 
15091 instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
15092 %{
15093   match(Set cr (OverflowSubI op1 op2));
15094 
15095   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
15096   ins_cost(INSN_COST);
15097   ins_encode %{
15098     __ cmpw($op1$$Register, $op2$$constant);
15099   %}
15100 
15101   ins_pipe(icmp_reg_imm);
15102 %}
15103 
15104 instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
15105 %{
15106   match(Set cr (OverflowSubL op1 op2));
15107 
15108   format %{ "cmp   $op1, $op2\t# overflow check long" %}
15109   ins_cost(INSN_COST);
15110   ins_encode %{
15111     __ cmp($op1$$Register, $op2$$Register);
15112   %}
15113 
15114   ins_pipe(icmp_reg_reg);
15115 %}
15116 
15117 instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
15118 %{
15119   match(Set cr (OverflowSubL op1 op2));
15120 
15121   format %{ "cmp   $op1, $op2\t# overflow check long" %}
15122   ins_cost(INSN_COST);
15123   ins_encode %{
15124     __ subs(zr, $op1$$Register, $op2$$constant);
15125   %}
15126 
15127   ins_pipe(icmp_reg_imm);
15128 %}
15129 
15130 instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
15131 %{
15132   match(Set cr (OverflowSubI zero op1));
15133 
15134   format %{ "cmpw  zr, $op1\t# overflow check int" %}
15135   ins_cost(INSN_COST);
15136   ins_encode %{
15137     __ cmpw(zr, $op1$$Register);
15138   %}
15139 
15140   ins_pipe(icmp_reg_imm);
15141 %}
15142 
15143 instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
15144 %{
15145   match(Set cr (OverflowSubL zero op1));
15146 
15147   format %{ "cmp   zr, $op1\t# overflow check long" %}
15148   ins_cost(INSN_COST);
15149   ins_encode %{
15150     __ cmp(zr, $op1$$Register);
15151   %}
15152 
15153   ins_pipe(icmp_reg_imm);
15154 %}
15155 
// Int multiply overflow check: do the multiply at 64 bits, then compare the
// 64-bit product with its own 32-bit sign extension; a mismatch (NE) means
// the product does not fit in an int. The cselw/cmpw tail translates that
// NE into the V flag so generic overflow consumers can test VS/VC.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused form: OverflowMulI feeding an If. Skips the V-flag materialization
// and branches directly on the NE/EQ result of the sign-extension compare
// (predicate restricts this to overflow/no_overflow tests only).
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}

// Long multiply overflow check: the 128-bit product's high half (smulh)
// must equal the sign extension of the low half (mul); otherwise overflow.
// Same cselw/cmpw tail as overflowMulI_reg to synthesize the V flag.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused form: OverflowMulL feeding an If; branches directly on NE/EQ.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
15245 
15246 // ============================================================================
15247 // Compare Instructions
15248 
15249 instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
15250 %{
15251   match(Set cr (CmpI op1 op2));
15252 
15253   effect(DEF cr, USE op1, USE op2);
15254 
15255   ins_cost(INSN_COST);
15256   format %{ "cmpw  $op1, $op2" %}
15257 
15258   ins_encode(aarch64_enc_cmpw(op1, op2));
15259 
15260   ins_pipe(icmp_reg_reg);
15261 %}
15262 
15263 instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
15264 %{
15265   match(Set cr (CmpI op1 zero));
15266 
15267   effect(DEF cr, USE op1);
15268 
15269   ins_cost(INSN_COST);
15270   format %{ "cmpw $op1, 0" %}
15271 
15272   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
15273 
15274   ins_pipe(icmp_reg_imm);
15275 %}
15276 
15277 instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
15278 %{
15279   match(Set cr (CmpI op1 op2));
15280 
15281   effect(DEF cr, USE op1);
15282 
15283   ins_cost(INSN_COST);
15284   format %{ "cmpw  $op1, $op2" %}
15285 
15286   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
15287 
15288   ins_pipe(icmp_reg_imm);
15289 %}
15290 
15291 instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
15292 %{
15293   match(Set cr (CmpI op1 op2));
15294 
15295   effect(DEF cr, USE op1);
15296 
15297   ins_cost(INSN_COST * 2);
15298   format %{ "cmpw  $op1, $op2" %}
15299 
15300   ins_encode(aarch64_enc_cmpw_imm(op1, op2));
15301 
15302   ins_pipe(icmp_reg_imm);
15303 %}
15304 
// Unsigned compare Instructions; really, same as signed compare
// except it should only be used to feed an If or a CMovI which takes a
// cmpOpU.

// Unsigned int compare, register-register (flags typed rFlagsRegU).
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned int compare against constant zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an arbitrary immediate.
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15364 
// Signed long compare, register-register.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed long compare against constant zero.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an arbitrary immediate (doubled cost for
// possible immediate materialization).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15420 
// Unsigned long compare, register-register (flags typed rFlagsRegU so only
// unsigned condition codes are applied by consumers).
instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned long compare against constant zero.
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare against an add/sub-encodable immediate.
instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare against an arbitrary immediate.
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15476 
// Pointer compare (unsigned flags — addresses are unsigned).
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed-pointer (narrow oop) compare.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Pointer null-test: compare against constant null.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Narrow-oop null-test: compare against constant narrow null.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
15532 
// FP comparisons
//
// n.b. CmpF/CmpD set a normal flags reg which then gets compared
// using normal cmpOp. See declaration of rFlagsReg for details.

// Float compare, register-register.
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Float compare against the constant +0.0 (fcmps immediate-zero form).
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
// FROM HERE

// Double compare, register-register.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Double compare against the constant +0.0.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
15594 
// Three-way float compare (CmpF3): produce -1 / 0 / +1 in an int register.
// csinv gives 0 on EQ else -1; csneg then keeps -1 for LT (less or
// unordered) and negates to +1 otherwise.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}

// Three-way double compare (CmpD3); same -1/0/+1 scheme as compF3_reg_reg.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}

// Three-way float compare against constant +0.0.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}

// Three-way double compare against constant +0.0.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}
15702 
// CmpLTMask: dst = (p < q signed) ? -1 : 0. Set dst to 1 via csetw on LT,
// then negate with subw so the result is an all-ones/all-zeros mask.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// CmpLTMask against zero: an arithmetic shift right by 31 replicates the
// sign bit, giving the -1/0 mask in a single instruction.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
15739 
15740 // ============================================================================
15741 // Max and Min
15742 
15743 // Like compI_reg_reg or compI_reg_immI0 but without match rule and second zero parameter.
15744 
15745 instruct compI_reg_imm0(rFlagsReg cr, iRegI src)
15746 %{
15747   effect(DEF cr, USE src);
15748   ins_cost(INSN_COST);
15749   format %{ "cmpw $src, 0" %}
15750 
15751   ins_encode %{
15752     __ cmpw($src$$Register, 0);
15753   %}
15754   ins_pipe(icmp_reg_imm);
15755 %}
15756 
15757 instruct minI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2)
15758 %{
15759   match(Set dst (MinI src1 src2));
15760   ins_cost(INSN_COST * 3);
15761 
15762   expand %{
15763     rFlagsReg cr;
15764     compI_reg_reg(cr, src1, src2);
15765     cmovI_reg_reg_lt(dst, src1, src2, cr);
15766   %}
15767 %}
15768 
15769 instruct maxI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2)
15770 %{
15771   match(Set dst (MaxI src1 src2));
15772   ins_cost(INSN_COST * 3);
15773 
15774   expand %{
15775     rFlagsReg cr;
15776     compI_reg_reg(cr, src1, src2);
15777     cmovI_reg_reg_gt(dst, src1, src2, cr);
15778   %}
15779 %}
15780 
15781 
15782 // ============================================================================
15783 // Branch Instructions
15784 
15785 // Direct Branch.
// Unconditional direct branch (ideal Goto). Emits a single `b` to
// the target label.
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
15799 
15800 // Conditional Near Branch
// Conditional near branch on signed condition codes (ideal If on
// rFlagsReg). Reads, but does not modify, the flags.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}

// Conditional near branch on unsigned condition codes. Identical in
// shape to branchCon but matches the unsigned flags register and
// condition operand.
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
15841 
15842 // Make use of CBZ and CBNZ.  These instructions, as well as being
15843 // shorter than (cmp; branch), have the additional benefit of not
15844 // killing the flags.
15845 
// Compare-and-branch of an int against zero, fused into a single
// cbzw/cbnzw. Per the note above, cbz/cbnz do not modify the flags;
// the cr operand is declared but carries no KILL effect.
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // Only EQ/NE can reach here (cmpOpEqNe operand).
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Same fusion for a long compared against zero (64-bit cbz/cbnz).
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Pointer null-check and branch (64-bit cbz/cbnz).
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Narrow-oop (compressed pointer) null-check and branch; a narrow
// oop is 32 bits, so the w-form cbzw/cbnzw is used.
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Null check of a DecodeN'd oop against a pointer zero: the decode
// can be elided because the narrow oop is zero iff the decoded oop
// is null, so we test the 32-bit narrow value directly.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15930 
// Unsigned int compare-and-branch against zero. For unsigned x,
// x <= 0 (LS) is equivalent to x == 0 and x > 0 (HI) to x != 0,
// so all four conditions of cmpOpUEqNeLeGt collapse onto cbzw/cbnzw.
instruct cmpUI_imm0_branch(cmpOpUEqNeLeGt cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS) {
      __ cbzw($op1$$Register, *L);
    } else {
      assert(cond == Assembler::NE || cond == Assembler::HI, "unexpected condition");
      __ cbnzw($op1$$Register, *L);
    }
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned long compare-and-branch against zero; same condition
// collapsing as cmpUI_imm0_branch but with 64-bit cbz/cbnz.
instruct cmpUL_imm0_branch(cmpOpUEqNeLeGt cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS) {
      __ cbz($op1$$Register, *L);
    } else {
      assert(cond == Assembler::NE || cond == Assembler::HI, "unexpected condition");
      __ cbnz($op1$$Register, *L);
    }
  %}
  ins_pipe(pipe_cmp_branch);
%}
15968 
15969 // Test bit and Branch
15970 
15971 // Patterns for short (< 32KiB) variants
// Branch on the sign of a long: x < 0 iff bit 63 is set, so the
// signed LT/GE test becomes a single tbnz/tbz on bit 63.
// LT maps to NE ("bit set"), GE to EQ ("bit clear").
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Int variant of the sign-bit branch: tests bit 31.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Branch on a single bit of a long: (op1 & (1 << k)) == / != 0
// becomes tbz/tbnz on bit k. The predicate restricts the AND mask
// to a power of two so exactly one bit is tested.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // Bit index recovered from the power-of-two mask.
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Int variant of the single-bit branch.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16037 
16038 // And far variants
// Far variant of cmpL_branch_sign: same sign-bit test, but tbr is
// told the target may be out of tbz/tbnz range (/*far*/true) so it
// can emit an inverted test around an unconditional branch.
// No ins_short_branch(1) here — these are the long forms.
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of cmpI_branch_sign (bit 31).
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of cmpL_branch_bit.
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of cmpI_branch_bit.
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16100 
16101 // Test bits
16102 
// Bit test of a long against an immediate mask: sets flags from
// (op1 & op2) without materializing the AND result. Only matches
// when the mask is encodable as a 64-bit logical immediate.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
16115 
// Bit test of an int against an immediate mask: sets flags from
// (op1 & op2) without materializing the AND result. Only matches
// when the mask is encodable as a 32-bit logical immediate.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  // Display "tstw" to match the 32-bit instruction actually emitted
  // below, and for consistency with cmpI_and_reg.
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
16128 
// Register-register form of the long bit test: flags from
// (op1 & op2), no result register written, no predicate needed.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Register-register form of the int bit test (32-bit tstw).
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
16150 
16151 
16152 // Conditional Far Branch
16153 // Conditional Far Branch Unsigned
16154 // TODO: fixme
16155 
16156 // counted loop end branch near
// Back-branch at the end of a counted loop; shares the conditional
// branch encoding with branchCon.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
16172 
16173 // counted loop end branch far
16174 // TODO: fixme
16175 
16176 // ============================================================================
16177 // inlined locking and unlocking
16178 
// Inline monitor enter for the legacy (non-lightweight) locking
// modes; the heavy lifting is in MacroAssembler::fast_lock. Result
// is communicated through the flags (Set cr).
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
  predicate(LockingMode != LM_LIGHTWEIGHT);
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2, TEMP tmp3);

  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2,$tmp3" %}

  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_serial);
%}

// Inline monitor exit counterpart for the legacy locking modes.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  predicate(LockingMode != LM_LIGHTWEIGHT);
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register);
  %}

  ins_pipe(pipe_serial);
%}

// Monitor enter when LockingMode == LM_LIGHTWEIGHT; selected by
// predicate, mutually exclusive with cmpFastLock above.
instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
  predicate(LockingMode == LM_LIGHTWEIGHT);
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2, TEMP tmp3);

  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2,$tmp3" %}

  ins_encode %{
    __ fast_lock_lightweight($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_serial);
%}

// Monitor exit for the lightweight locking mode.
instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
  predicate(LockingMode == LM_LIGHTWEIGHT);
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2, TEMP tmp3);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2, $tmp3" %}

  ins_encode %{
    __ fast_unlock_lightweight($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_serial);
%}
16242 
16243 // ============================================================================
16244 // Safepoint Instructions
16245 
16246 // TODO
16247 // provide a near and far version of this code
16248 
// Safepoint poll: load from the polling page; a disarmed page reads
// normally, an armed page traps and diverts the thread to the
// safepoint handler. The loaded value is discarded (ldrw to zr).
instruct safePoint(rFlagsReg cr, iRegP poll)
%{
  match(SafePoint poll);
  effect(KILL cr);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
16262 
16263 
16264 // ============================================================================
16265 // Procedure Call/Return Instructions
16266 
16267 // Call Java Static Instruction
16268 
// Direct call to a statically-bound Java method, followed by the
// standard call epilog encoding.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode(aarch64_enc_java_static_call(meth),
             aarch64_enc_call_epilog);

  ins_pipe(pipe_class_call);
%}
16284 
16285 // TO HERE
16286 
16287 // Call Java Dynamic Instruction
// Dynamically-dispatched Java call (virtual/interface), resolved
// through the inline-cache machinery in the dynamic-call encoding.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode(aarch64_enc_java_dynamic_call(meth),
             aarch64_enc_call_epilog);

  ins_pipe(pipe_class_call);
%}
16303 
16304 // Call Runtime Instruction
16305 
// Call from compiled Java code into the VM runtime. All four
// runtime-call instructs below share the java_to_runtime encoding;
// they differ only in the ideal node matched (and hence in the
// safepoint/FP-save semantics C2 attributes to the call).
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Leaf runtime call: no safepoint, callee does not call back into
// Java.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction without safepoint and with vector arguments
instruct CallLeafDirectVector(method meth)
%{
  match(CallLeafVector);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf vector $meth" %}

  ins_encode(aarch64_enc_java_to_runtime(meth));

  ins_pipe(pipe_class_call);
%}

// Leaf runtime call that additionally does not use floating-point
// registers.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16370 
16371 // Tail Call; Jump from runtime stub to Java code.
16372 // Also known as an 'interprocedural jump'.
16373 // Target of jump will eventually return to caller.
16374 // TailJump below removes the return address.
16375 // Don't use rfp for 'jump_target' because a MachEpilogNode has already been
16376 // emitted just above the TailCall which has reset rfp to the caller state.
// Indirect tail call: jump (not call) to jump_target with the
// method pointer live in inline_cache_RegP. rfp is excluded from
// jump_target because the epilog has already restored the caller's
// frame (see comment above).
instruct TailCalljmpInd(iRegPNoSpNoRfp jump_target, inline_cache_RegP method_ptr)
%{
  match(TailCall jump_target method_ptr);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_ptr holds method" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}

// Interprocedural jump used for exception dispatch: the exception
// oop travels in r0 and the return address has been removed.
instruct TailjmpInd(iRegPNoSpNoRfp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
16402 
16403 // Forward exception.
// Forward a pending exception to the caller by jumping to the
// shared forward-exception stub. Uses far_jump because the stub may
// be out of plain branch range.
instruct ForwardExceptionjmp()
%{
  match(ForwardException);
  ins_cost(CALL_COST);

  format %{ "b forward_exception_stub" %}
  ins_encode %{
    __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
  %}
  ins_pipe(pipe_class_call);
%}
16415 
16416 // Create exception oop: created by stack-crawling runtime code.
16417 // Created exception is now available to this handler, and is setup
16418 // just prior to jumping to this handler. No code emitted.
16419 // TODO check
16420 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Bind the incoming exception oop (already placed in r0 by the
// stack-crawling runtime) to a register operand. Purely a register
// allocation artifact: size(0), no code emitted.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
16433 
16434 // Rethrow exception: The exception oop will come in the first
16435 // argument position. Then JUMP (not call) to the rethrow stub code.
// Rethrow the exception in the first argument register by jumping
// (not calling) to the rethrow stub.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
16446 
16447 
16448 // Return Instruction
16449 // epilog node loads ret address into lr as part of frame pop
// Method return; lr was reloaded by the epilog node during frame
// pop, so a bare ret suffices.
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
16460 
16461 // Die now.
// Ideal Halt node: code that must never execute. Emits a stop (with
// the halt reason as message) only when the block is reachable;
// unreachable halts emit nothing.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    if (is_reachable()) {
      const char* str = __ code_string(_halt_reason);
      __ stop(str);
    }
  %}

  ins_pipe(pipe_class_default);
%}
16477 
16478 // ============================================================================
16479 // Partial Subtype Check
16480 //
16481 // superklass array for an instance of the superklass.  Set a hidden
16482 // internal cache on a hit (cache is checked with exposed code in
16483 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
16484 // encoding ALSO sets flags.
16485 
// Linear-scan partial subtype check (legacy path, selected when
// secondary-supers hashing is disabled). Returns NZ for a miss,
// zero for a hit; also sets flags (see header comment above).
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  predicate(!UseSecondarySupersTable);
  effect(KILL cr, KILL temp);

  ins_cost(20 * INSN_COST);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
16501 
16502 // Two versions of partialSubtypeCheck, both used when we need to
16503 // search for a super class in the secondary supers array. The first
16504 // is used when we don't know _a priori_ the class being searched
16505 // for. The second, far more common, is used when we do know: this is
16506 // used for instanceof, checkcast, and any case where C2 can determine
16507 // it by constant propagation.
16508 
// Hashed secondary-supers lookup when the superclass is NOT known at
// compile time (see the comment block above). Fixed register
// operands match the calling convention of the lookup helper.
instruct partialSubtypeCheckVarSuper(iRegP_R4 sub, iRegP_R0 super, vRegD_V0 vtemp, iRegP_R5 result,
                                     iRegP_R1 tempR1, iRegP_R2 tempR2, iRegP_R3 tempR3,
                                     rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  predicate(UseSecondarySupersTable);
  effect(KILL cr, TEMP tempR1, TEMP tempR2, TEMP tempR3, TEMP vtemp);

  ins_cost(10 * INSN_COST);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode %{
    // No success label: the caller tests $result instead.
    __ lookup_secondary_supers_table_var($sub$$Register, $super$$Register,
                                         $tempR1$$Register, $tempR2$$Register, $tempR3$$Register,
                                         $vtemp$$FloatRegister,
                                         $result$$Register, /*L_success*/nullptr);
  %}

  ins_pipe(pipe_class_memory);
%}
16529 
// Hashed secondary-supers lookup when the superclass IS a compile-
// time constant (instanceof/checkcast fast path). Either inlines the
// table probe or calls a per-slot stub, depending on
// InlineSecondarySupersTest; both paths can fail if the code cache
// is full, in which case compilation is bailed out.
instruct partialSubtypeCheckConstSuper(iRegP_R4 sub, iRegP_R0 super_reg, immP super_con, vRegD_V0 vtemp, iRegP_R5 result,
                                       iRegP_R1 tempR1, iRegP_R2 tempR2, iRegP_R3 tempR3,
                                       rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub (Binary super_reg super_con)));
  predicate(UseSecondarySupersTable);
  effect(KILL cr, TEMP tempR1, TEMP tempR2, TEMP tempR3, TEMP vtemp);

  ins_cost(5 * INSN_COST);  // smaller than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super_reg, $super_con" %}

  ins_encode %{
    bool success = false;
    // The constant super klass fixes the hash slot at compile time.
    u1 super_klass_slot = ((Klass*)$super_con$$constant)->hash_slot();
    if (InlineSecondarySupersTest) {
      success =
        __ lookup_secondary_supers_table_const($sub$$Register, $super_reg$$Register,
                                               $tempR1$$Register, $tempR2$$Register, $tempR3$$Register,
                                               $vtemp$$FloatRegister,
                                               $result$$Register,
                                               super_klass_slot);
    } else {
      // Out-of-line probe via a shared stub; trampoline_call returns
      // nullptr when no code-cache space is left.
      address call = __ trampoline_call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_stub(super_klass_slot)));
      success = (call != nullptr);
    }
    if (!success) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
16563 
16564 // Intrisics for String.compareTo()
16565 
// String.compareTo intrinsic, both strings UTF-16 (UU). NEON path:
// only selected when SVE is unavailable (UseSVE == 0); the SVE-free
// variants pass fnoreg/pnoreg for the unused vector/predicate slots.
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, pnoreg, pnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

// String.compareTo intrinsic, both strings Latin-1 (LL), NEON path.
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, pnoreg, pnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16600 
// Mixed-encoding compare, str1 UTF-16 vs str2 Latin-1 (UL). The
// mixed variants additionally need three vector temporaries for the
// widening comparison; predicate temps remain unused (pnoreg).
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, pnoreg, pnoreg, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Mixed-encoding compare, str1 Latin-1 vs str2 UTF-16 (LU).
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, pnoreg, pnoreg, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
16640 
16641 // Note that Z registers alias the corresponding NEON registers, we declare the vector operands of
16642 // these string_compare variants as NEON register type for convenience so that the prototype of
16643 // string_compare can be shared with all variants.
16644 
// SVE variant of the Latin-1/Latin-1 compare, selected when
// UseSVE > 0. Uses governing-predicate temporaries (pgtmp1/pgtmp2);
// per the note above, the vector operands are declared as NEON
// registers since Z registers alias them.
instruct string_compareLL_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16667 
// SVE string compare: str1 Latin1, str2 UTF-16 (LU encoding).  Same register
// pinning and temp usage as string_compareLL_sve above.
instruct string_compareLU_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
16690 
// SVE string compare: str1 UTF-16, str2 Latin1 (UL encoding).  Same register
// pinning and temp usage as string_compareLL_sve above.
instruct string_compareUL_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16713 
// SVE string compare: both strings UTF-16 (UU).  Same register pinning and
// temp usage as string_compareLL_sve above.
instruct string_compareUU_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16736 
// String indexOf with a variable-length needle, both strings UTF-16 (UU).
// The substring length cnt2 is not known at compile time, so -1 is passed as
// the constant-count argument to the macroassembler.
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                          iRegINoSp tmp3, iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
         TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    // -1 => substring length is dynamic (taken from cnt2 at runtime).
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16760 
// String indexOf with a variable-length needle, both strings Latin1 (LL).
// Mirrors string_indexofUU above; only the intrinsic encoding differs.
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
         TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    // -1 => substring length is dynamic (taken from cnt2 at runtime).
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16784 
// String indexOf with a variable-length needle, UL encoding (str1 UTF-16,
// str2 Latin1).  Mirrors string_indexofUU/LL above.
// Fix: the debug format string printed the literal text "cnt1" instead of the
// operand "$cnt1" (missing '$'), inconsistent with the UU and LL siblings.
// Also normalized the missing space after "tmp2," in the parameter list.
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5,
         TEMP tmp6, TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    // -1 => substring length is dynamic (taken from cnt2 at runtime).
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16808 
// String indexOf with a small constant-length needle (1..4 chars, immI_le_4),
// both strings UTF-16.  The constant count is passed to the macroassembler,
// so fewer temps are needed and cnt2/zr placeholders are used.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    // zr stands in for the runtime cnt2 and the unused tmp5/tmp6 registers.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16830 
// String indexOf with a small constant-length needle (1..4 bytes, immI_le_4),
// both strings Latin1.  Mirrors string_indexof_conUU above.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    // zr stands in for the runtime cnt2 and the unused tmp5/tmp6 registers.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16852 
// String indexOf with a constant needle length of exactly 1 (immI_1),
// UL encoding.  Note the tighter constant constraint than the UU/LL
// variants, which accept lengths up to 4.
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    // zr stands in for the runtime cnt2 and the unused tmp5/tmp6 registers.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16874 
// indexOf(char) in a UTF-16 string, NEON/scalar path (UseSVE == 0).
// Inputs pinned to r1/r2/r3, result in r0.
instruct string_indexof_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                             iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                             iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  predicate((UseSVE == 0) && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "StringUTF16 IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16893 
// indexOf(char) in a Latin1 string, NEON/scalar path (UseSVE == 0).
// Mirrors string_indexof_char above but dispatches to the Latin1 stub.
instruct stringL_indexof_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  predicate((UseSVE == 0) && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "StringLatin1 IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ stringL_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                            $result$$Register, $tmp1$$Register, $tmp2$$Register,
                            $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16912 
// indexOf(char) in a Latin1 string using SVE (UseSVE > 0); isL == true
// selects the Latin1 path in the shared stub.
// NOTE(review): unlike the NEON variants above, str1/cnt1/ch are not marked
// USE_KILL here -- presumably the SVE stub preserves its inputs; verify
// against string_indexof_char_sve in the macroassembler.
instruct stringL_indexof_char_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                                  iRegI_R0 result, vecA ztmp1, vecA ztmp2,
                                  pRegGov pgtmp, pReg ptmp, rFlagsReg cr) %{
  predicate(UseSVE > 0 && ((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L);
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(TEMP ztmp1, TEMP ztmp2, TEMP pgtmp, TEMP ptmp, KILL cr);
  format %{ "StringLatin1 IndexOf char[] $str1,$cnt1,$ch -> $result # use sve" %}
  ins_encode %{
    __ string_indexof_char_sve($str1$$Register, $cnt1$$Register, $ch$$Register,
                               $result$$Register, $ztmp1$$FloatRegister,
                               $ztmp2$$FloatRegister, $pgtmp$$PRegister,
                               $ptmp$$PRegister, true /* isL */);
  %}
  ins_pipe(pipe_class_memory);
%}
16928 
// indexOf(char) in a UTF-16 string using SVE (UseSVE > 0); isL == false
// selects the UTF-16 path in the shared stub.  See the USE_KILL note on
// stringL_indexof_char_sve.
instruct stringU_indexof_char_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                                  iRegI_R0 result, vecA ztmp1, vecA ztmp2,
                                  pRegGov pgtmp, pReg ptmp, rFlagsReg cr) %{
  predicate(UseSVE > 0 && ((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U);
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(TEMP ztmp1, TEMP ztmp2, TEMP pgtmp, TEMP ptmp, KILL cr);
  format %{ "StringUTF16 IndexOf char[] $str1,$cnt1,$ch -> $result # use sve" %}
  ins_encode %{
    __ string_indexof_char_sve($str1$$Register, $cnt1$$Register, $ch$$Register,
                               $result$$Register, $ztmp1$$FloatRegister,
                               $ztmp2$$FloatRegister, $pgtmp$$PRegister,
                               $ptmp$$PRegister, false /* isL */);
  %}
  ins_pipe(pipe_class_memory);
%}
16944 
// Byte-wise string equality (LL encoding): compares cnt bytes of str1 and
// str2, result (0/1) in r0.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16960 
// byte[] equality (LL): final argument 1 is the element size in bytes.
// arrays_equals may emit a call to a shared stub; a nullptr return means the
// code cache is full and compilation must bail out.
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                       vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3,
         TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         TEMP vtmp6, TEMP vtmp7, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result # KILL $ary1 $ary2 $tmp $tmp1 $tmp2 $tmp3 V0-V7 cr" %}
  ins_encode %{
    address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register,
                                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                                   $result$$Register, $tmp$$Register, 1);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
16985 
// char[] equality (UU): final argument 2 is the element size in bytes.
// Mirrors array_equalsB above, including the code-cache-full bailout.
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                       vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3,
         TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         TEMP vtmp6, TEMP vtmp7, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result # KILL $ary1 $ary2 $tmp $tmp1 $tmp2 $tmp3 V0-V7 cr" %}
  ins_encode %{
    address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register,
                                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                                   $result$$Register, $tmp$$Register, 2);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
17010 
// Vectorized Arrays.hashCode over a primitive array; basic_type is a
// compile-time constant selecting the element type.  result is both an
// input (initial hash) and the output.
instruct arrays_hashcode(iRegP_R1 ary, iRegI_R2 cnt, iRegI_R0 result, immI basic_type,
                         vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                         vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                         vRegD_V12 vtmp8, vRegD_V13 vtmp9, rFlagsReg cr)
%{
  match(Set result (VectorizedHashCode (Binary ary cnt) (Binary result basic_type)));
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5, TEMP vtmp6,
         TEMP vtmp7, TEMP vtmp8, TEMP vtmp9, USE_KILL ary, USE_KILL cnt, USE basic_type, KILL cr);

  format %{ "Array HashCode array[] $ary,$cnt,$result,$basic_type -> $result   // KILL all" %}
  ins_encode %{
    // Note: the first four vector temps are deliberately passed in reverse
    // order (v3, v2, v1, v0) -- the ordering the stub expects; TODO confirm
    // against MacroAssembler::arrays_hashcode.
    address tpc = __ arrays_hashcode($ary$$Register, $cnt$$Register, $result$$Register,
                                     $vtmp3$$FloatRegister, $vtmp2$$FloatRegister,
                                     $vtmp1$$FloatRegister, $vtmp0$$FloatRegister,
                                     $vtmp4$$FloatRegister, $vtmp5$$FloatRegister,
                                     $vtmp6$$FloatRegister, $vtmp7$$FloatRegister,
                                     $vtmp8$$FloatRegister, $vtmp9$$FloatRegister,
                                     (BasicType)$basic_type$$constant);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
17036 
// Count leading non-negative bytes in a byte[] (CountPositives intrinsic).
// May call a shared stub; nullptr return means the code cache is full.
instruct count_positives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (CountPositives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "count positives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    address tpc = __ count_positives($ary1$$Register, $len$$Register, $result$$Register);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe( pipe_slow );
%}
17051 
17052 // fast char[] to byte[] compression
// Compress a char[] (UTF-16) into a byte[] (Latin1); result reports success/
// progress per the StrCompressedCopy contract.  len is read but not killed.
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                         vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         USE_KILL src, USE_KILL dst, USE len, KILL cr);

  format %{ "String Compress $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $result$$Register, $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                           $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                           $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
17071 
17072 // fast byte[] to char[] inflation
// Inflate a byte[] (Latin1) into a char[] (UTF-16).  No value result
// (Universe dummy).  May call a shared stub; nullptr return means the code
// cache is full.
// NOTE(review): vtmp3..vtmp6 are declared TEMP but only vtmp0-vtmp2 are
// passed to byte_array_inflate -- presumably the stub clobbers the extra
// registers internally; verify against the macroassembler.
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len, iRegP_R3 tmp,
                        vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                        vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3,
         TEMP vtmp4, TEMP vtmp5, TEMP vtmp6, TEMP tmp,
         USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst # KILL $tmp $src $dst $len V0-V6 cr" %}
  ins_encode %{
    address tpc = __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                                        $vtmp2$$FloatRegister, $tmp$$Register);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
17094 
17095 // encode char[] to byte[] in ISO_8859_1
// Encode char[] to byte[] in ISO-8859-1 (non-ASCII variant: is_ascii() is
// false, and 'false' is passed as the ascii flag to the stub).
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                          vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                          iRegI_R0 result, rFlagsReg cr)
%{
  predicate(!((EncodeISOArrayNode*)n)->is_ascii());
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE len, KILL vtmp0, KILL vtmp1,
         KILL vtmp2, KILL vtmp3, KILL vtmp4, KILL vtmp5, KILL cr);

  format %{ "Encode ISO array $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
                        $result$$Register, false,
                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                        $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                        $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_class_memory);
%}
17116 
// Encode char[] to byte[] in ASCII (is_ascii() variant; 'true' is passed as
// the ascii flag).  Otherwise identical to encode_iso_array above.
instruct encode_ascii_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                            vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                            vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                            iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((EncodeISOArrayNode*)n)->is_ascii());
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE len, KILL vtmp0, KILL vtmp1,
         KILL vtmp2, KILL vtmp3, KILL vtmp4, KILL vtmp5, KILL cr);

  format %{ "Encode ASCII array $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
                        $result$$Register, true,
                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                        $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                        $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_class_memory);
%}
17137 
17138 //----------------------------- CompressBits/ExpandBits ------------------------
17139 
// CompressBits (bit extract) on an int in a GPR: move src and mask into the
// S lanes of vector temps, run SVE2 BEXT, move the result back to a GPR.
instruct compressBitsI_reg(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ S, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ S, 0, $mask$$Register);
    __ sve_bext($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17157 
// CompressBits on an int loaded from memory with a constant mask: the value
// is loaded straight into an S lane and the mask is materialized from the
// constant pool, then SVE2 BEXT is applied.
instruct compressBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits (LoadI mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrs   $tsrc, $mem\n\t"
            "ldrs   $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    loadStore(masm, &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    // Mask is an immediate; load it via the constant pool.
    __ ldrs($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bext($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17176 
// CompressBits (bit extract) on a long in a GPR: same as compressBitsI_reg
// but operating on the 64-bit D lanes.
instruct compressBitsL_reg(iRegLNoSp dst, iRegL src, iRegL mask,
                           vRegD tdst, vRegD tsrc, vRegD tmask) %{
  match(Set dst (CompressBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ D, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ D, 0, $mask$$Register);
    __ sve_bext($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17194 
// CompressBits on a long loaded from memory with a constant mask: load into
// a D lane, materialize the mask from the constant pool, apply SVE2 BEXT.
instruct compressBitsL_memcon(iRegLNoSp dst, memory8 mem, immL mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits (LoadL mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrd   $tsrc, $mem\n\t"
            "ldrd   $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    loadStore(masm, &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    // Mask is an immediate; load it via the constant pool.
    __ ldrd($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bext($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17213 
// ExpandBits (bit deposit) on an int in a GPR: move src and mask into the
// S lanes of vector temps, run SVE2 BDEP, move the result back to a GPR.
instruct expandBitsI_reg(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask,
                         vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ S, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ S, 0, $mask$$Register);
    __ sve_bdep($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17231 
// ExpandBits on an int loaded from memory with a constant mask: load into an
// S lane, materialize the mask from the constant pool, apply SVE2 BDEP.
instruct expandBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask,
                         vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits (LoadI mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrs   $tsrc, $mem\n\t"
            "ldrs   $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    loadStore(masm, &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    // Mask is an immediate; load it via the constant pool.
    __ ldrs($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bdep($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17250 
// ExpandBits (bit deposit) on a long in a GPR: same as expandBitsI_reg but
// operating on the 64-bit D lanes.
instruct expandBitsL_reg(iRegLNoSp dst, iRegL src, iRegL mask,
                         vRegD tdst, vRegD tsrc, vRegD tmask) %{
  match(Set dst (ExpandBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ D, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ D, 0, $mask$$Register);
    __ sve_bdep($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17268 
17269 
// Expand (scatter) the low-order bits of a long loaded from memory into the
// bit positions selected by an immediate mask (Long.expand intrinsic with a
// constant mask and the load folded into the rule).
// The source is loaded straight into an FP/SIMD temp, the immediate mask is
// materialized via the constant pool, and the SVE2 BDEP instruction performs
// the bit deposit on the 64-bit (D) element in lane 0.
// Fix: the rule produces a long (LoadL / immL / D-sized moves), so the
// destination must be an iRegLNoSp, matching expandBitsL_reg above --
// it was declared iRegINoSp.
// NOTE(review): sve_bdep requires the SVE2 BITPERM feature; the predicate
// guarding these ExpandBits rules is presumably defined with the rule whose
// header precedes this chunk -- confirm.
instruct expandBitsL_memcon(iRegLNoSp dst, memory8 mem, immL mask,
                         vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits (LoadL mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrd   $tsrc, $mem\n\t"
            "ldrd   $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    // Load the 8-byte source operand directly into the FP/SIMD temp register.
    loadStore(masm, &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    // The immediate mask is placed in the constant pool and loaded from there.
    __ ldrd($tmask$$FloatRegister, $constantaddress($mask));
    // Deposit src bits into the set-bit positions of mask (lane 0, size D).
    __ sve_bdep($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    // Move the 64-bit result from lane 0 back to the destination GPR.
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17288 
//----------------------------- Reinterpret ----------------------------------
// Reinterpret a half-precision float value in a floating point register to a general purpose register
instruct reinterpretHF2S(iRegINoSp dst, vRegF src) %{
  match(Set dst (ReinterpretHF2S src));
  format %{ "reinterpretHF2S $dst, $src" %}
  ins_encode %{
    // smov sign-extends the 16-bit (H) element in lane 0 into the 32-bit GPR,
    // giving the raw half-float bit pattern as a Java short value.
    __ smov($dst$$Register, $src$$FloatRegister, __ H, 0);
  %}
  ins_pipe(pipe_slow);
%}
17299 
// Reinterpret a half-precision float value in a general purpose register to a floating point register
instruct reinterpretS2HF(vRegF dst, iRegINoSp src) %{
  match(Set dst (ReinterpretS2HF src));
  format %{ "reinterpretS2HF $dst, $src" %}
  ins_encode %{
    // Move the low 16 bits of the GPR into the H-sized element in lane 0
    // of the FP register; the bit pattern is transferred unchanged.
    __ mov($dst$$FloatRegister, __ H, 0, $src$$Register);
  %}
  ins_pipe(pipe_slow);
%}
17309 
// Without this optimization, ReinterpretS2HF (ConvF2HF src) would result in the following
// instructions (the first two are for ConvF2HF and the last instruction is for ReinterpretS2HF) -
// fcvt $tmp1_fpr, $src_fpr    // Convert float to half-precision float
// mov  $tmp2_gpr, $tmp1_fpr   // Move half-precision float in FPR to a GPR
// mov  $dst_fpr,  $tmp2_gpr   // Move the result from a GPR to an FPR
// The move from FPR to GPR in ConvF2HF and the move from GPR to FPR in ReinterpretS2HF
// can be omitted in this pattern, resulting in -
// fcvt $dst, $src  // Convert float to half-precision float
instruct convF2HFAndS2HF(vRegF dst, vRegF src)
%{
  match(Set dst (ReinterpretS2HF (ConvF2HF src)));
  format %{ "convF2HFAndS2HF $dst, $src" %}
  ins_encode %{
    // Single fcvt (single -> half) replaces the convert + two cross-file moves.
    __ fcvtsh($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
17327 
// Without this optimization, ConvHF2F (ReinterpretHF2S src) would result in the following
// instructions (the first one is for ReinterpretHF2S and the last two are for ConvHF2F) -
// mov  $tmp1_gpr, $src_fpr  // Move the half-precision float from an FPR to a GPR
// mov  $tmp2_fpr, $tmp1_gpr // Move the same value from GPR to an FPR
// fcvt $dst_fpr,  $tmp2_fpr // Convert the half-precision float to 32-bit float
// The move from FPR to GPR in ReinterpretHF2S and the move from GPR to FPR in ConvHF2F
// can be omitted as the input (src) is already in an FPR required for the fcvths instruction
// resulting in -
// fcvt $dst, $src  // Convert half-precision float to a 32-bit float
instruct convHF2SAndHF2F(vRegF dst, vRegF src)
%{
  match(Set dst (ConvHF2F (ReinterpretHF2S src)));
  format %{ "convHF2SAndHF2F $dst, $src" %}
  ins_encode %{
    // Single fcvt (half -> single) replaces the two cross-file moves + convert.
    __ fcvths($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
17346 
17347 // ============================================================================
17348 // This name is KNOWN by the ADLC and cannot be changed.
17349 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
17350 // for this guy.
17351 instruct tlsLoadP(thread_RegP dst)
17352 %{
17353   match(Set dst (ThreadLocal));
17354 
17355   ins_cost(0);
17356 
17357   format %{ " -- \t// $dst=Thread::current(), empty" %}
17358 
17359   size(0);
17360 
17361   ins_encode( /*empty*/ );
17362 
17363   ins_pipe(pipe_class_empty);
17364 %}
17365 
17366 //----------PEEPHOLE RULES-----------------------------------------------------
17367 // These must follow all instruction definitions as they use the names
17368 // defined in the instructions definitions.
17369 //
17370 // peepmatch ( root_instr_name [preceding_instruction]* );
17371 //
17372 // peepconstraint %{
17373 // (instruction_number.operand_name relational_op instruction_number.operand_name
17374 //  [, ...] );
17375 // // instruction numbers are zero-based using left to right order in peepmatch
17376 //
17377 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
17378 // // provide an instruction_number.operand_name for each operand that appears
17379 // // in the replacement instruction's match rule
17380 //
17381 // ---------VM FLAGS---------------------------------------------------------
17382 //
17383 // All peephole optimizations can be turned off using -XX:-OptoPeephole
17384 //
17385 // Each peephole rule is given an identifying number starting with zero and
17386 // increasing by one in the order seen by the parser.  An individual peephole
17387 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
17388 // on the command-line.
17389 //
17390 // ---------CURRENT LIMITATIONS----------------------------------------------
17391 //
17392 // Only match adjacent instructions in same basic block
17393 // Only equality constraints
17394 // Only constraints between operands, not (0.dest_reg == RAX_enc)
17395 // Only one replacement instruction
17396 //
17397 // ---------EXAMPLE----------------------------------------------------------
17398 //
17399 // // pertinent parts of existing instructions in architecture description
17400 // instruct movI(iRegINoSp dst, iRegI src)
17401 // %{
17402 //   match(Set dst (CopyI src));
17403 // %}
17404 //
17405 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
17406 // %{
17407 //   match(Set dst (AddI dst src));
17408 //   effect(KILL cr);
17409 // %}
17410 //
17411 // // Change (inc mov) to lea
17412 // peephole %{
17413 //   // increment preceded by register-register move
17414 //   peepmatch ( incI_iReg movI );
17415 //   // require that the destination register of the increment
17416 //   // match the destination register of the move
17417 //   peepconstraint ( 0.dst == 1.dst );
17418 //   // construct a replacement instruction that sets
17419 //   // the destination to ( move's source register + one )
17420 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
17421 // %}
17422 //
17423 
17424 // Implementation no longer uses movX instructions since
17425 // machine-independent system no longer uses CopyX nodes.
17426 //
17427 // peephole
17428 // %{
17429 //   peepmatch (incI_iReg movI);
17430 //   peepconstraint (0.dst == 1.dst);
17431 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17432 // %}
17433 
17434 // peephole
17435 // %{
17436 //   peepmatch (decI_iReg movI);
17437 //   peepconstraint (0.dst == 1.dst);
17438 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17439 // %}
17440 
17441 // peephole
17442 // %{
17443 //   peepmatch (addI_iReg_imm movI);
17444 //   peepconstraint (0.dst == 1.dst);
17445 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17446 // %}
17447 
17448 // peephole
17449 // %{
17450 //   peepmatch (incL_iReg movL);
17451 //   peepconstraint (0.dst == 1.dst);
17452 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17453 // %}
17454 
17455 // peephole
17456 // %{
17457 //   peepmatch (decL_iReg movL);
17458 //   peepconstraint (0.dst == 1.dst);
17459 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17460 // %}
17461 
17462 // peephole
17463 // %{
17464 //   peepmatch (addL_iReg_imm movL);
17465 //   peepconstraint (0.dst == 1.dst);
17466 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17467 // %}
17468 
17469 // peephole
17470 // %{
17471 //   peepmatch (addP_iReg_imm movP);
17472 //   peepconstraint (0.dst == 1.dst);
17473 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
17474 // %}
17475 
17476 // // Change load of spilled value to only a spill
17477 // instruct storeI(memory mem, iRegI src)
17478 // %{
17479 //   match(Set mem (StoreI mem src));
17480 // %}
17481 //
17482 // instruct loadI(iRegINoSp dst, memory mem)
17483 // %{
17484 //   match(Set dst (LoadI mem));
17485 // %}
17486 //
17487 
17488 //----------SMARTSPILL RULES---------------------------------------------------
17489 // These must follow all instruction definitions as they use the names
17490 // defined in the instructions definitions.
17491 
17492 // Local Variables:
17493 // mode: c++
17494 // End: