1 //
    2 // Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
    3 // Copyright (c) 2014, 2024, Red Hat, Inc. All rights reserved.
    4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    5 //
    6 // This code is free software; you can redistribute it and/or modify it
    7 // under the terms of the GNU General Public License version 2 only, as
    8 // published by the Free Software Foundation.
    9 //
   10 // This code is distributed in the hope that it will be useful, but WITHOUT
   11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   13 // version 2 for more details (a copy is included in the LICENSE file that
   14 // accompanied this code).
   15 //
   16 // You should have received a copy of the GNU General Public License version
   17 // 2 along with this work; if not, write to the Free Software Foundation,
   18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   19 //
   20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   21 // or visit www.oracle.com if you need additional information or have any
   22 // questions.
   23 //
   24 //
   25 
   26 // AArch64 Architecture Description File
   27 
   28 //----------REGISTER DEFINITION BLOCK------------------------------------------
   29 // This information is used by the matcher and the register allocator to
   30 // describe individual registers and classes of registers within the target
   31 // architecture.
   32 
   33 register %{
   34 //----------Architecture Description Register Definitions----------------------
   35 // General Registers
   36 // "reg_def"  name ( register save type, C convention save type,
   37 //                   ideal register type, encoding );
   38 // Register Save Types:
   39 //
   40 // NS  = No-Save:       The register allocator assumes that these registers
   41 //                      can be used without saving upon entry to the method, &
   42 //                      that they do not need to be saved at call sites.
   43 //
   44 // SOC = Save-On-Call:  The register allocator assumes that these registers
   45 //                      can be used without saving upon entry to the method,
   46 //                      but that they must be saved at call sites.
   47 //
   48 // SOE = Save-On-Entry: The register allocator assumes that these registers
   49 //                      must be saved before using them upon entry to the
   50 //                      method, but they do not need to be saved at call
   51 //                      sites.
   52 //
   53 // AS  = Always-Save:   The register allocator assumes that these registers
   54 //                      must be saved before using them upon entry to the
   55 //                      method, & that they must be saved at call sites.
   56 //
   57 // Ideal Register Type is used to determine how to save & restore a
   58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
   59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
   60 //
   61 // The encoding number is the actual bit-pattern placed into the opcodes.
   62 
   63 // We must define the 64 bit int registers in two 32 bit halves, the
   64 // real lower register and a virtual upper half register. upper halves
   65 // are used by the register allocator but are not actually supplied as
   66 // operands to memory ops.
   67 //
   68 // follow the C1 compiler in making registers
   69 //
   70 //   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
   72 //   r8-r9 non-allocatable (so we can use them as scratch regs)
   73 //
   74 // as regards Java usage. we don't use any callee save registers
   75 // because this makes it difficult to de-optimise a frame (see comment
   76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
   77 //
   78 
   79 // General Registers
   80 
// 64-bit integer registers, each modelled as a real lower half (Rn)
// plus a virtual upper half (Rn_H) as required by the 32-bit slot
// scheme described above.  Column 1 is the register save type used
// for compiled Java code, column 2 the C calling convention save
// type (see the key at the top of this section).
//
// r0-r7: both conventions treat these as volatile (SOC); per the
// alloc_class below they carry arguments.
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
// r8/r9: NS for Java so the JIT can use them as scratch without the
// allocator handing them out (see the "non-allocatable" note above).
reg_def R8      ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()         ); // rscratch1, non-allocatable
reg_def R8_H    ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()->next() );
reg_def R9      ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()         ); // rscratch2, non-allocatable
reg_def R9_H    ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()->next() );
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
// r18: referenced via the r18_tls name; it is excluded from
// allocation when R18_RESERVED is defined (see the
// non_allocatable_reg* classes below).
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18_tls->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18_tls->as_VMReg()->next());
// r19-r26: callee-saved under the C convention (SOE) but SOC for
// Java -- Java code uses no callee-save registers so frames can be
// deoptimised easily (see comment above).
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
// r27-r31 have fixed VM roles and are never allocated (see the
// alloc_class and non_allocatable_reg* lists below).
reg_def R27     ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
  145 
  146 // ----------------------------
  147 // Float/Double/Vector Registers
  148 // ----------------------------
  149 
  150 // Double Registers
  151 
  152 // The rules of ADL require that double registers be defined in pairs.
  153 // Each pair must be two 32-bit values, but not necessarily a pair of
  154 // single float registers. In each pair, ADLC-assigned register numbers
  155 // must be adjacent, with the lower number even. Finally, when the
  156 // CPU stores such a register pair to memory, the word associated with
  157 // the lower ADLC-assigned number must be stored to the lower address.
  158 
  159 // AArch64 has 32 floating-point registers. Each can store a vector of
  160 // single or double precision floating-point values up to 8 * 32
  161 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
  162 // use the first float or double element of the vector.
  163 
// For Java use, float registers v0-v15 are always save-on-call (even
// though the platform ABI treats v8-v15 as callee-save).  Float
// registers v16-v31 are SOC as per the platform spec.
  167 
  168 // For SVE vector registers, we simply extend vector register size to 8
  169 // 'logical' slots. This is nominally 256 bits but it actually covers
  170 // all possible 'physical' SVE vector register lengths from 128 ~ 2048
  171 // bits. The 'physical' SVE vector register length is detected during
  172 // startup, so the register allocator is able to identify the correct
  173 // number of bytes needed for an SVE spill/unspill.
  174 // Note that a vector register with 4 slots denotes a 128-bit NEON
  175 // register allowing it to be distinguished from the corresponding SVE
  176 // vector register when the SVE vector length is 128 bits.
  177 
  // Each FP/SIMD register vN occupies four 32-bit ADLC slots:
  // VN (bits 0-31), VN_H (32-63), VN_J (64-95) and VN_K (96-127),
  // covering the 128-bit NEON view (SVE lengths are handled
  // logically, per the comment above).  For v8-v15 only the low two
  // slots (VN, VN_H) are SOE in the C convention -- the platform ABI
  // preserves only the bottom 64 bits of those registers -- while the
  // high slots stay SOC.  Everything is SOC for Java use.
  reg_def V0   ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(3) );

  // v8-v15: low 64 bits callee-saved in the C convention (SOE on the
  // first two slots only).
  reg_def V8   ( SOC, SOE, Op_RegF, 8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOE, Op_RegF, 8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOE, Op_RegF, 9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOE, Op_RegF, 9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(3) );

  reg_def V10   ( SOC, SOE, Op_RegF, 10, v10->as_VMReg()          );
  reg_def V10_H ( SOC, SOE, Op_RegF, 10, v10->as_VMReg()->next()  );
  reg_def V10_J ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2) );
  reg_def V10_K ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3) );

  reg_def V11   ( SOC, SOE, Op_RegF, 11, v11->as_VMReg()          );
  reg_def V11_H ( SOC, SOE, Op_RegF, 11, v11->as_VMReg()->next()  );
  reg_def V11_J ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2) );
  reg_def V11_K ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3) );

  reg_def V12   ( SOC, SOE, Op_RegF, 12, v12->as_VMReg()          );
  reg_def V12_H ( SOC, SOE, Op_RegF, 12, v12->as_VMReg()->next()  );
  reg_def V12_J ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2) );
  reg_def V12_K ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3) );

  reg_def V13   ( SOC, SOE, Op_RegF, 13, v13->as_VMReg()          );
  reg_def V13_H ( SOC, SOE, Op_RegF, 13, v13->as_VMReg()->next()  );
  reg_def V13_J ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2) );
  reg_def V13_K ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3) );

  reg_def V14   ( SOC, SOE, Op_RegF, 14, v14->as_VMReg()          );
  reg_def V14_H ( SOC, SOE, Op_RegF, 14, v14->as_VMReg()->next()  );
  reg_def V14_J ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2) );
  reg_def V14_K ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3) );

  reg_def V15   ( SOC, SOE, Op_RegF, 15, v15->as_VMReg()          );
  reg_def V15_H ( SOC, SOE, Op_RegF, 15, v15->as_VMReg()->next()  );
  reg_def V15_J ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2) );
  reg_def V15_K ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3) );

  // v16-v31: fully volatile in both conventions.
  reg_def V16   ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()          );
  reg_def V16_H ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next()  );
  reg_def V16_J ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2) );
  reg_def V16_K ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3) );

  reg_def V17   ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()          );
  reg_def V17_H ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next()  );
  reg_def V17_J ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2) );
  reg_def V17_K ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3) );

  reg_def V18   ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()          );
  reg_def V18_H ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next()  );
  reg_def V18_J ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2) );
  reg_def V18_K ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3) );

  reg_def V19   ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()          );
  reg_def V19_H ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next()  );
  reg_def V19_J ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2) );
  reg_def V19_K ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3) );

  reg_def V20   ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()          );
  reg_def V20_H ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next()  );
  reg_def V20_J ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2) );
  reg_def V20_K ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3) );

  reg_def V21   ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()          );
  reg_def V21_H ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next()  );
  reg_def V21_J ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2) );
  reg_def V21_K ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3) );

  reg_def V22   ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()          );
  reg_def V22_H ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next()  );
  reg_def V22_J ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2) );
  reg_def V22_K ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3) );

  reg_def V23   ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()          );
  reg_def V23_H ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next()  );
  reg_def V23_J ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2) );
  reg_def V23_K ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3) );

  reg_def V24   ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()          );
  reg_def V24_H ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next()  );
  reg_def V24_J ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2) );
  reg_def V24_K ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3) );

  reg_def V25   ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()          );
  reg_def V25_H ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next()  );
  reg_def V25_J ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2) );
  reg_def V25_K ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3) );

  reg_def V26   ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()          );
  reg_def V26_H ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next()  );
  reg_def V26_J ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2) );
  reg_def V26_K ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3) );

  reg_def V27   ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()          );
  reg_def V27_H ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next()  );
  reg_def V27_J ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2) );
  reg_def V27_K ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3) );

  reg_def V28   ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()          );
  reg_def V28_H ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next()  );
  reg_def V28_J ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2) );
  reg_def V28_K ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3) );

  reg_def V29   ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()          );
  reg_def V29_H ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next()  );
  reg_def V29_J ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2) );
  reg_def V29_K ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3) );

  reg_def V30   ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()          );
  reg_def V30_H ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next()  );
  reg_def V30_J ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2) );
  reg_def V30_K ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3) );

  reg_def V31   ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()          );
  reg_def V31_H ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next()  );
  reg_def V31_J ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2) );
  reg_def V31_K ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3) );
  337 
  338 // ----------------------------
  339 // SVE Predicate Registers
  340 // ----------------------------
  // The 16 SVE predicate registers, all save-on-call in both
  // conventions; each occupies a single Op_RegVectMask slot.
  // Allocation order and the p7 reservation are set in chunk2 below.
  reg_def P0 (SOC, SOC, Op_RegVectMask, 0, p0->as_VMReg());
  reg_def P1 (SOC, SOC, Op_RegVectMask, 1, p1->as_VMReg());
  reg_def P2 (SOC, SOC, Op_RegVectMask, 2, p2->as_VMReg());
  reg_def P3 (SOC, SOC, Op_RegVectMask, 3, p3->as_VMReg());
  reg_def P4 (SOC, SOC, Op_RegVectMask, 4, p4->as_VMReg());
  reg_def P5 (SOC, SOC, Op_RegVectMask, 5, p5->as_VMReg());
  reg_def P6 (SOC, SOC, Op_RegVectMask, 6, p6->as_VMReg());
  reg_def P7 (SOC, SOC, Op_RegVectMask, 7, p7->as_VMReg());
  reg_def P8 (SOC, SOC, Op_RegVectMask, 8, p8->as_VMReg());
  reg_def P9 (SOC, SOC, Op_RegVectMask, 9, p9->as_VMReg());
  reg_def P10 (SOC, SOC, Op_RegVectMask, 10, p10->as_VMReg());
  reg_def P11 (SOC, SOC, Op_RegVectMask, 11, p11->as_VMReg());
  reg_def P12 (SOC, SOC, Op_RegVectMask, 12, p12->as_VMReg());
  reg_def P13 (SOC, SOC, Op_RegVectMask, 13, p13->as_VMReg());
  reg_def P14 (SOC, SOC, Op_RegVectMask, 14, p14->as_VMReg());
  reg_def P15 (SOC, SOC, Op_RegVectMask, 15, p15->as_VMReg());
  357 
  358 // ----------------------------
  359 // Special Registers
  360 // ----------------------------
  361 
// The AArch64 condition flags (NZCV, part of PSTATE; the CPSR in
// AArch32 terms) are not directly accessible as an instruction
// operand.  The FPSR status flag register is a system register which
// can be written/read using MSR/MRS but again does not appear as an
// operand (a code identifying the FPSR occurs as an immediate value
// in the instruction).
  367 
// Pseudo-register for the condition flags: no ideal type, encoding 32
// (past the last real register), and no VMReg (VMRegImpl::Bad) since
// the flags are not an encodable operand.
reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
  369 
  370 // Specify priority of register selection within phases of register
  371 // allocation.  Highest priority is first.  A useful heuristic is to
  372 // give registers a low priority when they are required by machine
  373 // instructions, like EAX and EDX on I486, and choose no-save registers
  374 // before save-on-call, & save-on-call before save-on-entry.  Registers
  375 // which participate in fixed calling sequences should come last.
  376 // Registers which are used as pairs must fall on an even boundary.
  377 
alloc_class chunk0(
    // Order within this list is allocation priority (highest first,
    // per the note above): volatile scratch registers, then argument
    // registers, then C-callee-saved registers, with the
    // fixed-purpose/non-allocatable registers trailing.

    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
    R8, R8_H,   // rscratch1
    R9, R9_H,   // rscratch2
);
  420 
alloc_class chunk1(
    // FP/SIMD allocation order: scratch v16-v31 first, then argument
    // registers v0-v7, then the (C-convention) callee-saved v8-v15.

    // no save (v16-v31 are SOC in both conventions; listed first so
    // the allocator prefers them over registers that must be
    // preserved somewhere)
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);
  461 
alloc_class chunk2 (
    // SVE predicate allocation order.  p0-p6 come first (usable as
    // governing predicates), then the extra predicates p8-p15; p7 is
    // listed last so it is effectively kept free for the all-true
    // predicate.

    // Governing predicates for load/store and arithmetic
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,

    // Extra predicates
    P8,
    P9,
    P10,
    P11,
    P12,
    P13,
    P14,
    P15,

    // Preserved for all-true predicate
    P7,
);
  485 
alloc_class chunk3(RFLAGS); // the flags pseudo-register gets its own chunk
  487 
  488 //----------Architecture Description Register Classes--------------------------
  489 // Several register classes are automatically defined based upon information in
  490 // this architecture description.
  491 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
  492 // 2) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
  493 //
  494 
  495 // Class for all 32 bit general purpose registers
reg_class all_reg32(
    // Every 32-bit GP register except r8/r9, which are reserved as
    // scratch (rscratch1/rscratch2) and never allocated.
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30,
    R31
);
  528 
  529 
  530 // Class for all 32 bit integer registers (excluding SP which
  531 // will never be used as an integer register)
reg_class any_reg32 %{
  // Dynamic register class: the mask is a RegMask precomputed
  // elsewhere in this file (not visible in this chunk).
  return _ANY_REG32_mask;
%}
  535 
  536 // Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);

// Singleton class for R31 int register (R31 encodes sp, see reg_def above)
reg_class int_r31_reg(R31);
  550 
  551 // Class for all 64 bit general purpose registers
reg_class all_reg(
    // 64-bit counterpart of all_reg32: every GP register pair except
    // the r8/r9 scratch registers.
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
  584 
  585 // Class for all long integer registers (including SP)
reg_class any_reg %{
  // Dynamic register class; mask precomputed outside this chunk.
  return _ANY_REG_mask;
%}
  589 
  590 // Class for non-allocatable 32 bit registers
reg_class non_allocatable_reg32(
    // Registers with fixed VM roles that must never be handed out by
    // the allocator (32-bit view).
#ifdef R18_RESERVED
    // See comment in register_aarch64.hpp
    R18,                        // tls on Windows
#endif
    R28,                        // thread
    R30,                        // lr
    R31                         // sp
);
  600 
  601 // Class for non-allocatable 64 bit registers
reg_class non_allocatable_reg(
    // 64-bit view of non_allocatable_reg32 above.
#ifdef R18_RESERVED
    // See comment in register_aarch64.hpp
    R18, R18_H,                 // tls on Windows, platform register on macOS
#endif
    R28, R28_H,                 // thread
    R30, R30_H,                 // lr
    R31, R31_H                  // sp
);
  611 
  612 // Class for all non-special integer registers
reg_class no_special_reg32 %{
  // Dynamic class: allocatable 32-bit registers minus the special
  // ones; mask precomputed outside this chunk.
  return _NO_SPECIAL_REG32_mask;
%}

// Class for all non-special long integer registers
reg_class no_special_reg %{
  // 64-bit counterpart of no_special_reg32.
  return _NO_SPECIAL_REG_mask;
%}
  621 
  622 // Class for 64 bit register r0
reg_class r0_reg(
    // Singleton 64-bit classes used to pin operands to a specific
    // register in instruction patterns.
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);
  661 
  662 // Class for method register
reg_class method_reg(
    // r12 -- used as the method register; see callers elsewhere in
    // the file (TODO confirm against the frame/encoding sections).
    R12, R12_H
);

// Class for thread register
reg_class thread_reg(
    R28, R28_H          // matches the "thread" reg_def annotation
);

// Class for frame pointer register
reg_class fp_reg(
    R29, R29_H          // matches the "fp" reg_def annotation
);

// Class for link register
reg_class lr_reg(
    R30, R30_H          // matches the "lr" reg_def annotation
);

// Class for long sp register
reg_class sp_reg(
  R31, R31_H            // R31 encodes sp (r31_sp)
);
  686 
  687 // Class for all pointer registers
reg_class ptr_reg %{
  // Dynamic class of pointer registers; mask precomputed outside this
  // chunk.
  return _PTR_REG_mask;
%}

// Class for all non_special pointer registers
reg_class no_special_ptr_reg %{
  return _NO_SPECIAL_PTR_REG_mask;
%}

// Class for all non_special pointer registers (excluding rfp)
reg_class no_special_no_rfp_ptr_reg %{
  return _NO_SPECIAL_NO_RFP_PTR_REG_mask;
%}
  701 
  702 // Class for all float registers
reg_class float_reg(
    // Single-precision view: one 32-bit slot per FP register.
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);
  737 
  738 // Double precision float registers have virtual `high halves' that
  739 // are needed by the allocator.
  740 // Class for all double registers
reg_class double_reg(
    // Double-precision view: base slot plus the virtual _H half of
    // each FP register.
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
  775 
  776 // Class for all SVE vector registers.
reg_class vectora_reg (
    // Scalable (SVE) vector view: all four slots of every FP register
    // (actual spill size is determined at startup, per the SVE note
    // above).
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,
);
  811 
// Class for all 64bit vector registers.
// Two 32-bit slots per register: the D (low 64-bit) view of v0..v31.
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
  847 
// Class for all 128bit vector registers.
// Four 32-bit slots per register: the full Q view of v0..v31.
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
  883 
// Singleton classes for the individual vector registers v0 .. v31.
// Each class contains exactly one register, so an operand declared with
// one of these classes is pinned to that specific register.

// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Class for 128 bit register v4
reg_class v4_reg(
    V4, V4_H
);

// Class for 128 bit register v5
reg_class v5_reg(
    V5, V5_H
);

// Class for 128 bit register v6
reg_class v6_reg(
    V6, V6_H
);

// Class for 128 bit register v7
reg_class v7_reg(
    V7, V7_H
);

// Class for 128 bit register v8
reg_class v8_reg(
    V8, V8_H
);

// Class for 128 bit register v9
reg_class v9_reg(
    V9, V9_H
);

// Class for 128 bit register v10
reg_class v10_reg(
    V10, V10_H
);

// Class for 128 bit register v11
reg_class v11_reg(
    V11, V11_H
);

// Class for 128 bit register v12
reg_class v12_reg(
    V12, V12_H
);

// Class for 128 bit register v13
reg_class v13_reg(
    V13, V13_H
);

// Class for 128 bit register v14
reg_class v14_reg(
    V14, V14_H
);

// Class for 128 bit register v15
reg_class v15_reg(
    V15, V15_H
);

// Class for 128 bit register v16
reg_class v16_reg(
    V16, V16_H
);

// Class for 128 bit register v17
reg_class v17_reg(
    V17, V17_H
);

// Class for 128 bit register v18
reg_class v18_reg(
    V18, V18_H
);

// Class for 128 bit register v19
reg_class v19_reg(
    V19, V19_H
);

// Class for 128 bit register v20
reg_class v20_reg(
    V20, V20_H
);

// Class for 128 bit register v21
reg_class v21_reg(
    V21, V21_H
);

// Class for 128 bit register v22
reg_class v22_reg(
    V22, V22_H
);

// Class for 128 bit register v23
reg_class v23_reg(
    V23, V23_H
);

// Class for 128 bit register v24
reg_class v24_reg(
    V24, V24_H
);

// Class for 128 bit register v25
reg_class v25_reg(
    V25, V25_H
);

// Class for 128 bit register v26
reg_class v26_reg(
    V26, V26_H
);

// Class for 128 bit register v27
reg_class v27_reg(
    V27, V27_H
);

// Class for 128 bit register v28
reg_class v28_reg(
    V28, V28_H
);

// Class for 128 bit register v29
reg_class v29_reg(
    V29, V29_H
);

// Class for 128 bit register v30
reg_class v30_reg(
    V30, V30_H
);

// Class for 128 bit register v31
reg_class v31_reg(
    V31, V31_H
);
 1043 
// Class for all SVE predicate registers.
// p7 is deliberately excluded: it is kept non-allocatable and holds an
// all-true predicate (see the comment inside the list).
reg_class pr_reg (
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    // P7, non-allocatable, preserved with all elements preset to TRUE.
    P8,
    P9,
    P10,
    P11,
    P12,
    P13,
    P14,
    P15
);
 1063 
// Class for SVE governing predicate registers, which are used
// to determine the active elements of a predicated instruction.
// Only the low predicates p0-p6 are allocatable here; p7 is reserved
// (see below).
reg_class gov_pr (
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    // P7, non-allocatable, preserved with all elements preset to TRUE.
);
 1076 
// Singleton classes pinning an operand to a specific predicate register.
reg_class p0_reg(P0);
reg_class p1_reg(P1);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);
 1082 
 1083 %}
 1084 
 1085 //----------DEFINITION BLOCK---------------------------------------------------
 1086 // Define name --> value mappings to inform the ADLC of an integer valued name
 1087 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 1088 // Format:
 1089 //        int_def  <name>         ( <int_value>, <expression>);
 1090 // Generated Code in ad_<arch>.hpp
 1091 //        #define  <name>   (<expression>)
 1092 //        // value == <int_value>
 1093 // Generated code in ad_<arch>.cpp adlc_verification()
 1094 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 1095 //
 1096 
 1097 // we follow the ppc-aix port in using a simple cost model which ranks
 1098 // register operations as cheap, memory ops as more expensive and
 1099 // branches as most expensive. the first two have a low as well as a
 1100 // normal cost. huge cost appears to be a way of saying don't do
 1101 // something
 1102 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls are ranked twice as expensive as a plain
  // instruction; volatile references ten times.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 1110 
 1111 
 1112 //----------SOURCE BLOCK-------------------------------------------------------
 1113 // This is a block of C++ code which provides values, functions, and
 1114 // definitions necessary in the rest of the architecture description
 1115 
 1116 source_hpp %{
 1117 
 1118 #include "asm/macroAssembler.hpp"
 1119 #include "gc/shared/barrierSetAssembler.hpp"
 1120 #include "gc/shared/cardTable.hpp"
 1121 #include "gc/shared/cardTableBarrierSet.hpp"
 1122 #include "gc/shared/collectedHeap.hpp"
 1123 #include "opto/addnode.hpp"
 1124 #include "opto/convertnode.hpp"
 1125 #include "runtime/objectMonitor.hpp"
 1126 
// Derived register masks with conditionally allocatable registers;
// defined and populated by reg_mask_init() in the source block below.
extern RegMask _ANY_REG32_mask;
extern RegMask _ANY_REG_mask;
extern RegMask _PTR_REG_mask;
extern RegMask _NO_SPECIAL_REG32_mask;
extern RegMask _NO_SPECIAL_REG_mask;
extern RegMask _NO_SPECIAL_PTR_REG_mask;
extern RegMask _NO_SPECIAL_NO_RFP_PTR_REG_mask;
 1134 
// Platform support for Compile::shorten_branches.  Both sizes are zero
// because this platform emits no call trampoline stubs.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
 1152 
// Sizes and emitters for the exception and deopt handler stubs appended
// to each compiled method (emit_* bodies are defined elsewhere).
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // The exception handler is a single far code-stub branch.
  static uint size_exception_handler() {
    return MacroAssembler::far_codestub_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    return NativeInstruction::instruction_size + MacroAssembler::far_codestub_branch_size();
  }
};
 1169 
// Platform-dependent extension point for ideal Node flags.  AArch64
// defines no additional flags, so _last_flag simply aliases the shared
// value.
class Node::PD {
public:
  enum NodeFlags {
    _last_flag = Node::_last_flag
  };
};
 1176 
  // Predicate shared by the rules below; see the source block for the
  // definition and the full commentary on volatile access translation.
  bool is_CAS(int opcode, bool maybe_volatile);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);

  // Convert BoolTest condition to Assembler condition.
  // Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
  Assembler::Condition to_assembler_cond(BoolTest::mask cond);
 1199 %}
 1200 
 1201 source %{
 1202 
 1203   // Derived RegMask with conditionally allocatable registers
 1204 
  // Platform hook run after matching; aarch64 requires no additional
  // mach-node analysis.
  void PhaseOutput::pd_perform_mach_node_analysis() {
  }
 1207 
  // Machine nodes need no special code alignment on aarch64.
  int MachNode::pd_alignment_required() const {
    return 1;
  }

  // No padding is ever inserted ahead of a machine node.
  int MachNode::compute_padding(int current_offset) const {
    return 0;
  }
 1215 
  // Definitions of the derived register masks declared extern in the
  // source_hpp block; filled in by reg_mask_init() below.
  RegMask _ANY_REG32_mask;
  RegMask _ANY_REG_mask;
  RegMask _PTR_REG_mask;
  RegMask _NO_SPECIAL_REG32_mask;
  RegMask _NO_SPECIAL_REG_mask;
  RegMask _NO_SPECIAL_PTR_REG_mask;
  RegMask _NO_SPECIAL_NO_RFP_PTR_REG_mask;
 1223 
  void reg_mask_init() {
    // We derive below RegMask(s) from the ones which are auto-generated from
    // adlc register classes to make AArch64 rheapbase (r27) and rfp (r29)
    // registers conditionally reserved.

    // Exclude sp (r31) from the 32-bit any-register class.
    _ANY_REG32_mask = _ALL_REG32_mask;
    _ANY_REG32_mask.Remove(OptoReg::as_OptoReg(r31_sp->as_VMReg()));

    _ANY_REG_mask = _ALL_REG_mask;

    _PTR_REG_mask = _ALL_REG_mask;

    // Start the NO_SPECIAL masks from the full sets minus the registers
    // adlc already marks non-allocatable.
    _NO_SPECIAL_REG32_mask = _ALL_REG32_mask;
    _NO_SPECIAL_REG32_mask.SUBTRACT(_NON_ALLOCATABLE_REG32_mask);

    _NO_SPECIAL_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    _NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    // r27 is not allocatable when compressed oops is on and heapbase is not
    // zero, compressed klass pointers doesn't use r27 after JDK-8234794
    if (UseCompressedOops && (CompressedOops::ptrs_base() != NULL)) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
      _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
      _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
    }

    // r29 is not allocatable when PreserveFramePointer is on
    if (PreserveFramePointer) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
      _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
      _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
    }

    // rfp (r29) is excluded from this class unconditionally, regardless
    // of PreserveFramePointer.
    _NO_SPECIAL_NO_RFP_PTR_REG_mask = _NO_SPECIAL_PTR_REG_mask;
    _NO_SPECIAL_NO_RFP_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
  }
 1263 
  // Optimization of volatile gets and puts
 1265   // -------------------------------------
 1266   //
 1267   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
 1268   // use to implement volatile reads and writes. For a volatile read
 1269   // we simply need
 1270   //
 1271   //   ldar<x>
 1272   //
 1273   // and for a volatile write we need
 1274   //
 1275   //   stlr<x>
 1276   //
 1277   // Alternatively, we can implement them by pairing a normal
 1278   // load/store with a memory barrier. For a volatile read we need
 1279   //
 1280   //   ldr<x>
 1281   //   dmb ishld
 1282   //
 1283   // for a volatile write
 1284   //
 1285   //   dmb ish
 1286   //   str<x>
 1287   //   dmb ish
 1288   //
 1289   // We can also use ldaxr and stlxr to implement compare and swap CAS
 1290   // sequences. These are normally translated to an instruction
 1291   // sequence like the following
 1292   //
 1293   //   dmb      ish
 1294   // retry:
 1295   //   ldxr<x>   rval raddr
 1296   //   cmp       rval rold
 1297   //   b.ne done
 1298   //   stlxr<x>  rval, rnew, rold
 1299   //   cbnz      rval retry
 1300   // done:
 1301   //   cset      r0, eq
 1302   //   dmb ishld
 1303   //
 1304   // Note that the exclusive store is already using an stlxr
 1305   // instruction. That is required to ensure visibility to other
 1306   // threads of the exclusive write (assuming it succeeds) before that
 1307   // of any subsequent writes.
 1308   //
 1309   // The following instruction sequence is an improvement on the above
 1310   //
 1311   // retry:
 1312   //   ldaxr<x>  rval raddr
 1313   //   cmp       rval rold
 1314   //   b.ne done
 1315   //   stlxr<x>  rval, rnew, rold
 1316   //   cbnz      rval retry
 1317   // done:
 1318   //   cset      r0, eq
 1319   //
 1320   // We don't need the leading dmb ish since the stlxr guarantees
 1321   // visibility of prior writes in the case that the swap is
 1322   // successful. Crucially we don't have to worry about the case where
 1323   // the swap is not successful since no valid program should be
 1324   // relying on visibility of prior changes by the attempting thread
 1325   // in the case where the CAS fails.
 1326   //
 1327   // Similarly, we don't need the trailing dmb ishld if we substitute
 1328   // an ldaxr instruction since that will provide all the guarantees we
 1329   // require regarding observation of changes made by other threads
 1330   // before any change to the CAS address observed by the load.
 1331   //
 1332   // In order to generate the desired instruction sequence we need to
 1333   // be able to identify specific 'signature' ideal graph node
 1334   // sequences which i) occur as a translation of a volatile reads or
 1335   // writes or CAS operations and ii) do not occur through any other
 1336   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
 1338   // sequences to the desired machine code sequences. Selection of the
 1339   // alternative rules can be implemented by predicates which identify
 1340   // the relevant node sequences.
 1341   //
 1342   // The ideal graph generator translates a volatile read to the node
 1343   // sequence
 1344   //
 1345   //   LoadX[mo_acquire]
 1346   //   MemBarAcquire
 1347   //
 1348   // As a special case when using the compressed oops optimization we
 1349   // may also see this variant
 1350   //
 1351   //   LoadN[mo_acquire]
 1352   //   DecodeN
 1353   //   MemBarAcquire
 1354   //
 1355   // A volatile write is translated to the node sequence
 1356   //
 1357   //   MemBarRelease
 1358   //   StoreX[mo_release] {CardMark}-optional
 1359   //   MemBarVolatile
 1360   //
 1361   // n.b. the above node patterns are generated with a strict
 1362   // 'signature' configuration of input and output dependencies (see
 1363   // the predicates below for exact details). The card mark may be as
 1364   // simple as a few extra nodes or, in a few GC configurations, may
 1365   // include more complex control flow between the leading and
 1366   // trailing memory barriers. However, whatever the card mark
 1367   // configuration these signatures are unique to translated volatile
 1368   // reads/stores -- they will not appear as a result of any other
 1369   // bytecode translation or inlining nor as a consequence of
 1370   // optimizing transforms.
 1371   //
 1372   // We also want to catch inlined unsafe volatile gets and puts and
 1373   // be able to implement them using either ldar<x>/stlr<x> or some
 1374   // combination of ldr<x>/stlr<x> and dmb instructions.
 1375   //
 1376   // Inlined unsafe volatiles puts manifest as a minor variant of the
 1377   // normal volatile put node sequence containing an extra cpuorder
 1378   // membar
 1379   //
 1380   //   MemBarRelease
 1381   //   MemBarCPUOrder
 1382   //   StoreX[mo_release] {CardMark}-optional
 1383   //   MemBarCPUOrder
 1384   //   MemBarVolatile
 1385   //
 1386   // n.b. as an aside, a cpuorder membar is not itself subject to
 1387   // matching and translation by adlc rules.  However, the rule
 1388   // predicates need to detect its presence in order to correctly
 1389   // select the desired adlc rules.
 1390   //
 1391   // Inlined unsafe volatile gets manifest as a slightly different
 1392   // node sequence to a normal volatile get because of the
 1393   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
 1395   // MemBarAcquire, possibly through an optional DecodeN, is still
 1396   // present
 1397   //
 1398   //   MemBarCPUOrder
 1399   //        ||       \\
 1400   //   MemBarCPUOrder LoadX[mo_acquire]
 1401   //        ||            |
 1402   //        ||       {DecodeN} optional
 1403   //        ||       /
 1404   //     MemBarAcquire
 1405   //
 1406   // In this case the acquire membar does not directly depend on the
 1407   // load. However, we can be sure that the load is generated from an
 1408   // inlined unsafe volatile get if we see it dependent on this unique
 1409   // sequence of membar nodes. Similarly, given an acquire membar we
 1410   // can know that it was added because of an inlined unsafe volatile
 1411   // get if it is fed and feeds a cpuorder membar and if its feed
 1412   // membar also feeds an acquiring load.
 1413   //
 1414   // Finally an inlined (Unsafe) CAS operation is translated to the
 1415   // following ideal graph
 1416   //
 1417   //   MemBarRelease
 1418   //   MemBarCPUOrder
 1419   //   CompareAndSwapX {CardMark}-optional
 1420   //   MemBarCPUOrder
 1421   //   MemBarAcquire
 1422   //
 1423   // So, where we can identify these volatile read and write
 1424   // signatures we can choose to plant either of the above two code
 1425   // sequences. For a volatile read we can simply plant a normal
 1426   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
 1427   // also choose to inhibit translation of the MemBarAcquire and
 1428   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
 1429   //
 1430   // When we recognise a volatile store signature we can choose to
 1431   // plant at a dmb ish as a translation for the MemBarRelease, a
 1432   // normal str<x> and then a dmb ish for the MemBarVolatile.
 1433   // Alternatively, we can inhibit translation of the MemBarRelease
 1434   // and MemBarVolatile and instead plant a simple stlr<x>
 1435   // instruction.
 1436   //
 1437   // when we recognise a CAS signature we can choose to plant a dmb
 1438   // ish as a translation for the MemBarRelease, the conventional
 1439   // macro-instruction sequence for the CompareAndSwap node (which
 1440   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
 1441   // Alternatively, we can elide generation of the dmb instructions
 1442   // and plant the alternative CompareAndSwap macro-instruction
 1443   // sequence (which uses ldaxr<x>).
 1444   //
 1445   // Of course, the above only applies when we see these signature
 1446   // configurations. We still want to plant dmb instructions in any
 1447   // other cases where we may see a MemBarAcquire, MemBarRelease or
 1448   // MemBarVolatile. For example, at the end of a constructor which
 1449   // writes final/volatile fields we will see a MemBarRelease
 1450   // instruction and this needs a 'dmb ish' lest we risk the
 1451   // constructed object being visible without making the
 1452   // final/volatile field writes visible.
 1453   //
 1454   // n.b. the translation rules below which rely on detection of the
 1455   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
 1456   // If we see anything other than the signature configurations we
 1457   // always just translate the loads and stores to ldr<x> and str<x>
 1458   // and translate acquire, release and volatile membars to the
 1459   // relevant dmb instructions.
 1460   //
 1461 
  // is_CAS(int opcode, bool maybe_volatile)
  //
  // Returns true if opcode is a CompareAndSwapX, GetAndSetX or
  // GetAndAddX node, i.e. one that is always treated as a CAS-style
  // operation.  For the CompareAndExchangeX and weak CompareAndSwapX
  // variants the result is the caller-supplied maybe_volatile flag.
  // Any other opcode returns false.

  bool is_CAS(int opcode, bool maybe_volatile)
  {
    switch(opcode) {
      // We handle these
    case Op_CompareAndSwapI:
    case Op_CompareAndSwapL:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_CompareAndSwapB:
    case Op_CompareAndSwapS:
    case Op_GetAndSetI:
    case Op_GetAndSetL:
    case Op_GetAndSetP:
    case Op_GetAndSetN:
    case Op_GetAndAddI:
    case Op_GetAndAddL:
      return true;
      // These count as CAS only when the caller allows for a possibly
      // volatile context.
    case Op_CompareAndExchangeI:
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeB:
    case Op_CompareAndExchangeS:
    case Op_CompareAndExchangeL:
    case Op_CompareAndExchangeP:
    case Op_WeakCompareAndSwapB:
    case Op_WeakCompareAndSwapS:
    case Op_WeakCompareAndSwapI:
    case Op_WeakCompareAndSwapL:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      return maybe_volatile;
    default:
      return false;
    }
  }
 1507 
 1508   // helper to determine the maximum number of Phi nodes we may need to
 1509   // traverse when searching from a card mark membar for the merge mem
 1510   // feeding a trailing membar or vice versa
 1511 
 1512 // predicates controlling emit of ldr<x>/ldar<x>
 1513 
 1514 bool unnecessary_acquire(const Node *barrier)
 1515 {
 1516   assert(barrier->is_MemBar(), "expecting a membar");
 1517 
 1518   MemBarNode* mb = barrier->as_MemBar();
 1519 
 1520   if (mb->trailing_load()) {
 1521     return true;
 1522   }
 1523 
 1524   if (mb->trailing_load_store()) {
 1525     Node* load_store = mb->in(MemBarNode::Precedent);
 1526     assert(load_store->is_LoadStore(), "unexpected graph shape");
 1527     return is_CAS(load_store->Opcode(), true);
 1528   }
 1529 
 1530   return false;
 1531 }
 1532 
 1533 bool needs_acquiring_load(const Node *n)
 1534 {
 1535   assert(n->is_Load(), "expecting a load");
 1536   LoadNode *ld = n->as_Load();
 1537   return ld->is_acquire();
 1538 }
 1539 
 1540 bool unnecessary_release(const Node *n)
 1541 {
 1542   assert((n->is_MemBar() &&
 1543           n->Opcode() == Op_MemBarRelease),
 1544          "expecting a release membar");
 1545 
 1546   MemBarNode *barrier = n->as_MemBar();
 1547   if (!barrier->leading()) {
 1548     return false;
 1549   } else {
 1550     Node* trailing = barrier->trailing_membar();
 1551     MemBarNode* trailing_mb = trailing->as_MemBar();
 1552     assert(trailing_mb->trailing(), "Not a trailing membar?");
 1553     assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
 1554 
 1555     Node* mem = trailing_mb->in(MemBarNode::Precedent);
 1556     if (mem->is_Store()) {
 1557       assert(mem->as_Store()->is_release(), "");
 1558       assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
 1559       return true;
 1560     } else {
 1561       assert(mem->is_LoadStore(), "");
 1562       assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
 1563       return is_CAS(mem->Opcode(), true);
 1564     }
 1565   }
 1566   return false;
 1567 }
 1568 
// Returns true when a MemBarVolatile can be elided: it trails a
// releasing store, which will be matched to stlr<x> and thus already
// carries the required ordering.
bool unnecessary_volatile(const Node *n)
{
  // assert n->is_MemBar();
  MemBarNode *mbvol = n->as_MemBar();

  bool release = mbvol->trailing_store();
  assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
#ifdef ASSERT
  if (release) {
    // Sanity-check the leading/trailing membar pairing recorded in the
    // ideal graph.
    Node* leading = mbvol->leading_membar();
    assert(leading->Opcode() == Op_MemBarRelease, "");
    assert(leading->as_MemBar()->leading_store(), "");
    assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
  }
#endif

  return release;
}
 1587 
 1588 // predicates controlling emit of str<x>/stlr<x>
 1589 
 1590 bool needs_releasing_store(const Node *n)
 1591 {
 1592   // assert n->is_Store();
 1593   StoreNode *st = n->as_Store();
 1594   return st->trailing_membar() != NULL;
 1595 }
 1596 
 1597 // predicate controlling translation of CAS
 1598 //
 1599 // returns true if CAS needs to use an acquiring load otherwise false
 1600 
 1601 bool needs_acquiring_load_exclusive(const Node *n)
 1602 {
 1603   assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
 1604   LoadStoreNode* ldst = n->as_LoadStore();
 1605   if (is_CAS(n->Opcode(), false)) {
 1606     assert(ldst->trailing_membar() != NULL, "expected trailing membar");
 1607   } else {
 1608     return ldst->trailing_membar() != NULL;
 1609   }
 1610 
 1611   // so we can just return true here
 1612   return true;
 1613 }
 1614 
 1615 #define __ _masm.
 1616 
 1617 // advance declarations for helper functions to convert register
 1618 // indices to register objects
 1619 
 1620 // the ad file has to provide implementations of certain methods
 1621 // expected by the generic code
 1622 //
 1623 // REQUIRED FUNCTIONALITY
 1624 
 1625 //=============================================================================
 1626 
 1627 // !!!!! Special hack to get all types of calls to specify the byte offset
 1628 //       from the start of the call to the point where the return address
 1629 //       will point.
 1630 
 1631 int MachCallStaticJavaNode::ret_addr_offset()
 1632 {
 1633   // call should be a simple bl
 1634   int off = 4;
 1635   return off;
 1636 }
 1637 
// Four 4-byte instructions precede the return address: the inline-cache
// constant load (movz, movk, movk) followed by the bl.
int MachCallDynamicJavaNode::ret_addr_offset()
{
  return 16; // movz, movk, movk, bl
}
 1642 
 1643 int MachCallRuntimeNode::ret_addr_offset() {
 1644   // for generated stubs the call will be
 1645   //   bl(addr)
 1646   // or with far branches
 1647   //   bl(trampoline_stub)
 1648   // for real runtime callouts it will be six instructions
 1649   // see aarch64_enc_java_to_runtime
 1650   //   adr(rscratch2, retaddr)
 1651   //   str(rscratch2, Address(rthread, JavaThread::last_Java_pc_offset()));
 1652   //   lea(rscratch1, RuntimeAddress(addr)
 1653   //   blr(rscratch1)
 1654   CodeBlob *cb = CodeCache::find_blob(_entry_point);
 1655   if (cb) {
 1656     return 1 * NativeInstruction::instruction_size;
 1657   } else {
 1658     return 6 * NativeInstruction::instruction_size;
 1659   }
 1660 }
 1661 
 1662 //=============================================================================
 1663 
#ifndef PRODUCT
// Debug-only pretty-printer for the breakpoint node.
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif

// Emit a brk #0 breakpoint instruction.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  C2_MacroAssembler _masm(&cbuf);
  __ brk(0);
}

uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 1678 
 1679 //=============================================================================
 1680 
#ifndef PRODUCT
  // Debug-only pretty-printer for padding nops.
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif

  // Emit _count nop instructions.
  void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
    C2_MacroAssembler _masm(&cbuf);
    for (int i = 0; i < _count; i++) {
      __ nop();
    }
  }

  // Exact size: one instruction per nop.
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
 1697 
 1698 //=============================================================================
// The constant table base produces no value in a register on aarch64.
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;

int ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}

// No post-register-allocation expansion is needed (or allowed): the
// node has an empty encoding, see emit()/size() below.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}

void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}

uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
 1723 
 1724 #ifndef PRODUCT
// Debug listing mirroring the instruction sequence that
// MachPrologNode::emit() produces.
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  // Frame size in bytes (frame_slots counts 32-bit slots).
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  if (C->output()->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  if (VM_Version::use_rop_protection()) {
    // ROP protection: probe lr, then PAC-sign the return address.
    st->print("ldr  zr, [lr]\n\t");
    st->print("paciaz\n\t");
  }
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    // Small frame: drop sp in one sub, save fp/lr at the top of the frame.
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    // Large frame: push fp/lr first, then lower sp via a scratch register.
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
  if (C->stub_function() == NULL && BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) {
    // Pseudo-listing of the nmethod entry barrier check.
    st->print("\n\t");
    st->print("ldr  rscratch1, [guard]\n\t");
    st->print("dmb ishld\n\t");
    st->print("ldr  rscratch2, [rthread, #thread_disarmed_guard_value_offset]\n\t");
    st->print("cmp  rscratch1, rscratch2\n\t");
    st->print("b.eq skip");
    st->print("\n\t");
    st->print("blr #nmethod_entry_barrier_stub\n\t");
    st->print("b skip\n\t");
    st->print("guard: int\n\t");
    st->print("\n\t");
    st->print("skip:\n\t");
  }
}
 1762 #endif
 1763 
// Emit the method prologue: patchable nop, optional class-init barrier,
// stack-overflow bang, frame construction and (when a barrier set with an
// nmethod barrier is active) the nmethod entry barrier.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const int framesize = C->output()->frame_size_in_bytes();

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  if (C->clinit_barrier_on_entry()) {
    assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");

    Label L_skip_barrier;

    // Jump to the wrong-method stub unless the holder class passes the
    // clinit barrier check.
    __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
    __ bind(L_skip_barrier);
  }

  if (C->max_vector_size() > 0) {
    // Re-establish the all-true SVE predicate register used by vector code.
    __ reinitialize_ptrue();
  }

  int bangsize = C->output()->bang_size_in_bytes();
  if (C->output()->need_stack_bang(bangsize))
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (C->stub_function() == NULL) {
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
    if (BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) {
      // Dummy labels for just measuring the code size
      Label dummy_slow_path;
      Label dummy_continuation;
      Label dummy_guard;
      Label* slow_path = &dummy_slow_path;
      Label* continuation = &dummy_continuation;
      Label* guard = &dummy_guard;
      if (!Compile::current()->output()->in_scratch_emit_size()) {
        // Use real labels from actual stub when not emitting code for the purpose of measuring its size
        C2EntryBarrierStub* stub = new (Compile::current()->comp_arena()) C2EntryBarrierStub();
        Compile::current()->output()->add_stub(stub);
        slow_path = &stub->entry();
        continuation = &stub->continuation();
        guard = &stub->guard();
      }
      // In the C2 code, we move the non-hot part of nmethod entry barriers out-of-line to a stub.
      bs->nmethod_entry_barrier(&_masm, slow_path, continuation, guard);
    }
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->output()->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    ConstantTable& constant_table = C->output()->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
 1832 
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

// The prologue emits no relocatable values.
int MachPrologNode::reloc() const
{
  return 0;
}
 1843 
 1844 //=============================================================================
 1845 
 1846 #ifndef PRODUCT
// Debug listing mirroring the instruction sequence that
// MachEpilogNode::emit() produces.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  // Frame size in bytes (frame_slots counts 32-bit slots).
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    // Small frame: restore fp/lr from the top of the frame, then one add.
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    // Large frame: raise sp via scratch register, then pop fp/lr.
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }
  if (VM_Version::use_rop_protection()) {
    // ROP protection: authenticate the return address, then probe lr.
    st->print("autiaz\n\t");
    st->print("ldr  zr, [lr]\n\t");
  }

  if (do_polling() && C->is_method_compilation()) {
    // Safepoint poll on method return.
    st->print("# test polling word\n\t");
    st->print("ldr  rscratch1, [rthread],#%d\n\t", in_bytes(JavaThread::polling_word_offset()));
    st->print("cmp  sp, rscratch1\n\t");
    st->print("bhi #slow_path");
  }
}
 1875 #endif
 1876 
// Emit the method epilogue: tear down the frame, optional reserved-stack
// check, and (for method compiles) the return safepoint poll.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);
  // Frame size in bytes (frame_slots counts 32-bit slots).
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    Label dummy_label;
    Label* code_stub = &dummy_label;
    if (!C->output()->in_scratch_emit_size()) {
      // Real slow-path stub; the dummy label is only used while measuring size.
      C2SafepointPollStub* stub = new (C->comp_arena()) C2SafepointPollStub(__ offset());
      C->output()->add_stub(stub);
      code_stub = &stub->entry();
    }
    // Mark the poll for the relocator, then emit the poll itself.
    __ relocate(relocInfo::poll_return_type);
    __ safepoint_poll(*code_stub, true /* at_return */, false /* acquire */, true /* in_nmethod */);
  }
}
 1900 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
 1914 
 1915 //=============================================================================
 1916 
 1917 // Figure out which register class each belongs in: rc_int, rc_float or
 1918 // rc_stack.
 1919 enum RC { rc_bad, rc_int, rc_float, rc_predicate, rc_stack };
 1920 
 1921 static enum RC rc_class(OptoReg::Name reg) {
 1922 
 1923   if (reg == OptoReg::Bad) {
 1924     return rc_bad;
 1925   }
 1926 
 1927   // we have 32 int registers * 2 halves
 1928   int slots_of_int_registers = Register::number_of_registers * Register::max_slots_per_register;
 1929 
 1930   if (reg < slots_of_int_registers) {
 1931     return rc_int;
 1932   }
 1933 
 1934   // we have 32 float register * 8 halves
 1935   int slots_of_float_registers = FloatRegister::number_of_registers * FloatRegister::max_slots_per_register;
 1936   if (reg < slots_of_int_registers + slots_of_float_registers) {
 1937     return rc_float;
 1938   }
 1939 
 1940   int slots_of_predicate_registers = PRegister::number_of_registers * PRegister::max_slots_per_register;
 1941   if (reg < slots_of_int_registers + slots_of_float_registers + slots_of_predicate_registers) {
 1942     return rc_predicate;
 1943   }
 1944 
 1945   // Between predicate regs & stack is the flags.
 1946   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
 1947 
 1948   return rc_stack;
 1949 }
 1950 
 1951 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
 1952   Compile* C = ra_->C;
 1953 
 1954   // Get registers to move.
 1955   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
 1956   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
 1957   OptoReg::Name dst_hi = ra_->get_reg_second(this);
 1958   OptoReg::Name dst_lo = ra_->get_reg_first(this);
 1959 
 1960   enum RC src_hi_rc = rc_class(src_hi);
 1961   enum RC src_lo_rc = rc_class(src_lo);
 1962   enum RC dst_hi_rc = rc_class(dst_hi);
 1963   enum RC dst_lo_rc = rc_class(dst_lo);
 1964 
 1965   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
 1966 
 1967   if (src_hi != OptoReg::Bad && !bottom_type()->isa_vectmask()) {
 1968     assert((src_lo&1)==0 && src_lo+1==src_hi &&
 1969            (dst_lo&1)==0 && dst_lo+1==dst_hi,
 1970            "expected aligned-adjacent pairs");
 1971   }
 1972 
 1973   if (src_lo == dst_lo && src_hi == dst_hi) {
 1974     return 0;            // Self copy, no move.
 1975   }
 1976 
 1977   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
 1978               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
 1979   int src_offset = ra_->reg2offset(src_lo);
 1980   int dst_offset = ra_->reg2offset(dst_lo);
 1981 
 1982   if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
 1983     uint ireg = ideal_reg();
 1984     if (ireg == Op_VecA && cbuf) {
 1985       C2_MacroAssembler _masm(cbuf);
 1986       int sve_vector_reg_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
 1987       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
 1988         // stack->stack
 1989         __ spill_copy_sve_vector_stack_to_stack(src_offset, dst_offset,
 1990                                                 sve_vector_reg_size_in_bytes);
 1991       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
 1992         __ spill_sve_vector(as_FloatRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo),
 1993                             sve_vector_reg_size_in_bytes);
 1994       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
 1995         __ unspill_sve_vector(as_FloatRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo),
 1996                               sve_vector_reg_size_in_bytes);
 1997       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
 1998         __ sve_orr(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 1999                    as_FloatRegister(Matcher::_regEncode[src_lo]),
 2000                    as_FloatRegister(Matcher::_regEncode[src_lo]));
 2001       } else {
 2002         ShouldNotReachHere();
 2003       }
 2004     } else if (cbuf) {
 2005       assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
 2006       C2_MacroAssembler _masm(cbuf);
 2007       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
 2008       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
 2009         // stack->stack
 2010         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
 2011         if (ireg == Op_VecD) {
 2012           __ unspill(rscratch1, true, src_offset);
 2013           __ spill(rscratch1, true, dst_offset);
 2014         } else {
 2015           __ spill_copy128(src_offset, dst_offset);
 2016         }
 2017       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
 2018         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2019                ireg == Op_VecD ? __ T8B : __ T16B,
 2020                as_FloatRegister(Matcher::_regEncode[src_lo]));
 2021       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
 2022         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
 2023                  ireg == Op_VecD ? __ D : __ Q,
 2024                  ra_->reg2offset(dst_lo));
 2025       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
 2026         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2027                    ireg == Op_VecD ? __ D : __ Q,
 2028                    ra_->reg2offset(src_lo));
 2029       } else {
 2030         ShouldNotReachHere();
 2031       }
 2032     }
 2033   } else if (cbuf) {
 2034     C2_MacroAssembler _masm(cbuf);
 2035     switch (src_lo_rc) {
 2036     case rc_int:
 2037       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
 2038         if (is64) {
 2039             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
 2040                    as_Register(Matcher::_regEncode[src_lo]));
 2041         } else {
 2042             C2_MacroAssembler _masm(cbuf);
 2043             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
 2044                     as_Register(Matcher::_regEncode[src_lo]));
 2045         }
 2046       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
 2047         if (is64) {
 2048             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2049                      as_Register(Matcher::_regEncode[src_lo]));
 2050         } else {
 2051             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2052                      as_Register(Matcher::_regEncode[src_lo]));
 2053         }
 2054       } else {                    // gpr --> stack spill
 2055         assert(dst_lo_rc == rc_stack, "spill to bad register class");
 2056         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
 2057       }
 2058       break;
 2059     case rc_float:
 2060       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
 2061         if (is64) {
 2062             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
 2063                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2064         } else {
 2065             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
 2066                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2067         }
 2068       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
 2069         if (is64) {
 2070             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2071                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2072         } else {
 2073             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2074                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2075         }
 2076       } else {                    // fpr --> stack spill
 2077         assert(dst_lo_rc == rc_stack, "spill to bad register class");
 2078         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
 2079                  is64 ? __ D : __ S, dst_offset);
 2080       }
 2081       break;
 2082     case rc_stack:
 2083       if (dst_lo_rc == rc_int) {  // stack --> gpr load
 2084         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
 2085       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
 2086         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2087                    is64 ? __ D : __ S, src_offset);
 2088       } else if (dst_lo_rc == rc_predicate) {
 2089         __ unspill_sve_predicate(as_PRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo),
 2090                                  Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
 2091       } else {                    // stack --> stack copy
 2092         assert(dst_lo_rc == rc_stack, "spill to bad register class");
 2093         if (ideal_reg() == Op_RegVectMask) {
 2094           __ spill_copy_sve_predicate_stack_to_stack(src_offset, dst_offset,
 2095                                                      Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
 2096         } else {
 2097           __ unspill(rscratch1, is64, src_offset);
 2098           __ spill(rscratch1, is64, dst_offset);
 2099         }
 2100       }
 2101       break;
 2102     case rc_predicate:
 2103       if (dst_lo_rc == rc_predicate) {
 2104         __ sve_mov(as_PRegister(Matcher::_regEncode[dst_lo]), as_PRegister(Matcher::_regEncode[src_lo]));
 2105       } else if (dst_lo_rc == rc_stack) {
 2106         __ spill_sve_predicate(as_PRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo),
 2107                                Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
 2108       } else {
 2109         assert(false, "bad src and dst rc_class combination.");
 2110         ShouldNotReachHere();
 2111       }
 2112       break;
 2113     default:
 2114       assert(false, "bad rc_class for spill");
 2115       ShouldNotReachHere();
 2116     }
 2117   }
 2118 
 2119   if (st) {
 2120     st->print("spill ");
 2121     if (src_lo_rc == rc_stack) {
 2122       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
 2123     } else {
 2124       st->print("%s -> ", Matcher::regName[src_lo]);
 2125     }
 2126     if (dst_lo_rc == rc_stack) {
 2127       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
 2128     } else {
 2129       st->print("%s", Matcher::regName[dst_lo]);
 2130     }
 2131     if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
 2132       int vsize = 0;
 2133       switch (ideal_reg()) {
 2134       case Op_VecD:
 2135         vsize = 64;
 2136         break;
 2137       case Op_VecX:
 2138         vsize = 128;
 2139         break;
 2140       case Op_VecA:
 2141         vsize = Matcher::scalable_vector_reg_size(T_BYTE) * 8;
 2142         break;
 2143       default:
 2144         assert(false, "bad register type for spill");
 2145         ShouldNotReachHere();
 2146       }
 2147       st->print("\t# vector spill size = %d", vsize);
 2148     } else if (ideal_reg() == Op_RegVectMask) {
 2149       assert(Matcher::supports_scalable_vector(), "bad register type for spill");
 2150       int vsize = Matcher::scalable_predicate_reg_slots() * 32;
 2151       st->print("\t# predicate spill size = %d", vsize);
 2152     } else {
 2153       st->print("\t# spill size = %d", is64 ? 64 : 32);
 2154     }
 2155   }
 2156 
 2157   return 0;
 2158 
 2159 }
 2160 
#ifndef PRODUCT
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  // Before register allocation no registers are assigned; print a generic form.
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    implementation(NULL, ra_, false, st);
}
#endif

void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}

// Variable size; computed by emitting into a scratch buffer.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 2177 
 2178 //=============================================================================
 2179 
 2180 #ifndef PRODUCT
 2181 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 2182   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 2183   int reg = ra_->get_reg_first(this);
 2184   st->print("add %s, rsp, #%d]\t# box lock",
 2185             Matcher::regName[reg], offset);
 2186 }
 2187 #endif
 2188 
// Materialize the stack address of the lock box into the allocated register.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  C2_MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  // This add will handle any 24-bit signed offset. 24 bits allows an
  // 8 megabyte stack frame.
  __ add(as_Register(reg), sp, offset);
}

uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());

  // One instruction when the offset fits an add immediate; otherwise the
  // macro assembler needs a second instruction to build the offset.
  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    return NativeInstruction::instruction_size;
  } else {
    return 2 * NativeInstruction::instruction_size;
  }
}
 2210 
 2211 //=============================================================================
 2212 
 2213 #ifndef PRODUCT
 2214 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 2215 {
 2216   st->print_cr("# MachUEPNode");
 2217   if (UseCompressedClassPointers) {
 2218     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 2219     if (CompressedKlassPointers::shift() != 0) {
 2220       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
 2221     }
 2222   } else {
 2223    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 2224   }
 2225   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
 2226   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
 2227 }
 2228 #endif
 2229 
// Emit the unverified entry point: compare the receiver's klass against the
// inline cache and jump to the IC miss stub on mismatch.
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  C2_MacroAssembler _masm(&cbuf);

  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}

// Variable size; computed by emitting into a scratch buffer.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
 2248 
 2249 // REQUIRED EMIT CODE
 2250 
 2251 //=============================================================================
 2252 
 2253 // Emit exception handler code.
// Emit the exception handler stub: a far jump to the shared exception blob.
// Returns the offset of the handler within the stub section, or 0 if the
// code cache is full.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  C2_MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
 2272 
 2273 // Emit deopt handler code.
// Emit the deopt handler stub: capture the return address in lr, then far
// jump to the deopt blob's unpack entry. Returns the stub offset, or 0 if
// the code cache is full.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  C2_MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // lr must point at this handler so the unpacker knows where we deopted.
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset == (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
 2293 
 2294 // REQUIRED MATCHER CODE
 2295 
 2296 //=============================================================================
 2297 
 2298 const bool Matcher::match_rule_supported(int opcode) {
 2299   if (!has_match_rule(opcode))
 2300     return false;
 2301 
 2302   bool ret_value = true;
 2303   switch (opcode) {
 2304     case Op_OnSpinWait:
 2305       return VM_Version::supports_on_spin_wait();
 2306     case Op_CacheWB:
 2307     case Op_CacheWBPreSync:
 2308     case Op_CacheWBPostSync:
 2309       if (!VM_Version::supports_data_cache_line_flush()) {
 2310         ret_value = false;
 2311       }
 2312       break;
 2313     case Op_ExpandBits:
 2314     case Op_CompressBits:
 2315       if (!(UseSVE > 1 && VM_Version::supports_svebitperm())) {
 2316         ret_value = false;
 2317       }
 2318       break;
 2319   }
 2320 
 2321   return ret_value; // Per default match rules are supported.
 2322 }
 2323 
// Register mask covering the SVE predicate registers.
const RegMask* Matcher::predicate_reg_mask(void) {
  return &_PR_REG_mask;
}

const TypeVectMask* Matcher::predicate_reg_type(const Type* elemTy, int length) {
  return new TypeVectMask(elemTy, length);
}

// Vector calling convention not yet implemented.
const bool Matcher::supports_vector_calling_convention(void) {
  return false;
}

// Unreachable while supports_vector_calling_convention() is false.
OptoRegPair Matcher::vector_return_value(uint ideal_reg) {
  Unimplemented();
  return OptoRegPair(0, 0);
}
 2341 
 2342 // Is this branch offset short enough that a short branch can be used?
 2343 //
 2344 // NOTE: If the platform does not provide any short branch variants, then
 2345 //       this method should return false for offset 0.
 2346 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
 2347   // The passed offset is relative to address of the branch.
 2348 
 2349   return (-32768 <= offset && offset < 32768);
 2350 }
 2351 
// Vector width in bytes.
// Returns 0 when no vector of the given element type is profitable.
const int Matcher::vector_width_in_bytes(BasicType bt) {
  // The MaxVectorSize should have been set by detecting SVE max vector register size.
  int size = MIN2((UseSVE > 0) ? 256 : 16, (int)MaxVectorSize);
  // Minimum 2 values in vector
  if (size < 2*type2aelembytes(bt)) size = 0;
  // But never < 4
  if (size < 4) size = 0;
  return size;
}
 2362 
// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}

// Minimum vector size (number of elements) for the given element type.
const int Matcher::min_vector_size(const BasicType bt) {
  int max_size = max_vector_size(bt);
  // Limit the min vector size to 8 bytes.
  int size = 8 / type2aelembytes(bt);
  if (bt == T_BYTE) {
    // To support vector api shuffle/rearrange.
    size = 4;
  } else if (bt == T_BOOLEAN) {
    // To support vector api load/store mask.
    size = 2;
  }
  if (size < 2) size = 2;
  return MIN2(size, max_size);
}

// Auto-vectorization (superword) uses the same limit as the Vector API.
const int Matcher::superword_max_vector_size(const BasicType bt) {
  return Matcher::max_vector_size(bt);
}

// Actual max scalable vector register length.
const int Matcher::scalable_vector_reg_size(const BasicType bt) {
  return Matcher::max_vector_size(bt);
}
 2391 
// Vector ideal reg.
// Map a vector length in bytes onto the ideal register class used to hold it.
const uint Matcher::vector_ideal_reg(int len) {
  // Lengths beyond 16 bytes are only possible with SVE scalable vectors.
  if (UseSVE > 0 && 16 < len && len <= 256) {
    return Op_VecA;
  }
  switch(len) {
    // For 16-bit/32-bit mask vector, reuse VecD.
    case  2:
    case  4:
    case  8: return Op_VecD;
    case 16: return Op_VecX;
  }
  ShouldNotReachHere();
  return 0;
}
 2407 
// Replace a generic vector operand by the concrete operand class matching
// the ideal register kind.
MachOper* Matcher::pd_specialize_generic_vector_operand(MachOper* generic_opnd, uint ideal_reg, bool is_temp) {
  assert(Matcher::is_generic_vector(generic_opnd), "not generic");
  switch (ideal_reg) {
    case Op_VecA: return new vecAOper();
    case Op_VecD: return new vecDOper();
    case Op_VecX: return new vecXOper();
  }
  ShouldNotReachHere();
  return NULL;
}

// No register-to-register vector moves are modeled as reg2reg on AArch64.
bool Matcher::is_reg2reg_move(MachNode* m) {
  return false;
}

bool Matcher::is_generic_vector(MachOper* opnd)  {
  return opnd->opcode() == VREG;
}
 2426 
// Return whether or not this register is ever used as an argument.
// This function is used on startup to build the trampoline stubs in
// generateOptoStub.  Registers not mentioned will be killed by the VM
// call in the trampoline, and arguments in those registers not be
// available to the callee.
bool Matcher::can_be_java_arg(int reg)
{
  // AArch64 Java calling convention: integer args in r0-r7,
  // float/vector args in v0-v7 (both halves of each register).
  return
    reg ==  R0_num || reg == R0_H_num ||
    reg ==  R1_num || reg == R1_H_num ||
    reg ==  R2_num || reg == R2_H_num ||
    reg ==  R3_num || reg == R3_H_num ||
    reg ==  R4_num || reg == R4_H_num ||
    reg ==  R5_num || reg == R5_H_num ||
    reg ==  R6_num || reg == R6_H_num ||
    reg ==  R7_num || reg == R7_H_num ||
    reg ==  V0_num || reg == V0_H_num ||
    reg ==  V1_num || reg == V1_H_num ||
    reg ==  V2_num || reg == V2_H_num ||
    reg ==  V3_num || reg == V3_H_num ||
    reg ==  V4_num || reg == V4_H_num ||
    reg ==  V5_num || reg == V5_H_num ||
    reg ==  V6_num || reg == V6_H_num ||
    reg ==  V7_num || reg == V7_H_num;
}

// Every Java argument register may be spilled.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
 2457 
// Integer register pressure threshold used by the register allocator;
// overridable via the INTPRESSURE flag.
uint Matcher::int_pressure_limit()
{
  // JDK-8183543: When taking the number of available registers as int
  // register pressure threshold, the jtreg test:
  // test/hotspot/jtreg/compiler/regalloc/TestC2IntPressure.java
  // failed due to C2 compilation failure with
  // "COMPILE SKIPPED: failed spill-split-recycle sanity check".
  //
  // A derived pointer is live at CallNode and then is flagged by RA
  // as a spilled LRG. Spilling heuristics(Spill-USE) explicitly skip
  // derived pointers and lastly fail to spill after reaching maximum
  // number of iterations. Lowering the default pressure threshold to
  // (_NO_SPECIAL_REG32_mask.Size() minus 1) forces CallNode to become
  // a high register pressure area of the code so that split_DEF can
  // generate DefinitionSpillCopy for the derived pointer.
  uint default_int_pressure_threshold = _NO_SPECIAL_REG32_mask.Size() - 1;
  if (!PreserveFramePointer) {
    // When PreserveFramePointer is off, frame pointer is allocatable,
    // but different from other SOC registers, it is excluded from
    // fatproj's mask because its save type is No-Save. Decrease 1 to
    // ensure high pressure at fatproj when PreserveFramePointer is off.
    // See check_pressure_at_fatproj().
    default_int_pressure_threshold--;
  }
  return (INTPRESSURE == -1) ? default_int_pressure_threshold : INTPRESSURE;
}

// Float register pressure threshold; overridable via FLOATPRESSURE.
uint Matcher::float_pressure_limit()
{
  // _FLOAT_REG_mask is generated by adlc from the float_reg register class.
  return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.Size() : FLOATPRESSURE;
}
 2490 
 2491 bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
 2492   return false;
 2493 }
 2494 
// Register for DIVI projection of divmodI.
// Not used on AArch64 (no fused div/mod node), so this must never be called.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
 2499 
// Register for MODI projection of divmodI.
// Not used on AArch64; must never be called.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
 2505 
// Register for DIVL projection of divmodL.
// Not used on AArch64; must never be called.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
 2511 
// Register for MODL projection of divmodL.
// Not used on AArch64; must never be called.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
 2517 
// Mask of the register that preserves SP across a method-handle invoke:
// the frame pointer on AArch64.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
 2521 
 2522 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
 2523   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
 2524     Node* u = addp->fast_out(i);
 2525     if (u->is_LoadStore()) {
 2526       // On AArch64, LoadStoreNodes (i.e. compare and swap
 2527       // instructions) only take register indirect as an operand, so
 2528       // any attempt to use an AddPNode as an input to a LoadStoreNode
 2529       // must fail.
 2530       return false;
 2531     }
 2532     if (u->is_Mem()) {
 2533       int opsize = u->as_Mem()->memory_size();
 2534       assert(opsize > 0, "unexpected memory operand size");
 2535       if (u->as_Mem()->memory_size() != (1<<shift)) {
 2536         return false;
 2537       }
 2538     }
 2539   }
 2540   return true;
 2541 }
 2542 
// Convert BoolTest condition to Assembler condition.
// Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
// Unsigned comparisons map to the AArch64 unsigned condition codes
// (LS/HS/LO/HI), signed ones to LE/GE/LT/GT.
Assembler::Condition to_assembler_cond(BoolTest::mask cond) {
  Assembler::Condition result;
  switch(cond) {
    case BoolTest::eq:
      result = Assembler::EQ; break;
    case BoolTest::ne:
      result = Assembler::NE; break;
    case BoolTest::le:
      result = Assembler::LE; break;
    case BoolTest::ge:
      result = Assembler::GE; break;
    case BoolTest::lt:
      result = Assembler::LT; break;
    case BoolTest::gt:
      result = Assembler::GT; break;
    case BoolTest::ule:
      result = Assembler::LS; break;
    case BoolTest::uge:
      result = Assembler::HS; break;
    case BoolTest::ult:
      result = Assembler::LO; break;
    case BoolTest::ugt:
      result = Assembler::HI; break;
    case BoolTest::overflow:
      result = Assembler::VS; break;
    case BoolTest::no_overflow:
      result = Assembler::VC; break;
    default:
      ShouldNotReachHere();
      return Assembler::Condition(-1);
  }

  // Check conversion
  // Cross-check the table above against the operand classes it mirrors,
  // so the two mappings cannot silently diverge (debug builds only).
  if (cond & BoolTest::unsigned_compare) {
    assert(cmpOpUOper((BoolTest::mask)((int)cond & ~(BoolTest::unsigned_compare))).ccode() == result, "Invalid conversion");
  } else {
    assert(cmpOpOper(cond).ccode() == result, "Invalid conversion");
  }

  return result;
}
 2586 
// Binary src (Replicate con)
// Returns true when 'm' is an invariant replicated integer constant that
// can be encoded directly as an SVE logical or add/sub immediate for the
// vector operation 'n', so the matcher may clone it instead of
// materializing a vector register.
bool is_valid_sve_arith_imm_pattern(Node* n, Node* m) {
  if (n == NULL || m == NULL) {
    return false;
  }

  // Only relevant when SVE is enabled and 'm' is a splat of a loop
  // invariant value.
  if (UseSVE == 0 || !VectorNode::is_invariant_vector(m)) {
    return false;
  }

  Node* imm_node = m->in(1);
  if (!imm_node->is_Con()) {
    return false;
  }

  const Type* t = imm_node->bottom_type();
  if (!(t->isa_int() || t->isa_long())) {
    return false;
  }

  switch (n->Opcode()) {
  case Op_AndV:
  case Op_OrV:
  case Op_XorV: {
    // Logical ops: the constant must be a valid SVE bitmask immediate
    // for the vector's element width.
    Assembler::SIMD_RegVariant T = Assembler::elemType_to_regVariant(Matcher::vector_element_basic_type(n));
    uint64_t value = t->isa_long() ? (uint64_t)imm_node->get_long() : (uint64_t)imm_node->get_int();
    return Assembler::operand_valid_for_sve_logical_immediate(Assembler::regVariant_to_elemBits(T), value);
  }
  case Op_AddVB:
    // Byte add: immediate range checked directly here.
    return (imm_node->get_int() <= 255 && imm_node->get_int() >= -255);
  case Op_AddVS:
  case Op_AddVI:
    return Assembler::operand_valid_for_sve_add_sub_immediate((int64_t)imm_node->get_int());
  case Op_AddVL:
    return Assembler::operand_valid_for_sve_add_sub_immediate(imm_node->get_long());
  default:
    return false;
  }
}
 2626 
 2627 // (XorV src (Replicate m1))
 2628 // (XorVMask src (MaskAll m1))
 2629 bool is_vector_bitwise_not_pattern(Node* n, Node* m) {
 2630   if (n != NULL && m != NULL) {
 2631     return (n->Opcode() == Op_XorV || n->Opcode() == Op_XorVMask) &&
 2632            VectorNode::is_all_ones_vector(m);
 2633   }
 2634   return false;
 2635 }
 2636 
 2637 // Should the matcher clone input 'm' of node 'n'?
 2638 bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
 2639   if (is_vshift_con_pattern(n, m) ||
 2640       is_vector_bitwise_not_pattern(n, m) ||
 2641       is_valid_sve_arith_imm_pattern(n, m)) {
 2642     mstack.push(m, Visit);
 2643     return true;
 2644   }
 2645   return false;
 2646 }
 2647 
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
// Returns true when the AddP 'm' was recognized as a foldable address
// expression; in that case all relevant inputs are pushed on 'mstack'
// and flagged in 'address_visited' so they are matched as part of the
// address, not as standalone nodes.
bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  // Simple base + constant offset is handled by the shared helper.
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  // Case 1: offset is (LShiftL x con) and every memory use accesses
  // exactly 1 << con bytes, so the shift can become the address scale.
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    // If the shifted value is (ConvI2L x), fold the conversion into the
    // address too (sxtw extend) provided it has no other uses.
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  // Case 2: offset is a bare (ConvI2L x); fold the sign extension into
  // the addressing mode.
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
 2688 
// Emit a volatile (acquire/release) load or store INSN of REG.  Volatile
// accesses only permit a plain register-indirect address, so any
// index/scale/displacement component is rejected with a guarantee().
// Declares the local '_masm' that the '__' shorthand below expands to.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  C2_MacroAssembler _masm(&cbuf);                                       \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
 2697 
 2698 
// Build an Address from the matcher's decomposed memory operand.  The
// address-mode opcode decides whether an index register is sign extended
// (sxtw) or zero-shifted (lsl); index == -1 selects base + displacement.
static Address mem2address(int opcode, Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
    case INDINDEXI2L:
    case INDINDEXI2LN:
      // These address modes carry an int index converted to long:
      // sign-extend it as part of the addressing mode.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      return Address(base, disp);
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      return Address(base, as_Register(index), scale);
    }
  }
 2724 
 2725 
// Member-function-pointer types used to pass the concrete load/store
// emitter (e.g. &MacroAssembler::ldrw) into the loadStore() helpers.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_insn2)(Register Rt, Register adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
 2731 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  // Emits 'insn' (an integer-register load/store) for the decomposed
  // memory operand; base+offset forms are first legitimized so an
  // out-of-range immediate offset is rewritten through rscratch1.
  static void loadStore(C2_MacroAssembler masm, mem_insn insn,
                        Register reg, int opcode,
                        Register base, int index, int scale, int disp,
                        int size_in_memory)
  {
    Address addr = mem2address(opcode, base, index, scale, disp);
    if (addr.getMode() == Address::base_plus_offset) {
      /* Fix up any out-of-range offsets. */
      // legitimize_address may clobber rscratch1, so it must not alias
      // the base or the data register.
      assert_different_registers(rscratch1, base);
      assert_different_registers(rscratch1, reg);
      addr = masm.legitimize_address(addr, size_in_memory, rscratch1);
    }
    (masm.*insn)(reg, addr);
  }
 2749 
  // Float-register variant of loadStore().  Mirrors mem2address()'s
  // opcode-driven choice between a sign-extended (sxtw) and shifted
  // (lsl) index; base+offset forms are legitimized via rscratch1.
  static void loadStore(C2_MacroAssembler masm, mem_float_insn insn,
                        FloatRegister reg, int opcode,
                        Register base, int index, int size, int disp,
                        int size_in_memory)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      // Int index converted to long: sign-extend in the address mode.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      /* If we get an out-of-range offset it is a bug in the compiler,
         so we assert here. */
      assert(Address::offset_ok_for_immed(disp, exact_log2(size_in_memory)), "c2 compiler bug");
      /* Fix up any out-of-range offsets. */
      assert_different_registers(rscratch1, base);
      Address addr = Address(base, disp);
      addr = masm.legitimize_address(addr, size_in_memory, rscratch1);
      (masm.*insn)(reg, addr);
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
 2780 
 2781   static void loadStore(C2_MacroAssembler masm, mem_vector_insn insn,
 2782                         FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
 2783                         int opcode, Register base, int index, int size, int disp)
 2784   {
 2785     if (index == -1) {
 2786       (masm.*insn)(reg, T, Address(base, disp));
 2787     } else {
 2788       assert(disp == 0, "unsupported address mode");
 2789       (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
 2790     }
 2791   }
 2792 
 2793 %}
 2794 
 2795 
 2796 
 2797 //----------ENCODING BLOCK-----------------------------------------------------
 2798 // This block specifies the encoding classes used by the compiler to
 2799 // output byte streams.  Encoding classes are parameterized macros
 2800 // used by Machine Instruction Nodes in order to generate the bit
 2801 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  Four interfaces are
// currently supported: REG_INTER, CONST_INTER, MEMORY_INTER, and
 2804 // COND_INTER.  REG_INTER causes an operand to generate a function
 2805 // which returns its register number when queried.  CONST_INTER causes
 2806 // an operand to generate a function which returns the value of the
 2807 // constant when queried.  MEMORY_INTER causes an operand to generate
 2808 // four functions which return the Base Register, the Index Register,
 2809 // the Scale Value, and the Offset Value of the operand when queried.
 2810 // COND_INTER causes an operand to generate six functions which return
 2811 // the encoding code (ie - encoding bits for the instruction)
 2812 // associated with each basic boolean condition for a conditional
 2813 // instruction.
 2814 //
 2815 // Instructions specify two basic values for encoding.  Again, a
 2816 // function is available to check if the constant displacement is an
 2817 // oop. They use the ins_encode keyword to specify their encoding
 2818 // classes (which must be a sequence of enc_class names, and their
 2819 // parameters, specified in the encoding block), and they use the
 2820 // opcode keyword to specify, in order, their primary, secondary, and
 2821 // tertiary opcode.  Only the opcode sections which a particular
 2822 // instruction needs for encoding need to be specified.
 2823 encode %{
 2824   // Build emit functions for each basic byte or larger field in the
 2825   // intel encoding scheme (opcode, rm, sib, immediate), and call them
 2826   // from C++ code in the enc_class source block.  Emit functions will
 2827   // live in the main source block for now.  In future, we can
 2828   // generalize this by adding a syntax that specifies the sizes of
 2829   // fields in an order, so that the adlc can build the emit functions
 2830   // automagically
 2831 
 2832   // catch all for unimplemented encodings
  // Emits an "unimplemented" trap so any rule that reaches this
  // encoding fails loudly at run time rather than emitting garbage.
  enc_class enc_unimplemented %{
    C2_MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
 2837 
  // BEGIN Non-volatile memory access
  // NOTE(review): every enc_class between this BEGIN marker and the END
  // marker below is emitted by ad_encode.m4; change the m4 source and
  // regenerate rather than editing the expansions in place.  They all
  // funnel through the loadStore() helpers defined above.

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsb(iRegI dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrb(iRegI dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrb(iRegL dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrshw(iRegI dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsh(iRegI dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrh(iRegI dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrh(iRegL dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrw(iRegI dst, memory4 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrw(iRegL dst, memory4 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsw(iRegL dst, memory4 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldr(iRegL dst, memory8 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrs(vRegF dst, memory4 mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrd(vRegD dst, memory8 mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strb(iRegI src, memory1 mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strb0(memory1 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strh(iRegI src, memory2 mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strh0(memory2 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strw(iRegI src, memory4 mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strw0(memory4 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_str(iRegL src, memory8 mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      C2_MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_str0(memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strs(vRegF src, memory4 mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strd(vRegD src, memory8 mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strb0_ordered(memory4 mem) %{
      C2_MacroAssembler _masm(&cbuf);
      __ membar(Assembler::StoreStore);
      loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // END Non-volatile memory access
 3050 
  // Vector loads and stores
  // Each enc_class selects a SIMD register variant (H/S/D/Q) and defers
  // address formation to the vector loadStore() helper, which accepts
  // only base+disp or base+lsl(index) forms.
  enc_class aarch64_enc_ldrvH(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvS(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvH(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvS(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
 3099 
 3100   // volatile loads and stores
 3101 
 3102   enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
 3103     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3104                  rscratch1, stlrb);
 3105   %}
 3106 
 3107   enc_class aarch64_enc_stlrb0(memory mem) %{
 3108     MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3109                  rscratch1, stlrb);
 3110   %}
 3111 
 3112   enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
 3113     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3114                  rscratch1, stlrh);
 3115   %}
 3116 
 3117   enc_class aarch64_enc_stlrh0(memory mem) %{
 3118     MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3119                  rscratch1, stlrh);
 3120   %}
 3121 
 3122   enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
 3123     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3124                  rscratch1, stlrw);
 3125   %}
 3126 
 3127   enc_class aarch64_enc_stlrw0(memory mem) %{
 3128     MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3129                  rscratch1, stlrw);
 3130   %}
 3131 
 3132   enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
 3133     Register dst_reg = as_Register($dst$$reg);
 3134     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3135              rscratch1, ldarb);
 3136     __ sxtbw(dst_reg, dst_reg);
 3137   %}
 3138 
 3139   enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
 3140     Register dst_reg = as_Register($dst$$reg);
 3141     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3142              rscratch1, ldarb);
 3143     __ sxtb(dst_reg, dst_reg);
 3144   %}
 3145 
 3146   enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
 3147     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3148              rscratch1, ldarb);
 3149   %}
 3150 
 3151   enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
 3152     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3153              rscratch1, ldarb);
 3154   %}
 3155 
 3156   enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
 3157     Register dst_reg = as_Register($dst$$reg);
 3158     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3159              rscratch1, ldarh);
 3160     __ sxthw(dst_reg, dst_reg);
 3161   %}
 3162 
 3163   enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
 3164     Register dst_reg = as_Register($dst$$reg);
 3165     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3166              rscratch1, ldarh);
 3167     __ sxth(dst_reg, dst_reg);
 3168   %}
 3169 
 3170   enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
 3171     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3172              rscratch1, ldarh);
 3173   %}
 3174 
 3175   enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
 3176     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3177              rscratch1, ldarh);
 3178   %}
 3179 
  // Load-acquire 32-bit word into an int destination.
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}
 3184 
  // Load-acquire 32-bit word into a long destination (iRegL overload of
  // the encoding above; ldarw zeroes the upper 32 bits of the register).
  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}
 3189 
  // Load-acquire 64-bit doubleword.
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}
 3194 
  // Load-acquire float: ldarw into rscratch1, then fmov into the FP register
  // (there is no acquiring load directly into an FP/SIMD register).
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}
 3200 
  // Load-acquire double: ldar into rscratch1, then fmov into the FP register.
  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
 3206 
  // Store-release 64-bit doubleword.
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    // (sp is not encodable as the data operand of stlr), so copy
    // it through rscratch2 first.
    if (src_reg == r31_sp) {
      C2_MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
 3220 
  // Store-release of constant zero (uses zr as the source register).
  enc_class aarch64_enc_stlr0(memory mem) %{
    MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
 3225 
  // Store-release float: fmov the FP register into rscratch2, then stlrw
  // (there is no releasing store directly from an FP/SIMD register).
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      C2_MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}
 3235 
  // Store-release double: fmov into rscratch2, then stlr.
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      C2_MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
 3245 
 3246   // synchronized read/update encodings
 3247 
 3248   enc_class aarch64_enc_ldaxr(iRegL dst, memory8 mem) %{
 3249     C2_MacroAssembler _masm(&cbuf);
 3250     Register dst_reg = as_Register($dst$$reg);
 3251     Register base = as_Register($mem$$base);
 3252     int index = $mem$$index;
 3253     int scale = $mem$$scale;
 3254     int disp = $mem$$disp;
 3255     if (index == -1) {
 3256        if (disp != 0) {
 3257         __ lea(rscratch1, Address(base, disp));
 3258         __ ldaxr(dst_reg, rscratch1);
 3259       } else {
 3260         // TODO
 3261         // should we ever get anything other than this case?
 3262         __ ldaxr(dst_reg, base);
 3263       }
 3264     } else {
 3265       Register index_reg = as_Register(index);
 3266       if (disp == 0) {
 3267         __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
 3268         __ ldaxr(dst_reg, rscratch1);
 3269       } else {
 3270         __ lea(rscratch1, Address(base, disp));
 3271         __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
 3272         __ ldaxr(dst_reg, rscratch1);
 3273       }
 3274     }
 3275   %}
 3276 
 3277   enc_class aarch64_enc_stlxr(iRegLNoSp src, memory8 mem) %{
 3278     C2_MacroAssembler _masm(&cbuf);
 3279     Register src_reg = as_Register($src$$reg);
 3280     Register base = as_Register($mem$$base);
 3281     int index = $mem$$index;
 3282     int scale = $mem$$scale;
 3283     int disp = $mem$$disp;
 3284     if (index == -1) {
 3285        if (disp != 0) {
 3286         __ lea(rscratch2, Address(base, disp));
 3287         __ stlxr(rscratch1, src_reg, rscratch2);
 3288       } else {
 3289         // TODO
 3290         // should we ever get anything other than this case?
 3291         __ stlxr(rscratch1, src_reg, base);
 3292       }
 3293     } else {
 3294       Register index_reg = as_Register(index);
 3295       if (disp == 0) {
 3296         __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
 3297         __ stlxr(rscratch1, src_reg, rscratch2);
 3298       } else {
 3299         __ lea(rscratch2, Address(base, disp));
 3300         __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
 3301         __ stlxr(rscratch1, src_reg, rscratch2);
 3302       }
 3303     }
 3304     __ cmpw(rscratch1, zr);
 3305   %}
 3306 
  // 64-bit compare-and-swap, release semantics only (no acquire).
  // result register is noreg: presumably the outcome is reported via the
  // flags by MacroAssembler::cmpxchg -- see aarch64_enc_cset_eq below.
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3314 
  // 32-bit compare-and-swap, release semantics only.
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3322 
  // 16-bit (short) compare-and-swap, release semantics only.
  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3330 
  // 8-bit (byte) compare-and-swap, release semantics only.
  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3338 
 3339 
 3340   // The only difference between aarch64_enc_cmpxchg and
 3341   // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
 3342   // CompareAndSwap sequence to serve as a barrier on acquiring a
 3343   // lock.
  // 64-bit compare-and-swap with both acquire and release semantics;
  // see the comment above on the difference from aarch64_enc_cmpxchg.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3351 
  // 32-bit compare-and-swap with acquire and release semantics.
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3359 
  // 16-bit compare-and-swap with acquire and release semantics.
  enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3367 
  // 8-bit compare-and-swap with acquire and release semantics.
  enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3375 
 3376   // auxiliary used for CompareAndSwapX to set result register
  // auxiliary used for CompareAndSwapX to set result register:
  // res = 1 if the flags say EQ (CAS succeeded), else 0.
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    C2_MacroAssembler _masm(&cbuf);
    Register res_reg = as_Register($res$$reg);
    __ cset(res_reg, Assembler::EQ);
  %}
 3382 
 3383   // prefetch encodings
 3384 
 3385   enc_class aarch64_enc_prefetchw(memory mem) %{
 3386     C2_MacroAssembler _masm(&cbuf);
 3387     Register base = as_Register($mem$$base);
 3388     int index = $mem$$index;
 3389     int scale = $mem$$scale;
 3390     int disp = $mem$$disp;
 3391     if (index == -1) {
 3392       __ prfm(Address(base, disp), PSTL1KEEP);
 3393     } else {
 3394       Register index_reg = as_Register(index);
 3395       if (disp == 0) {
 3396         __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
 3397       } else {
 3398         __ lea(rscratch1, Address(base, disp));
 3399 	__ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
 3400       }
 3401     }
 3402   %}
 3403 
  // mov encodings
 3405 
  // Materialize a 32-bit immediate; zero gets the cheap register-move
  // from zr instead of an immediate sequence.
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    C2_MacroAssembler _masm(&cbuf);
    uint32_t con = (uint32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}
 3416 
  // Materialize a 64-bit immediate; zero uses a move from zr.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    uint64_t con = (uint64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}
 3427 
  // Materialize a pointer constant, choosing the encoding from its
  // relocation type: movoop for oops, mov_metadata for metadata, and a
  // plain mov or adrp+add for unrelocated addresses.  NULL and the value
  // 1 are handled by dedicated mov_p0/mov_p1 encodings, so they must not
  // reach this one.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        // adrp only reaches valid, above-page-zero addresses; anything
        // else falls back to a full immediate move.
        if (! __ is_valid_AArch64_address(con) ||
            con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          uint64_t offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
 3453 
  // Materialize the NULL pointer constant (move from zr).
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}
 3459 
  // Materialize the pointer constant 1.
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (uint64_t)1);
  %}
 3465 
  // Materialize the card table byte map base via the macro assembler helper.
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    C2_MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}
 3470 
  // Materialize a narrow (compressed) oop constant; NULL is handled by
  // the dedicated mov_n0 encoding and must not reach here.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}
 3483 
  // Materialize the narrow-oop NULL constant (move from zr).
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}
 3489 
  // Materialize a narrow (compressed) klass constant.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
 3502 
 3503   // arithmetic encodings
 3504 
  // 32-bit add/subtract immediate.  A single encoding serves both add and
  // subtract rules: subtract negates the constant, and a negative constant
  // is emitted as the opposite operation on its absolute value (AArch64
  // add/sub immediates are unsigned).
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}
 3518 
  // 64-bit add/subtract immediate; same primary-selected add/sub scheme as
  // the 32-bit encoding above.  The constant is deliberately narrowed to
  // int32_t -- immLAddSub presumably restricts the operand range; confirm
  // against the operand definition.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
 3532 
 3533   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
 3534     C2_MacroAssembler _masm(&cbuf);
 3535    Register dst_reg = as_Register($dst$$reg);
 3536    Register src1_reg = as_Register($src1$$reg);
 3537    Register src2_reg = as_Register($src2$$reg);
 3538     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
 3539   %}
 3540 
 3541   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
 3542     C2_MacroAssembler _masm(&cbuf);
 3543    Register dst_reg = as_Register($dst$$reg);
 3544    Register src1_reg = as_Register($src1$$reg);
 3545    Register src2_reg = as_Register($src2$$reg);
 3546     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
 3547   %}
 3548 
 3549   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
 3550     C2_MacroAssembler _masm(&cbuf);
 3551    Register dst_reg = as_Register($dst$$reg);
 3552    Register src1_reg = as_Register($src1$$reg);
 3553    Register src2_reg = as_Register($src2$$reg);
 3554     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
 3555   %}
 3556 
 3557   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
 3558     C2_MacroAssembler _masm(&cbuf);
 3559    Register dst_reg = as_Register($dst$$reg);
 3560    Register src1_reg = as_Register($src1$$reg);
 3561    Register src2_reg = as_Register($src2$$reg);
 3562     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
 3563   %}
 3564 
 3565   // compare instruction encodings
 3566 
  // 32-bit register-register compare (sets flags).
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}
 3573 
  // 32-bit compare against an add/sub-encodable immediate: implemented as
  // subs/adds with zr destination so only the flags are written.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}
 3584 
  // 32-bit compare against an arbitrary immediate: materialize it in
  // rscratch1 first, then do a register compare.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    uint32_t val = (uint32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}
 3592 
  // 64-bit register-register compare (sets flags).
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}
 3599 
  // 64-bit compare against an add/sub-encodable immediate, via subs/adds
  // into zr so only the flags are written.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
      // aargh, Long.MIN_VALUE is a special case: it is its own negation,
      // so materialize it in rscratch1 and compare against the register.
      __ orr(rscratch1, zr, (uint64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}
 3614 
  // 64-bit compare against an arbitrary immediate: materialize it in
  // rscratch1 first, then do a register compare.
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    uint64_t val = (uint64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}
 3622 
  // Pointer compare (64-bit register compare).
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}
 3629 
  // Narrow-oop compare (compressed oops are 32-bit, hence cmpw).
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}
 3636 
  // Test a pointer against NULL (compare with zr).
  enc_class aarch64_enc_testp(iRegP src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}
 3642 
  // Test a narrow oop against NULL (32-bit compare with zr).
  enc_class aarch64_enc_testn(iRegN src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
 3648 
  // Unconditional branch to a label.
  enc_class aarch64_enc_b(label lbl) %{
    C2_MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ b(*L);
  %}
 3654 
  // Conditional branch; the condition code comes from the cmpOp operand.
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    C2_MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}
 3660 
  // Conditional branch for unsigned comparisons; identical emission to
  // aarch64_enc_br_con -- the unsigned-ness is encoded in the cmpOpU
  // operand's condition code.
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    C2_MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}
 3666 
  // Slow-path partial subtype check via check_klass_subtype_slow_path.
  // On a hit, control falls through (miss label not taken); $primary
  // selects a variant that additionally zeroes the result register on
  // that fall-through path -- presumably to report "is a subtype" as 0;
  // verify against the instruct rules that use this encoding.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     C2_MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
 3684 
  // Emit a Java static (or optimized-virtual) call.  Three cases:
  //   1. no _method: a call into a runtime wrapper;
  //   2. the _ensureMaterializedForStackWalk intrinsic: elided entirely
  //      (NOP keeps the code size identical);
  //   3. a real Java method: trampoline call plus a to-interpreter stub
  //      (shared when the target is statically bound and sharing is
  //      supported).
  // Bails out with a CodeCache-full failure whenever stub or call space
  // cannot be allocated.
  enc_class aarch64_enc_java_static_call(method meth) %{
    C2_MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else if (_method->intrinsic_id() == vmIntrinsicID::_ensureMaterializedForStackWalk) {
      // The NOP here is purely to ensure that eliding a call to
      // JVM_EnsureMaterializedForStackWalk doesn't change the code size.
      __ nop();
      __ block_comment("call JVM_EnsureMaterializedForStackWalk (elided)");
    } else {
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
      if (CodeBuffer::supports_shared_stubs() && _method->can_be_statically_bound()) {
        // Calls of the same statically bound method can share
        // a stub to the interpreter.
        cbuf.shared_stub_to_interp_for(_method, call - cbuf.insts_begin());
      } else {
        // Emit stub for static call
        address stub = CompiledStaticCall::emit_to_interp_stub(cbuf, call);
        if (stub == NULL) {
          ciEnv::current()->record_failure("CodeCache is full");
          return;
        }
      }
    }

    __ post_call_nop();

    // Only non uncommon_trap calls need to reinitialize ptrue.
    if (Compile::current()->max_vector_size() > 0 && uncommon_trap_request() == 0) {
      __ reinitialize_ptrue();
    }
  %}
 3732 
  // Emit a Java dynamic (inline-cache) call; bails out on CodeCache
  // exhaustion, and reinitializes the SVE ptrue predicate after the call
  // when vectors are in use.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    C2_MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
    __ post_call_nop();
    if (Compile::current()->max_vector_size() > 0) {
      __ reinitialize_ptrue();
    }
  %}
 3746 
  // Call epilog; the VerifyStackAtCalls check is not implemented on AArch64.
  enc_class aarch64_enc_call_epilog() %{
    C2_MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find magic cookie on stack
      __ call_Unimplemented();
    }
  %}
 3754 
  // Call from compiled Java into the runtime.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    C2_MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blr
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target is inside the code cache: a (trampolined) direct call works.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
      __ post_call_nop();
    } else {
      // Target is outside the code cache: record the return pc so the
      // anchor frame stays walkable, then call indirectly through
      // rscratch1.
      Label retaddr;
      // Make the anchor frame walkable
      __ adr(rscratch2, retaddr);
      __ str(rscratch2, Address(rthread, JavaThread::last_Java_pc_offset()));
      __ lea(rscratch1, RuntimeAddress(entry));
      __ blr(rscratch1);
      __ bind(retaddr);
      __ post_call_nop();
    }
    if (Compile::current()->max_vector_size() > 0) {
      __ reinitialize_ptrue();
    }
  %}
 3785 
  // Jump to the rethrow stub (far_jump: may be outside branch range).
  enc_class aarch64_enc_rethrow() %{
    C2_MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}
 3790 
  // Method return; in debug builds first verify that the SVE ptrue
  // predicate register still holds its expected value.
  enc_class aarch64_enc_ret() %{
    C2_MacroAssembler _masm(&cbuf);
#ifdef ASSERT
    if (Compile::current()->max_vector_size() > 0) {
      __ verify_ptrue();
    }
#endif
    __ ret(lr);
  %}
 3800 
  // Tail call: indirect jump to the target register.
  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    C2_MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}
 3806 
  // Tail jump used for exception forwarding.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    C2_MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
 3816 
 3817 %}
 3818 
 3819 //----------FRAME--------------------------------------------------------------
 3820 // Definition of frame structure and management information.
 3821 //
 3822 //  S T A C K   L A Y O U T    Allocators stack-slot number
 3823 //                             |   (to get allocators register number
 3824 //  G  Owned by    |        |  v    add OptoReg::stack0())
 3825 //  r   CALLER     |        |
 3826 //  o     |        +--------+      pad to even-align allocators stack-slot
 3827 //  w     V        |  pad0  |        numbers; owned by CALLER
 3828 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
 3829 //  h     ^        |   in   |  5
 3830 //        |        |  args  |  4   Holes in incoming args owned by SELF
 3831 //  |     |        |        |  3
 3832 //  |     |        +--------+
 3833 //  V     |        | old out|      Empty on Intel, window on Sparc
 3834 //        |    old |preserve|      Must be even aligned.
 3835 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
 3836 //        |        |   in   |  3   area for Intel ret address
 3837 //     Owned by    |preserve|      Empty on Sparc.
 3838 //       SELF      +--------+
 3839 //        |        |  pad2  |  2   pad to align old SP
 3840 //        |        +--------+  1
 3841 //        |        | locks  |  0
 3842 //        |        +--------+----> OptoReg::stack0(), even aligned
 3843 //        |        |  pad1  | 11   pad to align new SP
 3844 //        |        +--------+
 3845 //        |        |        | 10
 3846 //        |        | spills |  9   spills
 3847 //        V        |        |  8   (pad0 slot for callee)
 3848 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
 3849 //        ^        |  out   |  7
 3850 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
 3851 //     Owned by    +--------+
 3852 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
 3853 //        |    new |preserve|      Must be even-aligned.
 3854 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
 3855 //        |        |        |
 3856 //
 3857 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
 3858 //         known from SELF's arguments and the Java calling convention.
 3859 //         Region 6-7 is determined per call site.
 3860 // Note 2: If the calling convention leaves holes in the incoming argument
 3861 //         area, those holes are owned by SELF.  Holes in the outgoing area
 3862 //         are owned by the CALLEE.  Holes should not be necessary in the
 3863 //         incoming area, as the Java calling convention is completely under
 3864 //         the control of the AD file.  Doubles can be sorted and packed to
 3865 //         avoid holes.  Holes in the outgoing arguments may be necessary for
 3866 //         varargs C calling conventions.
 3867 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
 3868 //         even aligned with pad0 as needed.
 3869 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
 3870 //           (the latter is true on Intel but is it false on AArch64?)
 3871 //         region 6-11 is even aligned; it may be padded out more so that
 3872 //         the region from SP to FP meets the minimum stack alignment.
 3873 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
 3874 //         alignment.  Region 11, pad1, may be dynamically extended so that
 3875 //         SP meets the minimum alignment.
 3876 
frame %{
  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or Method for I2C.
  inline_cache_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low halves of return registers, indexed by ideal register type.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // High halves; OptoReg::Bad for 32-bit values, which have no high half.
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                      // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
 3948 
 3949 //----------ATTRIBUTES---------------------------------------------------------
 3950 //----------Operand Attributes-------------------------------------------------
 3951 op_attrib op_cost(1);        // Required cost attribute
 3952 
 3953 //----------Instruction Attributes---------------------------------------------
 3954 ins_attrib ins_cost(INSN_COST); // Required cost attribute
 3955 ins_attrib ins_size(32);        // Required size attribute (in bits)
 3956 ins_attrib ins_short_branch(0); // Required flag: is this instruction
 3957                                 // a non-matching short branch variant
 3958                                 // of some long branch?
 3959 ins_attrib ins_alignment(4);    // Required alignment attribute (must
 3960                                 // be a power of 2) specifies the
 3961                                 // alignment that some part of the
 3962                                 // instruction (not necessarily the
 3963                                 // start) requires.  If > 1, a
 3964                                 // compute_padding() function must be
 3965                                 // provided for the instruction
 3966 
 3967 //----------OPERANDS-----------------------------------------------------------
 3968 // Operand definitions must precede instruction definitions for correct parsing
 3969 // in the ADLC because operands constitute user defined types which are used in
 3970 // instruction definitions.
 3971 
 3972 //----------Simple Operands----------------------------------------------------
 3973 
 3974 // Integer operands 32 bit
 3975 // 32 bit immediate
 3976 operand immI()
 3977 %{
 3978   match(ConI);
 3979 
 3980   op_cost(0);
 3981   format %{ %}
 3982   interface(CONST_INTER);
 3983 %}
 3984 
 3985 // 32 bit zero
 3986 operand immI0()
 3987 %{
 3988   predicate(n->get_int() == 0);
 3989   match(ConI);
 3990 
 3991   op_cost(0);
 3992   format %{ %}
 3993   interface(CONST_INTER);
 3994 %}
 3995 
 3996 // 32 bit unit increment
 3997 operand immI_1()
 3998 %{
 3999   predicate(n->get_int() == 1);
 4000   match(ConI);
 4001 
 4002   op_cost(0);
 4003   format %{ %}
 4004   interface(CONST_INTER);
 4005 %}
 4006 
 4007 // 32 bit unit decrement
 4008 operand immI_M1()
 4009 %{
 4010   predicate(n->get_int() == -1);
 4011   match(ConI);
 4012 
 4013   op_cost(0);
 4014   format %{ %}
 4015   interface(CONST_INTER);
 4016 %}
 4017 
// Shift values for add/sub extension shift
// Values 0..4 only: the extended-register forms of ADD/SUB
// (e.g. "add x0, x1, w2, sxtw #n") encode a left shift of at most 4.
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4028 
 4029 operand immI_gt_1()
 4030 %{
 4031   predicate(n->get_int() > 1);
 4032   match(ConI);
 4033 
 4034   op_cost(0);
 4035   format %{ %}
 4036   interface(CONST_INTER);
 4037 %}
 4038 
 4039 operand immI_le_4()
 4040 %{
 4041   predicate(n->get_int() <= 4);
 4042   match(ConI);
 4043 
 4044   op_cost(0);
 4045   format %{ %}
 4046   interface(CONST_INTER);
 4047 %}
 4048 
 4049 operand immI_16()
 4050 %{
 4051   predicate(n->get_int() == 16);
 4052   match(ConI);
 4053 
 4054   op_cost(0);
 4055   format %{ %}
 4056   interface(CONST_INTER);
 4057 %}
 4058 
 4059 operand immI_24()
 4060 %{
 4061   predicate(n->get_int() == 24);
 4062   match(ConI);
 4063 
 4064   op_cost(0);
 4065   format %{ %}
 4066   interface(CONST_INTER);
 4067 %}
 4068 
 4069 operand immI_32()
 4070 %{
 4071   predicate(n->get_int() == 32);
 4072   match(ConI);
 4073 
 4074   op_cost(0);
 4075   format %{ %}
 4076   interface(CONST_INTER);
 4077 %}
 4078 
 4079 operand immI_48()
 4080 %{
 4081   predicate(n->get_int() == 48);
 4082   match(ConI);
 4083 
 4084   op_cost(0);
 4085   format %{ %}
 4086   interface(CONST_INTER);
 4087 %}
 4088 
 4089 operand immI_56()
 4090 %{
 4091   predicate(n->get_int() == 56);
 4092   match(ConI);
 4093 
 4094   op_cost(0);
 4095   format %{ %}
 4096   interface(CONST_INTER);
 4097 %}
 4098 
 4099 operand immI_63()
 4100 %{
 4101   predicate(n->get_int() == 63);
 4102   match(ConI);
 4103 
 4104   op_cost(0);
 4105   format %{ %}
 4106   interface(CONST_INTER);
 4107 %}
 4108 
 4109 operand immI_64()
 4110 %{
 4111   predicate(n->get_int() == 64);
 4112   match(ConI);
 4113 
 4114   op_cost(0);
 4115   format %{ %}
 4116   interface(CONST_INTER);
 4117 %}
 4118 
 4119 operand immI_255()
 4120 %{
 4121   predicate(n->get_int() == 255);
 4122   match(ConI);
 4123 
 4124   op_cost(0);
 4125   format %{ %}
 4126   interface(CONST_INTER);
 4127 %}
 4128 
 4129 operand immI_65535()
 4130 %{
 4131   predicate(n->get_int() == 65535);
 4132   match(ConI);
 4133 
 4134   op_cost(0);
 4135   format %{ %}
 4136   interface(CONST_INTER);
 4137 %}
 4138 
 4139 operand immI_positive()
 4140 %{
 4141   predicate(n->get_int() > 0);
 4142   match(ConI);
 4143 
 4144   op_cost(0);
 4145   format %{ %}
 4146   interface(CONST_INTER);
 4147 %}
 4148 
 4149 // BoolTest condition for signed compare
 4150 operand immI_cmp_cond()
 4151 %{
 4152   predicate(!Matcher::is_unsigned_booltest_pred(n->get_int()));
 4153   match(ConI);
 4154 
 4155   op_cost(0);
 4156   format %{ %}
 4157   interface(CONST_INTER);
 4158 %}
 4159 
 4160 // BoolTest condition for unsigned compare
 4161 operand immI_cmpU_cond()
 4162 %{
 4163   predicate(Matcher::is_unsigned_booltest_pred(n->get_int()));
 4164   match(ConI);
 4165 
 4166   op_cost(0);
 4167   format %{ %}
 4168   interface(CONST_INTER);
 4169 %}
 4170 
 4171 operand immL_255()
 4172 %{
 4173   predicate(n->get_long() == 255L);
 4174   match(ConL);
 4175 
 4176   op_cost(0);
 4177   format %{ %}
 4178   interface(CONST_INTER);
 4179 %}
 4180 
 4181 operand immL_65535()
 4182 %{
 4183   predicate(n->get_long() == 65535L);
 4184   match(ConL);
 4185 
 4186   op_cost(0);
 4187   format %{ %}
 4188   interface(CONST_INTER);
 4189 %}
 4190 
 4191 operand immL_4294967295()
 4192 %{
 4193   predicate(n->get_long() == 4294967295L);
 4194   match(ConL);
 4195 
 4196   op_cost(0);
 4197   format %{ %}
 4198   interface(CONST_INTER);
 4199 %}
 4200 
// Long immediate of the form 2^k - 1: a non-zero contiguous mask of
// low-order one bits.  The top two bits must be clear so that the
// (value + 1) power-of-two test cannot overflow into the sign bit.
operand immL_bitmask()
%{
  predicate((n->get_long() != 0)
            && ((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4212 
// Int immediate of the form 2^k - 1: a non-zero contiguous mask of
// low-order one bits.  The top two bits must be clear so that the
// (value + 1) power-of-two test cannot overflow into the sign bit.
operand immI_bitmask()
%{
  predicate((n->get_int() != 0)
            && ((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4224 
// Long immediate of the form 2^k - 1 whose value also fits in the
// non-negative range of a 32-bit int (unsigned value < 2^31).
operand immL_positive_bitmaskI()
%{
  predicate((n->get_long() != 0)
            && ((julong)n->get_long() < 0x80000000ULL)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4236 
// Scale values for scaled offset addressing modes (up to long but not quad)
// 0..3 is the log2 shift applied to an index register, i.e. element
// sizes byte, halfword, word, doubleword.
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4247 
 4248 // 26 bit signed offset -- for pc-relative branches
 4249 operand immI26()
 4250 %{
 4251   predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
 4252   match(ConI);
 4253 
 4254   op_cost(0);
 4255   format %{ %}
 4256   interface(CONST_INTER);
 4257 %}
 4258 
 4259 // 19 bit signed offset -- for pc-relative loads
 4260 operand immI19()
 4261 %{
 4262   predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
 4263   match(ConI);
 4264 
 4265   op_cost(0);
 4266   format %{ %}
 4267   interface(CONST_INTER);
 4268 %}
 4269 
 4270 // 5 bit signed integer
 4271 operand immI5()
 4272 %{
 4273   predicate(Assembler::is_simm(n->get_int(), 5));
 4274   match(ConI);
 4275 
 4276   op_cost(0);
 4277   format %{ %}
 4278   interface(CONST_INTER);
 4279 %}
 4280 
 4281 // 7 bit unsigned integer
 4282 operand immIU7()
 4283 %{
 4284   predicate(Assembler::is_uimm(n->get_int(), 7));
 4285   match(ConI);
 4286 
 4287   op_cost(0);
 4288   format %{ %}
 4289   interface(CONST_INTER);
 4290 %}
 4291 
 4292 // 12 bit unsigned offset -- for base plus immediate loads
 4293 operand immIU12()
 4294 %{
 4295   predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
 4296   match(ConI);
 4297 
 4298   op_cost(0);
 4299   format %{ %}
 4300   interface(CONST_INTER);
 4301 %}
 4302 
 4303 operand immLU12()
 4304 %{
 4305   predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
 4306   match(ConL);
 4307 
 4308   op_cost(0);
 4309   format %{ %}
 4310   interface(CONST_INTER);
 4311 %}
 4312 
// Offset for scaled or unscaled immediate loads and stores
// The second argument to offset_ok_for_immed is log2 of the access
// size in bytes; 0 here validates the offset for a byte-sized access.
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4323 
// Offset valid for a 1-byte access (log2 size 0); the suffix in the
// immIOffset<N> family is the access size in bytes.
operand immIOffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4333 
 4334 operand immIOffset2()
 4335 %{
 4336   predicate(Address::offset_ok_for_immed(n->get_int(), 1));
 4337   match(ConI);
 4338 
 4339   op_cost(0);
 4340   format %{ %}
 4341   interface(CONST_INTER);
 4342 %}
 4343 
 4344 operand immIOffset4()
 4345 %{
 4346   predicate(Address::offset_ok_for_immed(n->get_int(), 2));
 4347   match(ConI);
 4348 
 4349   op_cost(0);
 4350   format %{ %}
 4351   interface(CONST_INTER);
 4352 %}
 4353 
 4354 operand immIOffset8()
 4355 %{
 4356   predicate(Address::offset_ok_for_immed(n->get_int(), 3));
 4357   match(ConI);
 4358 
 4359   op_cost(0);
 4360   format %{ %}
 4361   interface(CONST_INTER);
 4362 %}
 4363 
 4364 operand immIOffset16()
 4365 %{
 4366   predicate(Address::offset_ok_for_immed(n->get_int(), 4));
 4367   match(ConI);
 4368 
 4369   op_cost(0);
 4370   format %{ %}
 4371   interface(CONST_INTER);
 4372 %}
 4373 
 4374 operand immLoffset()
 4375 %{
 4376   predicate(Address::offset_ok_for_immed(n->get_long(), 0));
 4377   match(ConL);
 4378 
 4379   op_cost(0);
 4380   format %{ %}
 4381   interface(CONST_INTER);
 4382 %}
 4383 
 4384 operand immLoffset1()
 4385 %{
 4386   predicate(Address::offset_ok_for_immed(n->get_long(), 0));
 4387   match(ConL);
 4388 
 4389   op_cost(0);
 4390   format %{ %}
 4391   interface(CONST_INTER);
 4392 %}
 4393 
 4394 operand immLoffset2()
 4395 %{
 4396   predicate(Address::offset_ok_for_immed(n->get_long(), 1));
 4397   match(ConL);
 4398 
 4399   op_cost(0);
 4400   format %{ %}
 4401   interface(CONST_INTER);
 4402 %}
 4403 
 4404 operand immLoffset4()
 4405 %{
 4406   predicate(Address::offset_ok_for_immed(n->get_long(), 2));
 4407   match(ConL);
 4408 
 4409   op_cost(0);
 4410   format %{ %}
 4411   interface(CONST_INTER);
 4412 %}
 4413 
 4414 operand immLoffset8()
 4415 %{
 4416   predicate(Address::offset_ok_for_immed(n->get_long(), 3));
 4417   match(ConL);
 4418 
 4419   op_cost(0);
 4420   format %{ %}
 4421   interface(CONST_INTER);
 4422 %}
 4423 
 4424 operand immLoffset16()
 4425 %{
 4426   predicate(Address::offset_ok_for_immed(n->get_long(), 4));
 4427   match(ConL);
 4428 
 4429   op_cost(0);
 4430   format %{ %}
 4431   interface(CONST_INTER);
 4432 %}
 4433 
 4434 // 5 bit signed long integer
 4435 operand immL5()
 4436 %{
 4437   predicate(Assembler::is_simm(n->get_long(), 5));
 4438   match(ConL);
 4439 
 4440   op_cost(0);
 4441   format %{ %}
 4442   interface(CONST_INTER);
 4443 %}
 4444 
 4445 // 7 bit unsigned long integer
 4446 operand immLU7()
 4447 %{
 4448   predicate(Assembler::is_uimm(n->get_long(), 7));
 4449   match(ConL);
 4450 
 4451   op_cost(0);
 4452   format %{ %}
 4453   interface(CONST_INTER);
 4454 %}
 4455 
 4456 // 8 bit signed value.
 4457 operand immI8()
 4458 %{
 4459   predicate(n->get_int() <= 127 && n->get_int() >= -128);
 4460   match(ConI);
 4461 
 4462   op_cost(0);
 4463   format %{ %}
 4464   interface(CONST_INTER);
 4465 %}
 4466 
// 8 bit signed value (simm8), or #simm8 LSL 8.
// The shifted alternative covers -32768..32512 in steps of 256 with a
// clear low byte (32512 == 127 << 8, -32768 == -128 << 8).
operand immI8_shift8()
%{
  predicate((n->get_int() <= 127 && n->get_int() >= -128) ||
            (n->get_int() <= 32512 && n->get_int() >= -32768 && (n->get_int() & 0xff) == 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4478 
// 8 bit signed value (simm8), or #simm8 LSL 8 — long variant.
// The shifted alternative covers -32768..32512 in steps of 256 with a
// clear low byte (32512 == 127 << 8, -32768 == -128 << 8).
operand immL8_shift8()
%{
  predicate((n->get_long() <= 127 && n->get_long() >= -128) ||
            (n->get_long() <= 32512 && n->get_long() >= -32768 && (n->get_long() & 0xff) == 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4490 
 4491 // 8 bit integer valid for vector add sub immediate
 4492 operand immBAddSubV()
 4493 %{
 4494   predicate(n->get_int() <= 255 && n->get_int() >= -255);
 4495   match(ConI);
 4496 
 4497   op_cost(0);
 4498   format %{ %}
 4499   interface(CONST_INTER);
 4500 %}
 4501 
 4502 // 32 bit integer valid for add sub immediate
 4503 operand immIAddSub()
 4504 %{
 4505   predicate(Assembler::operand_valid_for_add_sub_immediate((int64_t)n->get_int()));
 4506   match(ConI);
 4507   op_cost(0);
 4508   format %{ %}
 4509   interface(CONST_INTER);
 4510 %}
 4511 
 4512 // 32 bit integer valid for vector add sub immediate
 4513 operand immIAddSubV()
 4514 %{
 4515   predicate(Assembler::operand_valid_for_sve_add_sub_immediate((int64_t)n->get_int()));
 4516   match(ConI);
 4517 
 4518   op_cost(0);
 4519   format %{ %}
 4520   interface(CONST_INTER);
 4521 %}
 4522 
 4523 // 32 bit unsigned integer valid for logical immediate
 4524 
 4525 operand immBLog()
 4526 %{
 4527   predicate(Assembler::operand_valid_for_sve_logical_immediate(BitsPerByte, (uint64_t)n->get_int()));
 4528   match(ConI);
 4529 
 4530   op_cost(0);
 4531   format %{ %}
 4532   interface(CONST_INTER);
 4533 %}
 4534 
 4535 operand immSLog()
 4536 %{
 4537   predicate(Assembler::operand_valid_for_sve_logical_immediate(BitsPerShort, (uint64_t)n->get_int()));
 4538   match(ConI);
 4539 
 4540   op_cost(0);
 4541   format %{ %}
 4542   interface(CONST_INTER);
 4543 %}
 4544 
 4545 operand immILog()
 4546 %{
 4547   predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (uint64_t)n->get_int()));
 4548   match(ConI);
 4549 
 4550   op_cost(0);
 4551   format %{ %}
 4552   interface(CONST_INTER);
 4553 %}
 4554 
 4555 // Integer operands 64 bit
 4556 // 64 bit immediate
 4557 operand immL()
 4558 %{
 4559   match(ConL);
 4560 
 4561   op_cost(0);
 4562   format %{ %}
 4563   interface(CONST_INTER);
 4564 %}
 4565 
 4566 // 64 bit zero
 4567 operand immL0()
 4568 %{
 4569   predicate(n->get_long() == 0);
 4570   match(ConL);
 4571 
 4572   op_cost(0);
 4573   format %{ %}
 4574   interface(CONST_INTER);
 4575 %}
 4576 
 4577 // 64 bit unit increment
 4578 operand immL_1()
 4579 %{
 4580   predicate(n->get_long() == 1);
 4581   match(ConL);
 4582 
 4583   op_cost(0);
 4584   format %{ %}
 4585   interface(CONST_INTER);
 4586 %}
 4587 
 4588 // 64 bit unit decrement
 4589 operand immL_M1()
 4590 %{
 4591   predicate(n->get_long() == -1);
 4592   match(ConL);
 4593 
 4594   op_cost(0);
 4595   format %{ %}
 4596   interface(CONST_INTER);
 4597 %}
 4598 
// 32 bit offset of pc in thread anchor
// Matches only the exact constant byte offset of last_Java_pc within
// the JavaThread's frame anchor, so a store of the current PC to the
// anchor can be pattern-matched.

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4611 
 4612 // 64 bit integer valid for add sub immediate
 4613 operand immLAddSub()
 4614 %{
 4615   predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
 4616   match(ConL);
 4617   op_cost(0);
 4618   format %{ %}
 4619   interface(CONST_INTER);
 4620 %}
 4621 
 4622 // 64 bit integer valid for addv subv immediate
 4623 operand immLAddSubV()
 4624 %{
 4625   predicate(Assembler::operand_valid_for_sve_add_sub_immediate(n->get_long()));
 4626   match(ConL);
 4627 
 4628   op_cost(0);
 4629   format %{ %}
 4630   interface(CONST_INTER);
 4631 %}
 4632 
 4633 // 64 bit integer valid for logical immediate
 4634 operand immLLog()
 4635 %{
 4636   predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (uint64_t)n->get_long()));
 4637   match(ConL);
 4638   op_cost(0);
 4639   format %{ %}
 4640   interface(CONST_INTER);
 4641 %}
 4642 
 4643 // Long Immediate: low 32-bit mask
 4644 operand immL_32bits()
 4645 %{
 4646   predicate(n->get_long() == 0xFFFFFFFFL);
 4647   match(ConL);
 4648   op_cost(0);
 4649   format %{ %}
 4650   interface(CONST_INTER);
 4651 %}
 4652 
 4653 // Pointer operands
 4654 // Pointer Immediate
 4655 operand immP()
 4656 %{
 4657   match(ConP);
 4658 
 4659   op_cost(0);
 4660   format %{ %}
 4661   interface(CONST_INTER);
 4662 %}
 4663 
 4664 // NULL Pointer Immediate
 4665 operand immP0()
 4666 %{
 4667   predicate(n->get_ptr() == 0);
 4668   match(ConP);
 4669 
 4670   op_cost(0);
 4671   format %{ %}
 4672   interface(CONST_INTER);
 4673 %}
 4674 
 4675 // Pointer Immediate One
 4676 // this is used in object initialization (initial object header)
 4677 operand immP_1()
 4678 %{
 4679   predicate(n->get_ptr() == 1);
 4680   match(ConP);
 4681 
 4682   op_cost(0);
 4683   format %{ %}
 4684   interface(CONST_INTER);
 4685 %}
 4686 
// Card Table Byte Map Base
// Matches only when the GC uses a card-table barrier set and the
// constant pointer equals the card table's byte_map_base, so this
// particular pointer can be materialized specially by the backend.
operand immByteMapBase()
%{
  // Get base of card map
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            (CardTable::CardValue*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4699 
 4700 // Pointer Immediate Minus One
 4701 // this is used when we want to write the current PC to the thread anchor
 4702 operand immP_M1()
 4703 %{
 4704   predicate(n->get_ptr() == -1);
 4705   match(ConP);
 4706 
 4707   op_cost(0);
 4708   format %{ %}
 4709   interface(CONST_INTER);
 4710 %}
 4711 
 4712 // Pointer Immediate Minus Two
 4713 // this is used when we want to write the current PC to the thread anchor
 4714 operand immP_M2()
 4715 %{
 4716   predicate(n->get_ptr() == -2);
 4717   match(ConP);
 4718 
 4719   op_cost(0);
 4720   format %{ %}
 4721   interface(CONST_INTER);
 4722 %}
 4723 
 4724 // Float and Double operands
 4725 // Double Immediate
 4726 operand immD()
 4727 %{
 4728   match(ConD);
 4729   op_cost(0);
 4730   format %{ %}
 4731   interface(CONST_INTER);
 4732 %}
 4733 
 4734 // Double Immediate: +0.0d
 4735 operand immD0()
 4736 %{
 4737   predicate(jlong_cast(n->getd()) == 0);
 4738   match(ConD);
 4739 
 4740   op_cost(0);
 4741   format %{ %}
 4742   interface(CONST_INTER);
 4743 %}
 4744 
// Double Immediate: value representable as an 8-bit "packed" FP
// immediate, as accepted by operand_valid_for_float_immediate
// (suitable for an FMOV-style encoding).
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4754 
 4755 // Float Immediate
 4756 operand immF()
 4757 %{
 4758   match(ConF);
 4759   op_cost(0);
 4760   format %{ %}
 4761   interface(CONST_INTER);
 4762 %}
 4763 
 4764 // Float Immediate: +0.0f.
 4765 operand immF0()
 4766 %{
 4767   predicate(jint_cast(n->getf()) == 0);
 4768   match(ConF);
 4769 
 4770   op_cost(0);
 4771   format %{ %}
 4772   interface(CONST_INTER);
 4773 %}
 4774 
 4775 //
 4776 operand immFPacked()
 4777 %{
 4778   predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
 4779   match(ConF);
 4780   op_cost(0);
 4781   format %{ %}
 4782   interface(CONST_INTER);
 4783 %}
 4784 
 4785 // Narrow pointer operands
 4786 // Narrow Pointer Immediate
 4787 operand immN()
 4788 %{
 4789   match(ConN);
 4790 
 4791   op_cost(0);
 4792   format %{ %}
 4793   interface(CONST_INTER);
 4794 %}
 4795 
 4796 // Narrow NULL Pointer Immediate
 4797 operand immN0()
 4798 %{
 4799   predicate(n->get_narrowcon() == 0);
 4800   match(ConN);
 4801 
 4802   op_cost(0);
 4803   format %{ %}
 4804   interface(CONST_INTER);
 4805 %}
 4806 
 4807 operand immNKlass()
 4808 %{
 4809   match(ConNKlass);
 4810 
 4811   op_cost(0);
 4812   format %{ %}
 4813   interface(CONST_INTER);
 4814 %}
 4815 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
 4818 operand iRegI()
 4819 %{
 4820   constraint(ALLOC_IN_RC(any_reg32));
 4821   match(RegI);
 4822   match(iRegINoSp);
 4823   op_cost(0);
 4824   format %{ %}
 4825   interface(REG_INTER);
 4826 %}
 4827 
 4828 // Integer 32 bit Register not Special
 4829 operand iRegINoSp()
 4830 %{
 4831   constraint(ALLOC_IN_RC(no_special_reg32));
 4832   match(RegI);
 4833   op_cost(0);
 4834   format %{ %}
 4835   interface(REG_INTER);
 4836 %}
 4837 
 4838 // Integer 64 bit Register Operands
 4839 // Integer 64 bit Register (includes SP)
 4840 operand iRegL()
 4841 %{
 4842   constraint(ALLOC_IN_RC(any_reg));
 4843   match(RegL);
 4844   match(iRegLNoSp);
 4845   op_cost(0);
 4846   format %{ %}
 4847   interface(REG_INTER);
 4848 %}
 4849 
 4850 // Integer 64 bit Register not Special
 4851 operand iRegLNoSp()
 4852 %{
 4853   constraint(ALLOC_IN_RC(no_special_reg));
 4854   match(RegL);
 4855   match(iRegL_R0);
 4856   format %{ %}
 4857   interface(REG_INTER);
 4858 %}
 4859 
 4860 // Pointer Register Operands
 4861 // Pointer Register
 4862 operand iRegP()
 4863 %{
 4864   constraint(ALLOC_IN_RC(ptr_reg));
 4865   match(RegP);
 4866   match(iRegPNoSp);
 4867   match(iRegP_R0);
 4868   //match(iRegP_R2);
 4869   //match(iRegP_R4);
 4870   match(iRegP_R5);
 4871   match(thread_RegP);
 4872   op_cost(0);
 4873   format %{ %}
 4874   interface(REG_INTER);
 4875 %}
 4876 
 4877 // Pointer 64 bit Register not Special
 4878 operand iRegPNoSp()
 4879 %{
 4880   constraint(ALLOC_IN_RC(no_special_ptr_reg));
 4881   match(RegP);
 4882   // match(iRegP);
 4883   // match(iRegP_R0);
 4884   // match(iRegP_R2);
 4885   // match(iRegP_R4);
 4886   // match(iRegP_R5);
 4887   // match(thread_RegP);
 4888   op_cost(0);
 4889   format %{ %}
 4890   interface(REG_INTER);
 4891 %}
 4892 
 4893 // This operand is not allowed to use rfp even if
 4894 // rfp is not used to hold the frame pointer.
 4895 operand iRegPNoSpNoRfp()
 4896 %{
 4897   constraint(ALLOC_IN_RC(no_special_no_rfp_ptr_reg));
 4898   match(RegP);
 4899   match(iRegPNoSp);
 4900   op_cost(0);
 4901   format %{ %}
 4902   interface(REG_INTER);
 4903 %}
 4904 
 4905 // Pointer 64 bit Register R0 only
 4906 operand iRegP_R0()
 4907 %{
 4908   constraint(ALLOC_IN_RC(r0_reg));
 4909   match(RegP);
 4910   // match(iRegP);
 4911   match(iRegPNoSp);
 4912   op_cost(0);
 4913   format %{ %}
 4914   interface(REG_INTER);
 4915 %}
 4916 
 4917 // Pointer 64 bit Register R1 only
 4918 operand iRegP_R1()
 4919 %{
 4920   constraint(ALLOC_IN_RC(r1_reg));
 4921   match(RegP);
 4922   // match(iRegP);
 4923   match(iRegPNoSp);
 4924   op_cost(0);
 4925   format %{ %}
 4926   interface(REG_INTER);
 4927 %}
 4928 
 4929 // Pointer 64 bit Register R2 only
 4930 operand iRegP_R2()
 4931 %{
 4932   constraint(ALLOC_IN_RC(r2_reg));
 4933   match(RegP);
 4934   // match(iRegP);
 4935   match(iRegPNoSp);
 4936   op_cost(0);
 4937   format %{ %}
 4938   interface(REG_INTER);
 4939 %}
 4940 
 4941 // Pointer 64 bit Register R3 only
 4942 operand iRegP_R3()
 4943 %{
 4944   constraint(ALLOC_IN_RC(r3_reg));
 4945   match(RegP);
 4946   // match(iRegP);
 4947   match(iRegPNoSp);
 4948   op_cost(0);
 4949   format %{ %}
 4950   interface(REG_INTER);
 4951 %}
 4952 
 4953 // Pointer 64 bit Register R4 only
 4954 operand iRegP_R4()
 4955 %{
 4956   constraint(ALLOC_IN_RC(r4_reg));
 4957   match(RegP);
 4958   // match(iRegP);
 4959   match(iRegPNoSp);
 4960   op_cost(0);
 4961   format %{ %}
 4962   interface(REG_INTER);
 4963 %}
 4964 
 4965 // Pointer 64 bit Register R5 only
 4966 operand iRegP_R5()
 4967 %{
 4968   constraint(ALLOC_IN_RC(r5_reg));
 4969   match(RegP);
 4970   // match(iRegP);
 4971   match(iRegPNoSp);
 4972   op_cost(0);
 4973   format %{ %}
 4974   interface(REG_INTER);
 4975 %}
 4976 
 4977 // Pointer 64 bit Register R10 only
 4978 operand iRegP_R10()
 4979 %{
 4980   constraint(ALLOC_IN_RC(r10_reg));
 4981   match(RegP);
 4982   // match(iRegP);
 4983   match(iRegPNoSp);
 4984   op_cost(0);
 4985   format %{ %}
 4986   interface(REG_INTER);
 4987 %}
 4988 
 4989 // Long 64 bit Register R0 only
 4990 operand iRegL_R0()
 4991 %{
 4992   constraint(ALLOC_IN_RC(r0_reg));
 4993   match(RegL);
 4994   match(iRegLNoSp);
 4995   op_cost(0);
 4996   format %{ %}
 4997   interface(REG_INTER);
 4998 %}
 4999 
 5000 // Long 64 bit Register R2 only
 5001 operand iRegL_R2()
 5002 %{
 5003   constraint(ALLOC_IN_RC(r2_reg));
 5004   match(RegL);
 5005   match(iRegLNoSp);
 5006   op_cost(0);
 5007   format %{ %}
 5008   interface(REG_INTER);
 5009 %}
 5010 
 5011 // Long 64 bit Register R3 only
 5012 operand iRegL_R3()
 5013 %{
 5014   constraint(ALLOC_IN_RC(r3_reg));
 5015   match(RegL);
 5016   match(iRegLNoSp);
 5017   op_cost(0);
 5018   format %{ %}
 5019   interface(REG_INTER);
 5020 %}
 5021 
 5022 // Long 64 bit Register R11 only
 5023 operand iRegL_R11()
 5024 %{
 5025   constraint(ALLOC_IN_RC(r11_reg));
 5026   match(RegL);
 5027   match(iRegLNoSp);
 5028   op_cost(0);
 5029   format %{ %}
 5030   interface(REG_INTER);
 5031 %}
 5032 
 5033 // Pointer 64 bit Register FP only
 5034 operand iRegP_FP()
 5035 %{
 5036   constraint(ALLOC_IN_RC(fp_reg));
 5037   match(RegP);
 5038   // match(iRegP);
 5039   op_cost(0);
 5040   format %{ %}
 5041   interface(REG_INTER);
 5042 %}
 5043 
 5044 // Register R0 only
 5045 operand iRegI_R0()
 5046 %{
 5047   constraint(ALLOC_IN_RC(int_r0_reg));
 5048   match(RegI);
 5049   match(iRegINoSp);
 5050   op_cost(0);
 5051   format %{ %}
 5052   interface(REG_INTER);
 5053 %}
 5054 
 5055 // Register R2 only
 5056 operand iRegI_R2()
 5057 %{
 5058   constraint(ALLOC_IN_RC(int_r2_reg));
 5059   match(RegI);
 5060   match(iRegINoSp);
 5061   op_cost(0);
 5062   format %{ %}
 5063   interface(REG_INTER);
 5064 %}
 5065 
 5066 // Register R3 only
 5067 operand iRegI_R3()
 5068 %{
 5069   constraint(ALLOC_IN_RC(int_r3_reg));
 5070   match(RegI);
 5071   match(iRegINoSp);
 5072   op_cost(0);
 5073   format %{ %}
 5074   interface(REG_INTER);
 5075 %}
 5076 
 5077 
 5078 // Register R4 only
 5079 operand iRegI_R4()
 5080 %{
 5081   constraint(ALLOC_IN_RC(int_r4_reg));
 5082   match(RegI);
 5083   match(iRegINoSp);
 5084   op_cost(0);
 5085   format %{ %}
 5086   interface(REG_INTER);
 5087 %}
 5088 
 5089 
 5090 // Pointer Register Operands
 5091 // Narrow Pointer Register
 5092 operand iRegN()
 5093 %{
 5094   constraint(ALLOC_IN_RC(any_reg32));
 5095   match(RegN);
 5096   match(iRegNNoSp);
 5097   op_cost(0);
 5098   format %{ %}
 5099   interface(REG_INTER);
 5100 %}
 5101 
 5102 operand iRegN_R0()
 5103 %{
 5104   constraint(ALLOC_IN_RC(r0_reg));
 5105   match(iRegN);
 5106   op_cost(0);
 5107   format %{ %}
 5108   interface(REG_INTER);
 5109 %}
 5110 
 5111 operand iRegN_R2()
 5112 %{
 5113   constraint(ALLOC_IN_RC(r2_reg));
 5114   match(iRegN);
 5115   op_cost(0);
 5116   format %{ %}
 5117   interface(REG_INTER);
 5118 %}
 5119 
 5120 operand iRegN_R3()
 5121 %{
 5122   constraint(ALLOC_IN_RC(r3_reg));
 5123   match(iRegN);
 5124   op_cost(0);
 5125   format %{ %}
 5126   interface(REG_INTER);
 5127 %}
 5128 
// Narrow Pointer Register not Special
// (32-bit register class for compressed oops; excludes special regs.
// The previous comment "Integer 64 bit Register" was a copy-paste
// error — this operand matches RegN, not RegL.)
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5138 
 5139 // Float Register
 5140 // Float register operands
 5141 operand vRegF()
 5142 %{
 5143   constraint(ALLOC_IN_RC(float_reg));
 5144   match(RegF);
 5145 
 5146   op_cost(0);
 5147   format %{ %}
 5148   interface(REG_INTER);
 5149 %}
 5150 
 5151 // Double Register
 5152 // Double register operands
 5153 operand vRegD()
 5154 %{
 5155   constraint(ALLOC_IN_RC(double_reg));
 5156   match(RegD);
 5157 
 5158   op_cost(0);
 5159   format %{ %}
 5160   interface(REG_INTER);
 5161 %}
 5162 
// Generic vector class. This will be used for
// all vector operands, including NEON and SVE.
// VecA is the scalable (SVE) ideal type; VecD and VecX are the fixed
// 64-bit and 128-bit NEON types.  ALLOC_IN_RC(dynamic) defers the
// register-class choice until the actual vector kind is known.
operand vReg()
%{
  constraint(ALLOC_IN_RC(dynamic));
  match(VecA);
  match(VecD);
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5176 
 5177 operand vecA()
 5178 %{
 5179   constraint(ALLOC_IN_RC(vectora_reg));
 5180   match(VecA);
 5181 
 5182   op_cost(0);
 5183   format %{ %}
 5184   interface(REG_INTER);
 5185 %}
 5186 
 5187 operand vecD()
 5188 %{
 5189   constraint(ALLOC_IN_RC(vectord_reg));
 5190   match(VecD);
 5191 
 5192   op_cost(0);
 5193   format %{ %}
 5194   interface(REG_INTER);
 5195 %}
 5196 
 5197 operand vecX()
 5198 %{
 5199   constraint(ALLOC_IN_RC(vectorx_reg));
 5200   match(VecX);
 5201 
 5202   op_cost(0);
 5203   format %{ %}
 5204   interface(REG_INTER);
 5205 %}
 5206 
// Fixed double-precision FP register operands: one operand per vector
// register v0..v31, each constrained to a single-register allocation
// class (vN_reg). These let instruct rules pin a value to an exact
// register, e.g. for runtime stub calling conventions.
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V4()
%{
  constraint(ALLOC_IN_RC(v4_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V5()
%{
  constraint(ALLOC_IN_RC(v5_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V6()
%{
  constraint(ALLOC_IN_RC(v6_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V7()
%{
  constraint(ALLOC_IN_RC(v7_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V8()
%{
  constraint(ALLOC_IN_RC(v8_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V9()
%{
  constraint(ALLOC_IN_RC(v9_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V10()
%{
  constraint(ALLOC_IN_RC(v10_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V11()
%{
  constraint(ALLOC_IN_RC(v11_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V12()
%{
  constraint(ALLOC_IN_RC(v12_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V13()
%{
  constraint(ALLOC_IN_RC(v13_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V14()
%{
  constraint(ALLOC_IN_RC(v14_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V15()
%{
  constraint(ALLOC_IN_RC(v15_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V16()
%{
  constraint(ALLOC_IN_RC(v16_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V17()
%{
  constraint(ALLOC_IN_RC(v17_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V18()
%{
  constraint(ALLOC_IN_RC(v18_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V19()
%{
  constraint(ALLOC_IN_RC(v19_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V20()
%{
  constraint(ALLOC_IN_RC(v20_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V21()
%{
  constraint(ALLOC_IN_RC(v21_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V22()
%{
  constraint(ALLOC_IN_RC(v22_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V23()
%{
  constraint(ALLOC_IN_RC(v23_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V24()
%{
  constraint(ALLOC_IN_RC(v24_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V25()
%{
  constraint(ALLOC_IN_RC(v25_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V26()
%{
  constraint(ALLOC_IN_RC(v26_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V27()
%{
  constraint(ALLOC_IN_RC(v27_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V28()
%{
  constraint(ALLOC_IN_RC(v28_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V29()
%{
  constraint(ALLOC_IN_RC(v29_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V30()
%{
  constraint(ALLOC_IN_RC(v30_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V31()
%{
  constraint(ALLOC_IN_RC(v31_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5494 
// SVE predicate register operands.
// pReg allocates from the general predicate class (pr_reg); pRegGov
// from the governing-predicate class (gov_pr). Each lists the other via
// a secondary match() so the matcher can convert between the two.
operand pReg()
%{
  constraint(ALLOC_IN_RC(pr_reg));
  match(RegVectMask);
  match(pRegGov);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand pRegGov()
%{
  constraint(ALLOC_IN_RC(gov_pr));
  match(RegVectMask);
  match(pReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Fixed governing-predicate operands pinned to p0 and p1.
operand pRegGov_P0()
%{
  constraint(ALLOC_IN_RC(p0_reg));
  match(RegVectMask);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand pRegGov_P1()
%{
  constraint(ALLOC_IN_RC(p1_reg));
  match(RegVectMask);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5532 
 5533 // Flags register, used as output of signed compare instructions
 5534 
// note that on AArch64 we also use this register as the output
 5536 // for floating point compare instructions (CmpF CmpD). this ensures
 5537 // that ordered inequality tests use GT, GE, LT or LE none of which
 5538 // pass through cases where the result is unordered i.e. one or both
 5539 // inputs to the compare is a NaN. this means that the ideal code can
 5540 // replace e.g. a GT with an LE and not end up capturing the NaN case
 5541 // (where the comparison should always fail). EQ and NE tests are
 5542 // always generated in ideal code so that unordered folds into the NE
 5543 // case, matching the behaviour of AArch64 NE.
 5544 //
 5545 // This differs from x86 where the outputs of FP compares use a
 5546 // special FP flags registers and where compares based on this
 5547 // register are distinguished into ordered inequalities (cmpOpUCF) and
 5548 // EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
 5549 // to explicitly handle the unordered case in branches. x86 also has
 5550 // to include extra CMoveX rules to accept a cmpOpUCF input.
 5551 
// Flags operand for signed integral and FP compares (see note above).
operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
 5572 
 5573 // Special Registers
 5574 
// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5604 
 5605 //----------Memory Operands----------------------------------------------------
 5606 
// Base-register-only addressing: [reg].
// In MEMORY_INTER, index(0xffffffff) is the "no index register" marker.
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// Base + sign-extended 32-bit index, shifted by a constant scale.
// The predicate rejects scales that don't fit every memory use of
// this AddP (so all consumers can share the addressing mode).
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// Base + 64-bit index, shifted left by a constant scale.
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// Base + sign-extended 32-bit index, unscaled.
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Base + 64-bit index, unscaled.
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
 5678 
// Base + int immediate offset addressing: [reg, #off].
// The indOffI<N> variants differ only in the immediate operand type
// (immIOffset<N>), which presumably restricts the offset to values
// encodable for an N-byte access — confirm against the imm definitions.
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI1(iRegP reg, immIOffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI2(iRegP reg, immIOffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5762 
// Base + long immediate offset addressing: [reg, #off].
// Same per-access-size family as the indOffI<N> operands above, but the
// offset node is a long constant (immLoffset<N>).
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL1(iRegP reg, immLoffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL2(iRegP reg, immLoffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5846 
// Narrow-oop (compressed pointer) addressing forms: the base is a
// DecodeN of an iRegN. All are guarded by CompressedOops::shift() == 0,
// i.e. they are only valid when decoding a narrow oop needs no shift so
// the narrow register value can be used directly as the base address.
operand indirectN(iRegN reg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5951 
 5952 
 5953 
// AArch64 opto stubs need to write to the pc slot in the thread anchor
// Fixed addressing form: thread register base plus the anchor-pc offset
// immediate (immL_pc_off).
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5968 
 5969 //----------Special Memory Operands--------------------------------------------
 5970 // Stack Slot Operand - This operand is used for loading and storing temporary
 5971 //                      values on the stack where a match requires a value to
 5972 //                      flow through memory.
// Stack slot operands, one per value kind (P/I/F/D/L). The address is
// SP-relative: the displacement is the stack offset assigned to the
// spilled register by the allocator.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
 6043 
 6044 // Operands for expressing Control Flow
 6045 // NOTE: Label is a predefined operand which should not be redefined in
 6046 //       the AD file. It is generically handled within the ADLC.
 6047 
 6048 //----------Conditional Branch Operands----------------------------------------
 6049 // Comparison Op  - This is the operation of the comparison, and is limited to
 6050 //                  the following set of codes:
 6051 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
 6052 //
 6053 // Other attributes of the comparison, such as unsignedness, are specified
 6054 // by the comparison instruction that sets a condition code flags register.
 6055 // That result is represented by a flags operand whose subtype is appropriate
 6056 // to the unsignedness (etc.) of the comparison.
 6057 //
 6058 // Later, the instruction which matches both the Comparison Op (a Bool) and
 6059 // the flags (produced by the Cmp) specifies the coding of the comparison op
 6060 // by matching a specific subtype of Bool operand below, such as cmpOpU.
 6061 
 6062 // used for signed integral comparisons and fp comparisons
 6063 
// The hex values in the COND_INTER tables are the AArch64 condition-code
// encodings for the quoted mnemonics (eq=0x0, ne=0x1, lt=0xb, ge=0xa,
// le=0xd, gt=0xc, vs=0x6, vc=0x7, and the unsigned lo/hs/ls/hi codes).
operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions

// The predicate restricts matching to eq/ne Bool tests; the full
// condition table is kept, but only those two entries are reachable.
operand cmpOpEqNe()
%{
  match(Bool);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions

// As cmpOpEqNe, but restricted to lt/ge Bool tests.
operand cmpOpLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions

// Restricted to eq/ne/lt/ge Bool tests on unsigned comparisons.
operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 6172 
 6173 // Special operand allowing long args to int ops to be truncated for free
 6174 
 6175 operand iRegL2I(iRegL reg) %{
 6176 
 6177   op_cost(0);
 6178 
 6179   match(ConvL2I reg);
 6180 
 6181   format %{ "l2i($reg)" %}
 6182 
 6183   interface(REG_INTER)
 6184 %}
 6185 
// Vector memory opclasses, one per access size in bytes. Each allows
// base-only, base+index, and the matching size-specific immediate
// offset forms.
opclass vmem2(indirect, indIndex, indOffI2, indOffL2);
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
 6190 
 6191 //----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
 6193 // instruction definitions by not requiring the AD writer to specify
 6194 // separate instructions for every form of operand when the
 6195 // instruction accepts multiple operand types with the same basic
 6196 // encoding and format. The classic case of this is memory operands.
 6197 
 6198 // memory is used to define read/write location for load/store
 6199 // instruction defs. we can turn a memory op into an Address
 6200 
// Scalar memory opclasses by access size. Note: memory1 and memory2
// omit the narrow-oop immediate-offset forms (indOffIN/indOffLN), which
// are only listed in memory4 and memory8.
opclass memory1(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI1, indOffL1,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN);

opclass memory2(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI2, indOffL2,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN);

opclass memory4(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI4, indOffL4,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

opclass memory8(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

// All of the memory operands. For the pipeline description.
opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex,
               indOffI1, indOffL1, indOffI2, indOffL2, indOffI4, indOffL4, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);
 6217 
 6218 
 6219 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
 6220 // operations. it allows the src to be either an iRegI or a (ConvL2I
 6221 // iRegL). in the latter case the l2i normally planted for a ConvL2I
 6222 // can be elided because the 32-bit instruction will just employ the
 6223 // lower 32 bits anyway.
 6224 //
 6225 // n.b. this does not elide all L2I conversions. if the truncated
 6226 // value is consumed by more than one operation then the ConvL2I
 6227 // cannot be bundled into the consuming nodes so an l2i gets planted
 6228 // (actually a movw $dst $src) and the downstream instructions consume
 6229 // the result of the l2i as an iRegI input. That's a shame since the
 6230 // movw is actually redundant but its not too costly.
 6231 
// Accepts either a plain iRegI or an elidable (ConvL2I iRegL) — see the
// discussion above.
opclass iRegIorL2I(iRegI, iRegL2I);
 6233 
 6234 //----------PIPELINE-----------------------------------------------------------
 6235 // Rules which define the behavior of the target architectures pipeline.
 6236 
 6237 // For specific pipelines, eg A53, define the stages of that pipeline
 6238 //pipe_desc(ISS, EX1, EX2, WR);
// Alias the A53-style stage names onto the generic six stages (S0..S5)
// declared by pipe_desc below.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
 6243 
 6244 // Integer ALU reg operation
 6245 pipeline %{
 6246 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 4;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
 6259 
 6260 // We don't use an actual pipeline model so don't care about resources
 6261 // or description. we do use pipeline classes to introduce fixed
 6262 // latencies
 6263 
 6264 //----------RESOURCES----------------------------------------------------------
 6265 // Resources are the functional units available to the machine
 6266 
// Abstract functional units. INS01 and ALU are "either of the pair"
// masks built from the two issue slots / two ALUs.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);
 6274 
 6275 //----------PIPELINE DESCRIPTION-----------------------------------------------
 6276 // Pipeline Description specifies the stages in the machine's pipeline
 6277 
 6278 // Define the pipeline as a generic 6 stage pipeline
// Stages S0..S3 are aliased as ISS/EX1/EX2/WR via the #defines above.
pipe_desc(S0, S1, S2, S3, S4, S5);
 6280 
 6281 //----------PIPELINE CLASSES---------------------------------------------------
 6282 // Pipeline Classes describe the stages in which input and output are
 6283 // referenced by the hardware pipeline.
 6284 
// Floating point / NEON pipeline classes. As noted above, these model
// fixed latencies only: sources are read in early stages and the result
// written in a later one. Most classes can issue on either slot
// (INS01); the fp divide classes are restricted to slot 0 (INS0).
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Divides issue on slot 0 only.
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Conditional selects also read the flags register.
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
 6486 
 6487 //------- Integer ALU operations --------------------------
 6488 
 6489 // Integer ALU reg-reg operation
 6490 // Operands needed in EX1, result generated in EX2
 6491 // Eg.  ADD     x0, x1, x2
 6492 pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
 6493 %{
 6494   single_instruction;
 6495   dst    : EX2(write);
 6496   src1   : EX1(read);
 6497   src2   : EX1(read);
 6498   INS01  : ISS; // Dual issue as instruction 0 or 1
 6499   ALU    : EX2;
 6500 %}
 6501 
 6502 // Integer ALU reg-reg operation with constant shift
 6503 // Shifted register must be available in LATE_ISS instead of EX1
 6504 // Eg.  ADD     x0, x1, x2, LSL #2
 6505 pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
 6506 %{
 6507   single_instruction;
 6508   dst    : EX2(write);
 6509   src1   : EX1(read);
 6510   src2   : ISS(read);
 6511   INS01  : ISS;
 6512   ALU    : EX2;
 6513 %}
 6514 
 6515 // Integer ALU reg operation with constant shift
 6516 // Eg.  LSL     x0, x1, #shift
 6517 pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
 6518 %{
 6519   single_instruction;
 6520   dst    : EX2(write);
 6521   src1   : ISS(read);
 6522   INS01  : ISS;
 6523   ALU    : EX2;
 6524 %}
 6525 
 6526 // Integer ALU reg-reg operation with variable shift
 6527 // Both operands must be available in LATE_ISS instead of EX1
 6528 // Result is available in EX1 instead of EX2
 6529 // Eg.  LSLV    x0, x1, x2
 6530 pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
 6531 %{
 6532   single_instruction;
 6533   dst    : EX1(write);
 6534   src1   : ISS(read);
 6535   src2   : ISS(read);
 6536   INS01  : ISS;
 6537   ALU    : EX1;
 6538 %}
 6539 
 6540 // Integer ALU reg-reg operation with extract
 6541 // As for _vshift above, but result generated in EX2
 6542 // Eg.  EXTR    x0, x1, x2, #N
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  // NOTE(review): dst is written in EX2 (matching the header comment
  // "result generated in EX2"), yet the ALU resource is reserved in
  // EX1 here, unlike ialu_reg_reg which uses ALU : EX2 — confirm
  // whether EX1 is intentional or a copy from ialu_reg_reg_vshift.
  ALU    : EX1;
%}
 6552 
 6553 // Integer ALU reg operation
 6554 // Eg.  NEG     x0, x1
 6555 pipe_class ialu_reg(iRegI dst, iRegI src)
 6556 %{
 6557   single_instruction;
 6558   dst    : EX2(write);
 6559   src    : EX1(read);
 6560   INS01  : ISS;
 6561   ALU    : EX2;
 6562 %}
 6563 
// Integer ALU reg-immediate operation
 6565 // Eg.  ADD     x0, x1, #N
 6566 pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
 6567 %{
 6568   single_instruction;
 6569   dst    : EX2(write);
 6570   src1   : EX1(read);
 6571   INS01  : ISS;
 6572   ALU    : EX2;
 6573 %}
 6574 
 6575 // Integer ALU immediate operation (no source operands)
 6576 // Eg.  MOV     x0, #N
 6577 pipe_class ialu_imm(iRegI dst)
 6578 %{
 6579   single_instruction;
 6580   dst    : EX1(write);
 6581   INS01  : ISS;
 6582   ALU    : EX1;
 6583 %}
 6584 
 6585 //------- Compare operation -------------------------------
 6586 
 6587 // Compare reg-reg
 6588 // Eg.  CMP     x0, x1
 6589 pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
 6590 %{
 6591   single_instruction;
 6592 //  fixed_latency(16);
 6593   cr     : EX2(write);
 6594   op1    : EX1(read);
 6595   op2    : EX1(read);
 6596   INS01  : ISS;
 6597   ALU    : EX2;
 6598 %}
 6599 
// Compare reg-imm
 6601 // Eg.  CMP     x0, #N
 6602 pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
 6603 %{
 6604   single_instruction;
 6605 //  fixed_latency(16);
 6606   cr     : EX2(write);
 6607   op1    : EX1(read);
 6608   INS01  : ISS;
 6609   ALU    : EX2;
 6610 %}
 6611 
 6612 //------- Conditional instructions ------------------------
 6613 
 6614 // Conditional no operands
 6615 // Eg.  CSINC   x0, zr, zr, <cond>
 6616 pipe_class icond_none(iRegI dst, rFlagsReg cr)
 6617 %{
 6618   single_instruction;
 6619   cr     : EX1(read);
 6620   dst    : EX2(write);
 6621   INS01  : ISS;
 6622   ALU    : EX2;
 6623 %}
 6624 
 6625 // Conditional 2 operand
 6626 // EG.  CSEL    X0, X1, X2, <cond>
 6627 pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
 6628 %{
 6629   single_instruction;
 6630   cr     : EX1(read);
 6631   src1   : EX1(read);
 6632   src2   : EX1(read);
 6633   dst    : EX2(write);
 6634   INS01  : ISS;
 6635   ALU    : EX2;
 6636 %}
 6637 
// Conditional 1 operand
// EG.  CINC    x0, x1, <cond>
 6640 pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
 6641 %{
 6642   single_instruction;
 6643   cr     : EX1(read);
 6644   src    : EX1(read);
 6645   dst    : EX2(write);
 6646   INS01  : ISS;
 6647   ALU    : EX2;
 6648 %}
 6649 
 6650 //------- Multiply pipeline operations --------------------
 6651 
 6652 // Multiply reg-reg
 6653 // Eg.  MUL     w0, w1, w2
 6654 pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
 6655 %{
 6656   single_instruction;
 6657   dst    : WR(write);
 6658   src1   : ISS(read);
 6659   src2   : ISS(read);
 6660   INS01  : ISS;
 6661   MAC    : WR;
 6662 %}
 6663 
 6664 // Multiply accumulate
 6665 // Eg.  MADD    w0, w1, w2, w3
 6666 pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
 6667 %{
 6668   single_instruction;
 6669   dst    : WR(write);
 6670   src1   : ISS(read);
 6671   src2   : ISS(read);
 6672   src3   : ISS(read);
 6673   INS01  : ISS;
 6674   MAC    : WR;
 6675 %}
 6676 
// Long multiply reg-reg
// Eg.  MUL     x0, x1, x2
 6678 pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
 6679 %{
 6680   single_instruction;
 6681   fixed_latency(3); // Maximum latency for 64 bit mul
 6682   dst    : WR(write);
 6683   src1   : ISS(read);
 6684   src2   : ISS(read);
 6685   INS01  : ISS;
 6686   MAC    : WR;
 6687 %}
 6688 
// Long multiply accumulate
// Eg.  MADD    x0, x1, x2, x3
 6691 pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
 6692 %{
 6693   single_instruction;
 6694   fixed_latency(3); // Maximum latency for 64 bit mul
 6695   dst    : WR(write);
 6696   src1   : ISS(read);
 6697   src2   : ISS(read);
 6698   src3   : ISS(read);
 6699   INS01  : ISS;
 6700   MAC    : WR;
 6701 %}
 6702 
 6703 //------- Divide pipeline operations --------------------
 6704 
 6705 // Eg.  SDIV    w0, w1, w2
 6706 pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
 6707 %{
 6708   single_instruction;
 6709   fixed_latency(8); // Maximum latency for 32 bit divide
 6710   dst    : WR(write);
 6711   src1   : ISS(read);
 6712   src2   : ISS(read);
 6713   INS0   : ISS; // Can only dual issue as instruction 0
 6714   DIV    : WR;
 6715 %}
 6716 
 6717 // Eg.  SDIV    x0, x1, x2
 6718 pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
 6719 %{
 6720   single_instruction;
 6721   fixed_latency(16); // Maximum latency for 64 bit divide
 6722   dst    : WR(write);
 6723   src1   : ISS(read);
 6724   src2   : ISS(read);
 6725   INS0   : ISS; // Can only dual issue as instruction 0
 6726   DIV    : WR;
 6727 %}
 6728 
 6729 //------- Load pipeline operations ------------------------
 6730 
 6731 // Load - prefetch
 6732 // Eg.  PFRM    <mem>
 6733 pipe_class iload_prefetch(memory mem)
 6734 %{
 6735   single_instruction;
 6736   mem    : ISS(read);
 6737   INS01  : ISS;
 6738   LDST   : WR;
 6739 %}
 6740 
 6741 // Load - reg, mem
 6742 // Eg.  LDR     x0, <mem>
 6743 pipe_class iload_reg_mem(iRegI dst, memory mem)
 6744 %{
 6745   single_instruction;
 6746   dst    : WR(write);
 6747   mem    : ISS(read);
 6748   INS01  : ISS;
 6749   LDST   : WR;
 6750 %}
 6751 
 6752 // Load - reg, reg
 6753 // Eg.  LDR     x0, [sp, x1]
 6754 pipe_class iload_reg_reg(iRegI dst, iRegI src)
 6755 %{
 6756   single_instruction;
 6757   dst    : WR(write);
 6758   src    : ISS(read);
 6759   INS01  : ISS;
 6760   LDST   : WR;
 6761 %}
 6762 
 6763 //------- Store pipeline operations -----------------------
 6764 
 6765 // Store - zr, mem
 6766 // Eg.  STR     zr, <mem>
 6767 pipe_class istore_mem(memory mem)
 6768 %{
 6769   single_instruction;
 6770   mem    : ISS(read);
 6771   INS01  : ISS;
 6772   LDST   : WR;
 6773 %}
 6774 
 6775 // Store - reg, mem
 6776 // Eg.  STR     x0, <mem>
 6777 pipe_class istore_reg_mem(iRegI src, memory mem)
 6778 %{
 6779   single_instruction;
 6780   mem    : ISS(read);
 6781   src    : EX2(read);
 6782   INS01  : ISS;
 6783   LDST   : WR;
 6784 %}
 6785 
 6786 // Store - reg, reg
 6787 // Eg. STR      x0, [sp, x1]
 6788 pipe_class istore_reg_reg(iRegI dst, iRegI src)
 6789 %{
 6790   single_instruction;
 6791   dst    : ISS(read);
 6792   src    : EX2(read);
 6793   INS01  : ISS;
 6794   LDST   : WR;
 6795 %}
 6796 
//------- Branch pipeline operations ----------------------
 6798 
 6799 // Branch
 6800 pipe_class pipe_branch()
 6801 %{
 6802   single_instruction;
 6803   INS01  : ISS;
 6804   BRANCH : EX1;
 6805 %}
 6806 
 6807 // Conditional branch
 6808 pipe_class pipe_branch_cond(rFlagsReg cr)
 6809 %{
 6810   single_instruction;
 6811   cr     : EX1(read);
 6812   INS01  : ISS;
 6813   BRANCH : EX1;
 6814 %}
 6815 
 6816 // Compare & Branch
 6817 // EG.  CBZ/CBNZ
 6818 pipe_class pipe_cmp_branch(iRegI op1)
 6819 %{
 6820   single_instruction;
 6821   op1    : EX1(read);
 6822   INS01  : ISS;
 6823   BRANCH : EX1;
 6824 %}
 6825 
 6826 //------- Synchronisation operations ----------------------
 6827 
 6828 // Any operation requiring serialization.
 6829 // EG.  DMB/Atomic Ops/Load Acquire/Str Release
 6830 pipe_class pipe_serial()
 6831 %{
 6832   single_instruction;
 6833   force_serialization;
 6834   fixed_latency(16);
 6835   INS01  : ISS(2); // Cannot dual issue with any other instruction
 6836   LDST   : WR;
 6837 %}
 6838 
 6839 // Generic big/slow expanded idiom - also serialized
 6840 pipe_class pipe_slow()
 6841 %{
 6842   instruction_count(10);
 6843   multiple_bundles;
 6844   force_serialization;
 6845   fixed_latency(16);
 6846   INS01  : ISS(2); // Cannot dual issue with any other instruction
 6847   LDST   : WR;
 6848 %}
 6849 
 6850 // Empty pipeline class
 6851 pipe_class pipe_class_empty()
 6852 %{
 6853   single_instruction;
 6854   fixed_latency(0);
 6855 %}
 6856 
 6857 // Default pipeline class.
 6858 pipe_class pipe_class_default()
 6859 %{
 6860   single_instruction;
 6861   fixed_latency(2);
 6862 %}
 6863 
 6864 // Pipeline class for compares.
 6865 pipe_class pipe_class_compare()
 6866 %{
 6867   single_instruction;
 6868   fixed_latency(16);
 6869 %}
 6870 
 6871 // Pipeline class for memory operations.
 6872 pipe_class pipe_class_memory()
 6873 %{
 6874   single_instruction;
 6875   fixed_latency(16);
 6876 %}
 6877 
 6878 // Pipeline class for call.
 6879 pipe_class pipe_class_call()
 6880 %{
 6881   single_instruction;
 6882   fixed_latency(100);
 6883 %}
 6884 
 6885 // Define the class for the Nop node.
 6886 define %{
 6887    MachNop = pipe_class_empty;
 6888 %}
 6889 
 6890 %}
 6891 //----------INSTRUCTIONS-------------------------------------------------------
 6892 //
 6893 // match      -- States which machine-independent subtree may be replaced
 6894 //               by this instruction.
 6895 // ins_cost   -- The estimated cost of this instruction is used by instruction
 6896 //               selection to identify a minimum cost tree of machine
 6897 //               instructions that matches a tree of machine-independent
 6898 //               instructions.
 6899 // format     -- A string providing the disassembly for this instruction.
 6900 //               The value of an instruction's operand may be inserted
 6901 //               by referring to it with a '$' prefix.
 6902 // opcode     -- Three instruction opcodes may be provided.  These are referred
 6903 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
 6905 //               indicate the type of machine instruction, while secondary
 6906 //               and tertiary are often used for prefix options or addressing
 6907 //               modes.
 6908 // ins_encode -- A list of encode classes with parameters. The encode class
 6909 //               name must have been defined in an 'enc_class' specification
 6910 //               in the encode section of the architecture description.
 6911 
 6912 // ============================================================================
 6913 // Memory (Load/Store) Instructions
 6914 
 6915 // Load Instructions
 6916 
 6917 // Load Byte (8 bit signed)
 6918 instruct loadB(iRegINoSp dst, memory1 mem)
 6919 %{
 6920   match(Set dst (LoadB mem));
 6921   predicate(!needs_acquiring_load(n));
 6922 
 6923   ins_cost(4 * INSN_COST);
 6924   format %{ "ldrsbw  $dst, $mem\t# byte" %}
 6925 
 6926   ins_encode(aarch64_enc_ldrsbw(dst, mem));
 6927 
 6928   ins_pipe(iload_reg_mem);
 6929 %}
 6930 
 6931 // Load Byte (8 bit signed) into long
 6932 instruct loadB2L(iRegLNoSp dst, memory1 mem)
 6933 %{
 6934   match(Set dst (ConvI2L (LoadB mem)));
 6935   predicate(!needs_acquiring_load(n->in(1)));
 6936 
 6937   ins_cost(4 * INSN_COST);
 6938   format %{ "ldrsb  $dst, $mem\t# byte" %}
 6939 
 6940   ins_encode(aarch64_enc_ldrsb(dst, mem));
 6941 
 6942   ins_pipe(iload_reg_mem);
 6943 %}
 6944 
 6945 // Load Byte (8 bit unsigned)
 6946 instruct loadUB(iRegINoSp dst, memory1 mem)
 6947 %{
 6948   match(Set dst (LoadUB mem));
 6949   predicate(!needs_acquiring_load(n));
 6950 
 6951   ins_cost(4 * INSN_COST);
 6952   format %{ "ldrbw  $dst, $mem\t# byte" %}
 6953 
 6954   ins_encode(aarch64_enc_ldrb(dst, mem));
 6955 
 6956   ins_pipe(iload_reg_mem);
 6957 %}
 6958 
 6959 // Load Byte (8 bit unsigned) into long
 6960 instruct loadUB2L(iRegLNoSp dst, memory1 mem)
 6961 %{
 6962   match(Set dst (ConvI2L (LoadUB mem)));
 6963   predicate(!needs_acquiring_load(n->in(1)));
 6964 
 6965   ins_cost(4 * INSN_COST);
 6966   format %{ "ldrb  $dst, $mem\t# byte" %}
 6967 
 6968   ins_encode(aarch64_enc_ldrb(dst, mem));
 6969 
 6970   ins_pipe(iload_reg_mem);
 6971 %}
 6972 
 6973 // Load Short (16 bit signed)
 6974 instruct loadS(iRegINoSp dst, memory2 mem)
 6975 %{
 6976   match(Set dst (LoadS mem));
 6977   predicate(!needs_acquiring_load(n));
 6978 
 6979   ins_cost(4 * INSN_COST);
 6980   format %{ "ldrshw  $dst, $mem\t# short" %}
 6981 
 6982   ins_encode(aarch64_enc_ldrshw(dst, mem));
 6983 
 6984   ins_pipe(iload_reg_mem);
 6985 %}
 6986 
 6987 // Load Short (16 bit signed) into long
 6988 instruct loadS2L(iRegLNoSp dst, memory2 mem)
 6989 %{
 6990   match(Set dst (ConvI2L (LoadS mem)));
 6991   predicate(!needs_acquiring_load(n->in(1)));
 6992 
 6993   ins_cost(4 * INSN_COST);
 6994   format %{ "ldrsh  $dst, $mem\t# short" %}
 6995 
 6996   ins_encode(aarch64_enc_ldrsh(dst, mem));
 6997 
 6998   ins_pipe(iload_reg_mem);
 6999 %}
 7000 
 7001 // Load Char (16 bit unsigned)
 7002 instruct loadUS(iRegINoSp dst, memory2 mem)
 7003 %{
 7004   match(Set dst (LoadUS mem));
 7005   predicate(!needs_acquiring_load(n));
 7006 
 7007   ins_cost(4 * INSN_COST);
 7008   format %{ "ldrh  $dst, $mem\t# short" %}
 7009 
 7010   ins_encode(aarch64_enc_ldrh(dst, mem));
 7011 
 7012   ins_pipe(iload_reg_mem);
 7013 %}
 7014 
 7015 // Load Short/Char (16 bit unsigned) into long
 7016 instruct loadUS2L(iRegLNoSp dst, memory2 mem)
 7017 %{
 7018   match(Set dst (ConvI2L (LoadUS mem)));
 7019   predicate(!needs_acquiring_load(n->in(1)));
 7020 
 7021   ins_cost(4 * INSN_COST);
 7022   format %{ "ldrh  $dst, $mem\t# short" %}
 7023 
 7024   ins_encode(aarch64_enc_ldrh(dst, mem));
 7025 
 7026   ins_pipe(iload_reg_mem);
 7027 %}
 7028 
 7029 // Load Integer (32 bit signed)
 7030 instruct loadI(iRegINoSp dst, memory4 mem)
 7031 %{
 7032   match(Set dst (LoadI mem));
 7033   predicate(!needs_acquiring_load(n));
 7034 
 7035   ins_cost(4 * INSN_COST);
 7036   format %{ "ldrw  $dst, $mem\t# int" %}
 7037 
 7038   ins_encode(aarch64_enc_ldrw(dst, mem));
 7039 
 7040   ins_pipe(iload_reg_mem);
 7041 %}
 7042 
 7043 // Load Integer (32 bit signed) into long
 7044 instruct loadI2L(iRegLNoSp dst, memory4 mem)
 7045 %{
 7046   match(Set dst (ConvI2L (LoadI mem)));
 7047   predicate(!needs_acquiring_load(n->in(1)));
 7048 
 7049   ins_cost(4 * INSN_COST);
 7050   format %{ "ldrsw  $dst, $mem\t# int" %}
 7051 
 7052   ins_encode(aarch64_enc_ldrsw(dst, mem));
 7053 
 7054   ins_pipe(iload_reg_mem);
 7055 %}
 7056 
 7057 // Load Integer (32 bit unsigned) into long
 7058 instruct loadUI2L(iRegLNoSp dst, memory4 mem, immL_32bits mask)
 7059 %{
 7060   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
 7061   predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));
 7062 
 7063   ins_cost(4 * INSN_COST);
 7064   format %{ "ldrw  $dst, $mem\t# int" %}
 7065 
 7066   ins_encode(aarch64_enc_ldrw(dst, mem));
 7067 
 7068   ins_pipe(iload_reg_mem);
 7069 %}
 7070 
 7071 // Load Long (64 bit signed)
 7072 instruct loadL(iRegLNoSp dst, memory8 mem)
 7073 %{
 7074   match(Set dst (LoadL mem));
 7075   predicate(!needs_acquiring_load(n));
 7076 
 7077   ins_cost(4 * INSN_COST);
 7078   format %{ "ldr  $dst, $mem\t# int" %}
 7079 
 7080   ins_encode(aarch64_enc_ldr(dst, mem));
 7081 
 7082   ins_pipe(iload_reg_mem);
 7083 %}
 7084 
 7085 // Load Range
 7086 instruct loadRange(iRegINoSp dst, memory4 mem)
 7087 %{
 7088   match(Set dst (LoadRange mem));
 7089 
 7090   ins_cost(4 * INSN_COST);
 7091   format %{ "ldrw  $dst, $mem\t# range" %}
 7092 
 7093   ins_encode(aarch64_enc_ldrw(dst, mem));
 7094 
 7095   ins_pipe(iload_reg_mem);
 7096 %}
 7097 
 7098 // Load Pointer
// Load Pointer: matches only plain (non-acquiring) loads with no GC
// barrier data; volatile loads and GC-barriered loads are handled by
// other rules.
instruct loadP(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7111 
 7112 // Load Compressed Pointer
 7113 instruct loadN(iRegNNoSp dst, memory4 mem)
 7114 %{
 7115   match(Set dst (LoadN mem));
 7116   predicate(!needs_acquiring_load(n));
 7117 
 7118   ins_cost(4 * INSN_COST);
 7119   format %{ "ldrw  $dst, $mem\t# compressed ptr" %}
 7120 
 7121   ins_encode(aarch64_enc_ldrw(dst, mem));
 7122 
 7123   ins_pipe(iload_reg_mem);
 7124 %}
 7125 
 7126 // Load Klass Pointer
 7127 instruct loadKlass(iRegPNoSp dst, memory8 mem)
 7128 %{
 7129   match(Set dst (LoadKlass mem));
 7130   predicate(!needs_acquiring_load(n));
 7131 
 7132   ins_cost(4 * INSN_COST);
 7133   format %{ "ldr  $dst, $mem\t# class" %}
 7134 
 7135   ins_encode(aarch64_enc_ldr(dst, mem));
 7136 
 7137   ins_pipe(iload_reg_mem);
 7138 %}
 7139 
 7140 // Load Narrow Klass Pointer
 7141 instruct loadNKlass(iRegNNoSp dst, memory4 mem)
 7142 %{
 7143   match(Set dst (LoadNKlass mem));
 7144   predicate(!needs_acquiring_load(n));
 7145 
 7146   ins_cost(4 * INSN_COST);
 7147   format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}
 7148 
 7149   ins_encode(aarch64_enc_ldrw(dst, mem));
 7150 
 7151   ins_pipe(iload_reg_mem);
 7152 %}
 7153 
 7154 // Load Float
 7155 instruct loadF(vRegF dst, memory4 mem)
 7156 %{
 7157   match(Set dst (LoadF mem));
 7158   predicate(!needs_acquiring_load(n));
 7159 
 7160   ins_cost(4 * INSN_COST);
 7161   format %{ "ldrs  $dst, $mem\t# float" %}
 7162 
 7163   ins_encode( aarch64_enc_ldrs(dst, mem) );
 7164 
 7165   ins_pipe(pipe_class_memory);
 7166 %}
 7167 
 7168 // Load Double
 7169 instruct loadD(vRegD dst, memory8 mem)
 7170 %{
 7171   match(Set dst (LoadD mem));
 7172   predicate(!needs_acquiring_load(n));
 7173 
 7174   ins_cost(4 * INSN_COST);
 7175   format %{ "ldrd  $dst, $mem\t# double" %}
 7176 
 7177   ins_encode( aarch64_enc_ldrd(dst, mem) );
 7178 
 7179   ins_pipe(pipe_class_memory);
 7180 %}
 7181 
 7182 
 7183 // Load Int Constant
 7184 instruct loadConI(iRegINoSp dst, immI src)
 7185 %{
 7186   match(Set dst src);
 7187 
 7188   ins_cost(INSN_COST);
 7189   format %{ "mov $dst, $src\t# int" %}
 7190 
 7191   ins_encode( aarch64_enc_movw_imm(dst, src) );
 7192 
 7193   ins_pipe(ialu_imm);
 7194 %}
 7195 
 7196 // Load Long Constant
 7197 instruct loadConL(iRegLNoSp dst, immL src)
 7198 %{
 7199   match(Set dst src);
 7200 
 7201   ins_cost(INSN_COST);
 7202   format %{ "mov $dst, $src\t# long" %}
 7203 
 7204   ins_encode( aarch64_enc_mov_imm(dst, src) );
 7205 
 7206   ins_pipe(ialu_imm);
 7207 %}
 7208 
 7209 // Load Pointer Constant
 7210 
 7211 instruct loadConP(iRegPNoSp dst, immP con)
 7212 %{
 7213   match(Set dst con);
 7214 
 7215   ins_cost(INSN_COST * 4);
 7216   format %{
 7217     "mov  $dst, $con\t# ptr\n\t"
 7218   %}
 7219 
 7220   ins_encode(aarch64_enc_mov_p(dst, con));
 7221 
 7222   ins_pipe(ialu_imm);
 7223 %}
 7224 
 7225 // Load Null Pointer Constant
 7226 
 7227 instruct loadConP0(iRegPNoSp dst, immP0 con)
 7228 %{
 7229   match(Set dst con);
 7230 
 7231   ins_cost(INSN_COST);
 7232   format %{ "mov  $dst, $con\t# NULL ptr" %}
 7233 
 7234   ins_encode(aarch64_enc_mov_p0(dst, con));
 7235 
 7236   ins_pipe(ialu_imm);
 7237 %}
 7238 
 7239 // Load Pointer Constant One
 7240 
 7241 instruct loadConP1(iRegPNoSp dst, immP_1 con)
 7242 %{
 7243   match(Set dst con);
 7244 
 7245   ins_cost(INSN_COST);
 7246   format %{ "mov  $dst, $con\t# NULL ptr" %}
 7247 
 7248   ins_encode(aarch64_enc_mov_p1(dst, con));
 7249 
 7250   ins_pipe(ialu_imm);
 7251 %}
 7252 
 7253 // Load Byte Map Base Constant
 7254 
 7255 instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
 7256 %{
 7257   match(Set dst con);
 7258 
 7259   ins_cost(INSN_COST);
 7260   format %{ "adr  $dst, $con\t# Byte Map Base" %}
 7261 
 7262   ins_encode(aarch64_enc_mov_byte_map_base(dst, con));
 7263 
 7264   ins_pipe(ialu_imm);
 7265 %}
 7266 
 7267 // Load Narrow Pointer Constant
 7268 
 7269 instruct loadConN(iRegNNoSp dst, immN con)
 7270 %{
 7271   match(Set dst con);
 7272 
 7273   ins_cost(INSN_COST * 4);
 7274   format %{ "mov  $dst, $con\t# compressed ptr" %}
 7275 
 7276   ins_encode(aarch64_enc_mov_n(dst, con));
 7277 
 7278   ins_pipe(ialu_imm);
 7279 %}
 7280 
 7281 // Load Narrow Null Pointer Constant
 7282 
 7283 instruct loadConN0(iRegNNoSp dst, immN0 con)
 7284 %{
 7285   match(Set dst con);
 7286 
 7287   ins_cost(INSN_COST);
 7288   format %{ "mov  $dst, $con\t# compressed NULL ptr" %}
 7289 
 7290   ins_encode(aarch64_enc_mov_n0(dst, con));
 7291 
 7292   ins_pipe(ialu_imm);
 7293 %}
 7294 
 7295 // Load Narrow Klass Constant
 7296 
 7297 instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
 7298 %{
 7299   match(Set dst con);
 7300 
 7301   ins_cost(INSN_COST);
 7302   format %{ "mov  $dst, $con\t# compressed klass ptr" %}
 7303 
 7304   ins_encode(aarch64_enc_mov_nk(dst, con));
 7305 
 7306   ins_pipe(ialu_imm);
 7307 %}
 7308 
 7309 // Load Packed Float Constant
 7310 
 7311 instruct loadConF_packed(vRegF dst, immFPacked con) %{
 7312   match(Set dst con);
 7313   ins_cost(INSN_COST * 4);
 7314   format %{ "fmovs  $dst, $con"%}
 7315   ins_encode %{
 7316     __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
 7317   %}
 7318 
 7319   ins_pipe(fp_imm_s);
 7320 %}
 7321 
 7322 // Load Float Constant
 7323 
 7324 instruct loadConF(vRegF dst, immF con) %{
 7325   match(Set dst con);
 7326 
 7327   ins_cost(INSN_COST * 4);
 7328 
 7329   format %{
 7330     "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
 7331   %}
 7332 
 7333   ins_encode %{
 7334     __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
 7335   %}
 7336 
 7337   ins_pipe(fp_load_constant_s);
 7338 %}
 7339 
 7340 // Load Packed Double Constant
 7341 
 7342 instruct loadConD_packed(vRegD dst, immDPacked con) %{
 7343   match(Set dst con);
 7344   ins_cost(INSN_COST);
 7345   format %{ "fmovd  $dst, $con"%}
 7346   ins_encode %{
 7347     __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
 7348   %}
 7349 
 7350   ins_pipe(fp_imm_d);
 7351 %}
 7352 
 7353 // Load Double Constant
 7354 
 7355 instruct loadConD(vRegD dst, immD con) %{
 7356   match(Set dst con);
 7357 
 7358   ins_cost(INSN_COST * 5);
 7359   format %{
 7360     "ldrd $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
 7361   %}
 7362 
 7363   ins_encode %{
 7364     __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
 7365   %}
 7366 
 7367   ins_pipe(fp_load_constant_d);
 7368 %}
 7369 
 7370 // Store Instructions
 7371 
 7372 // Store CMS card-mark Immediate
 7373 instruct storeimmCM0(immI0 zero, memory1 mem)
 7374 %{
 7375   match(Set mem (StoreCM mem zero));
 7376 
 7377   ins_cost(INSN_COST);
 7378   format %{ "storestore (elided)\n\t"
 7379             "strb zr, $mem\t# byte" %}
 7380 
 7381   ins_encode(aarch64_enc_strb0(mem));
 7382 
 7383   ins_pipe(istore_mem);
 7384 %}
 7385 
 7386 // Store CMS card-mark Immediate with intervening StoreStore
 7387 // needed when using CMS with no conditional card marking
 7388 instruct storeimmCM0_ordered(immI0 zero, memory1 mem)
 7389 %{
 7390   match(Set mem (StoreCM mem zero));
 7391 
 7392   ins_cost(INSN_COST * 2);
 7393   format %{ "storestore\n\t"
 7394             "dmb ishst"
 7395             "\n\tstrb zr, $mem\t# byte" %}
 7396 
 7397   ins_encode(aarch64_enc_strb0_ordered(mem));
 7398 
 7399   ins_pipe(istore_mem);
 7400 %}
 7401 
 7402 // Store Byte
 7403 instruct storeB(iRegIorL2I src, memory1 mem)
 7404 %{
 7405   match(Set mem (StoreB mem src));
 7406   predicate(!needs_releasing_store(n));
 7407 
 7408   ins_cost(INSN_COST);
 7409   format %{ "strb  $src, $mem\t# byte" %}
 7410 
 7411   ins_encode(aarch64_enc_strb(src, mem));
 7412 
 7413   ins_pipe(istore_reg_mem);
 7414 %}
 7415 
 7416 
 7417 instruct storeimmB0(immI0 zero, memory1 mem)
 7418 %{
 7419   match(Set mem (StoreB mem zero));
 7420   predicate(!needs_releasing_store(n));
 7421 
 7422   ins_cost(INSN_COST);
 7423   format %{ "strb rscractch2, $mem\t# byte" %}
 7424 
 7425   ins_encode(aarch64_enc_strb0(mem));
 7426 
 7427   ins_pipe(istore_mem);
 7428 %}
 7429 
 7430 // Store Char/Short
 7431 instruct storeC(iRegIorL2I src, memory2 mem)
 7432 %{
 7433   match(Set mem (StoreC mem src));
 7434   predicate(!needs_releasing_store(n));
 7435 
 7436   ins_cost(INSN_COST);
 7437   format %{ "strh  $src, $mem\t# short" %}
 7438 
 7439   ins_encode(aarch64_enc_strh(src, mem));
 7440 
 7441   ins_pipe(istore_reg_mem);
 7442 %}
 7443 
 7444 instruct storeimmC0(immI0 zero, memory2 mem)
 7445 %{
 7446   match(Set mem (StoreC mem zero));
 7447   predicate(!needs_releasing_store(n));
 7448 
 7449   ins_cost(INSN_COST);
 7450   format %{ "strh  zr, $mem\t# short" %}
 7451 
 7452   ins_encode(aarch64_enc_strh0(mem));
 7453 
 7454   ins_pipe(istore_mem);
 7455 %}
 7456 
 7457 // Store Integer
 7458 
 7459 instruct storeI(iRegIorL2I src, memory4 mem)
 7460 %{
 7461   match(Set mem(StoreI mem src));
 7462   predicate(!needs_releasing_store(n));
 7463 
 7464   ins_cost(INSN_COST);
 7465   format %{ "strw  $src, $mem\t# int" %}
 7466 
 7467   ins_encode(aarch64_enc_strw(src, mem));
 7468 
 7469   ins_pipe(istore_reg_mem);
 7470 %}
 7471 
 7472 instruct storeimmI0(immI0 zero, memory4 mem)
 7473 %{
 7474   match(Set mem(StoreI mem zero));
 7475   predicate(!needs_releasing_store(n));
 7476 
 7477   ins_cost(INSN_COST);
 7478   format %{ "strw  zr, $mem\t# int" %}
 7479 
 7480   ins_encode(aarch64_enc_strw0(mem));
 7481 
 7482   ins_pipe(istore_mem);
 7483 %}
 7484 
 7485 // Store Long (64 bit signed)
 7486 instruct storeL(iRegL src, memory8 mem)
 7487 %{
 7488   match(Set mem (StoreL mem src));
 7489   predicate(!needs_releasing_store(n));
 7490 
 7491   ins_cost(INSN_COST);
 7492   format %{ "str  $src, $mem\t# int" %}
 7493 
 7494   ins_encode(aarch64_enc_str(src, mem));
 7495 
 7496   ins_pipe(istore_reg_mem);
 7497 %}
 7498 
 7499 // Store Long (64 bit signed)
 7500 instruct storeimmL0(immL0 zero, memory8 mem)
 7501 %{
 7502   match(Set mem (StoreL mem zero));
 7503   predicate(!needs_releasing_store(n));
 7504 
 7505   ins_cost(INSN_COST);
 7506   format %{ "str  zr, $mem\t# int" %}
 7507 
 7508   ins_encode(aarch64_enc_str0(mem));
 7509 
 7510   ins_pipe(istore_mem);
 7511 %}
 7512 
 7513 // Store Pointer
 7514 instruct storeP(iRegP src, memory8 mem)
 7515 %{
 7516   match(Set mem (StoreP mem src));
 7517   predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);
 7518 
 7519   ins_cost(INSN_COST);
 7520   format %{ "str  $src, $mem\t# ptr" %}
 7521 
 7522   ins_encode(aarch64_enc_str(src, mem));
 7523 
 7524   ins_pipe(istore_reg_mem);
 7525 %}
 7526 
 7527 // Store Pointer
 7528 instruct storeimmP0(immP0 zero, memory8 mem)
 7529 %{
 7530   match(Set mem (StoreP mem zero));
 7531   predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);
 7532 
 7533   ins_cost(INSN_COST);
 7534   format %{ "str zr, $mem\t# ptr" %}
 7535 
 7536   ins_encode(aarch64_enc_str0(mem));
 7537 
 7538   ins_pipe(istore_mem);
 7539 %}
 7540 
 7541 // Store Compressed Pointer
 7542 instruct storeN(iRegN src, memory4 mem)
 7543 %{
 7544   match(Set mem (StoreN mem src));
 7545   predicate(!needs_releasing_store(n));
 7546 
 7547   ins_cost(INSN_COST);
 7548   format %{ "strw  $src, $mem\t# compressed ptr" %}
 7549 
 7550   ins_encode(aarch64_enc_strw(src, mem));
 7551 
 7552   ins_pipe(istore_reg_mem);
 7553 %}
 7554 
 7555 instruct storeImmN0(immN0 zero, memory4 mem)
 7556 %{
 7557   match(Set mem (StoreN mem zero));
 7558   predicate(!needs_releasing_store(n));
 7559 
 7560   ins_cost(INSN_COST);
 7561   format %{ "strw  zr, $mem\t# compressed ptr" %}
 7562 
 7563   ins_encode(aarch64_enc_strw0(mem));
 7564 
 7565   ins_pipe(istore_mem);
 7566 %}
 7567 
 7568 // Store Float
 7569 instruct storeF(vRegF src, memory4 mem)
 7570 %{
 7571   match(Set mem (StoreF mem src));
 7572   predicate(!needs_releasing_store(n));
 7573 
 7574   ins_cost(INSN_COST);
 7575   format %{ "strs  $src, $mem\t# float" %}
 7576 
 7577   ins_encode( aarch64_enc_strs(src, mem) );
 7578 
 7579   ins_pipe(pipe_class_memory);
 7580 %}
 7581 
 7582 // TODO
 7583 // implement storeImmF0 and storeFImmPacked
 7584 
 7585 // Store Double
 7586 instruct storeD(vRegD src, memory8 mem)
 7587 %{
 7588   match(Set mem (StoreD mem src));
 7589   predicate(!needs_releasing_store(n));
 7590 
 7591   ins_cost(INSN_COST);
 7592   format %{ "strd  $src, $mem\t# double" %}
 7593 
 7594   ins_encode( aarch64_enc_strd(src, mem) );
 7595 
 7596   ins_pipe(pipe_class_memory);
 7597 %}
 7598 
// Store Compressed Klass Pointer
// Same encoding as storeN; klass pointers never need GC barriers.
instruct storeNKlass(iRegN src, memory4 mem)
%{
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// TODO
// implement storeImmD0 and storeDImmPacked
 7615 
// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

// Prefetch for allocation: PRFM with PSTL1KEEP hint (prefetch for store,
// L1 cache, retain). PRFM never faults, satisfying the requirement above.
instruct prefetchalloc( memory8 mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
 7629 
//  ---------------- volatile loads and stores ----------------

// All volatile loads below use acquiring loads (ldar* family). These
// instructions only accept a base-register address, hence the operand
// type is `indirect` rather than the indexed memory operands used by the
// plain load rules.

// Load Byte (8 bit signed)
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
// ldarb zero-extends, so no extra conversion is needed for the I2L widen.
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Char (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned) into long
// ldarh zero-extends, so no extra conversion is needed for the I2L widen.
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
 7721 
 7722 // Load Short/Char (16 bit signed) into long
 7723 instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7724 %{
 7725   match(Set dst (ConvI2L (LoadS mem)));
 7726 
 7727   ins_cost(VOLATILE_REF_COST);
 7728   format %{ "ldarh  $dst, $mem\t# short" %}
 7729 
 7730   ins_encode(aarch64_enc_ldarsh(dst, mem));
 7731 
 7732   ins_pipe(pipe_serial);
 7733 %}
 7734 
// Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
// Matches the (LoadI & 0xFFFFFFFF) idiom; ldarw zero-extends into the
// 64-bit register, so the AndL mask is implicit in the instruction.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
 7760 
 7761 // Load Long (64 bit signed)
 7762 instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7763 %{
 7764   match(Set dst (LoadL mem));
 7765 
 7766   ins_cost(VOLATILE_REF_COST);
 7767   format %{ "ldar  $dst, $mem\t# int" %}
 7768 
 7769   ins_encode(aarch64_enc_ldar(dst, mem));
 7770 
 7771   ins_pipe(pipe_serial);
 7772 %}
 7773 
// Load Pointer
// Only matches when there is no GC barrier data; loads that need barriers
// are handled by GC-specific rules elsewhere.
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));
  predicate(n->as_Load()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float
// There is no FP-register ldar; the encoding presumably loads via an
// acquiring integer load and moves to the FP register.
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
 7826 
// Volatile stores use releasing stores (stlr* family); like the acquiring
// loads above they only accept a base-register (indirect) address.

// Store Byte
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store zero byte: release-store the zero register directly.
instruct storeimmB0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb0(mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}

instruct storeimmC0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh0(mem));

  ins_pipe(pipe_class_memory);
%}

// Store Integer

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

instruct storeimmI0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw0(mem));

  ins_pipe(pipe_class_memory);
%}
 7902 
 7903 // Store Long (64 bit signed)
 7904 instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
 7905 %{
 7906   match(Set mem (StoreL mem src));
 7907 
 7908   ins_cost(VOLATILE_REF_COST);
 7909   format %{ "stlr  $src, $mem\t# int" %}
 7910 
 7911   ins_encode(aarch64_enc_stlr(src, mem));
 7912 
 7913   ins_pipe(pipe_class_memory);
 7914 %}
 7915 
 7916 instruct storeimmL0_volatile(immL0 zero, /* sync_memory*/indirect mem)
 7917 %{
 7918   match(Set mem (StoreL mem zero));
 7919 
 7920   ins_cost(VOLATILE_REF_COST);
 7921   format %{ "stlr  zr, $mem\t# int" %}
 7922 
 7923   ins_encode(aarch64_enc_stlr0(mem));
 7924 
 7925   ins_pipe(pipe_class_memory);
 7926 %}
 7927 
// Store Pointer
// Only matches when there is no GC barrier data; stores that need barriers
// are handled by GC-specific rules elsewhere.
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store of null pointer: release-store the zero register directly.
instruct storeimmP0_volatile(immP0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr0(mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store of compressed-oop null: release-store the 32-bit zero register.
instruct storeimmN0_volatile(immN0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  zr, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw0(mem));

  ins_pipe(pipe_class_memory);
%}
 7979 
// Store Float
// There is no FP-register stlr; the encoding presumably moves to an
// integer register and emits a releasing store.
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 8008 
//  ---------------- end of volatile loads and stores ----------------

// Data-cache line write-back (used by persistent-memory flush intrinsics).
// Requires a plain base-register address: the asserts verify no index and
// zero displacement before emitting the cache write-back.
instruct cacheWB(indirect addr)
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWB addr);

  ins_cost(100);
  format %{"cache wb $addr" %}
  ins_encode %{
    assert($addr->index_position() < 0, "should be");
    assert($addr$$disp == 0, "should be");
    __ cache_wb(Address($addr$$base$$Register, 0));
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Ordering barrier emitted before a sequence of cache write-backs.
instruct cacheWBPreSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPreSync);

  ins_cost(100);
  format %{"cache wb presync" %}
  ins_encode %{
    __ cache_wbsync(true);
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Ordering barrier emitted after a sequence of cache write-backs.
instruct cacheWBPostSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPostSync);

  ins_cost(100);
  format %{"cache wb postsync" %}
  ins_encode %{
    __ cache_wbsync(false);
  %}
  ins_pipe(pipe_slow); // XXX
%}
 8051 
 8052 // ============================================================================
 8053 // BSWAP Instructions
 8054 
 8055 instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
 8056   match(Set dst (ReverseBytesI src));
 8057 
 8058   ins_cost(INSN_COST);
 8059   format %{ "revw  $dst, $src" %}
 8060 
 8061   ins_encode %{
 8062     __ revw(as_Register($dst$$reg), as_Register($src$$reg));
 8063   %}
 8064 
 8065   ins_pipe(ialu_reg);
 8066 %}
 8067 
 8068 instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
 8069   match(Set dst (ReverseBytesL src));
 8070 
 8071   ins_cost(INSN_COST);
 8072   format %{ "rev  $dst, $src" %}
 8073 
 8074   ins_encode %{
 8075     __ rev(as_Register($dst$$reg), as_Register($src$$reg));
 8076   %}
 8077 
 8078   ins_pipe(ialu_reg);
 8079 %}
 8080 
 8081 instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
 8082   match(Set dst (ReverseBytesUS src));
 8083 
 8084   ins_cost(INSN_COST);
 8085   format %{ "rev16w  $dst, $src" %}
 8086 
 8087   ins_encode %{
 8088     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
 8089   %}
 8090 
 8091   ins_pipe(ialu_reg);
 8092 %}
 8093 
 8094 instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
 8095   match(Set dst (ReverseBytesS src));
 8096 
 8097   ins_cost(INSN_COST);
 8098   format %{ "rev16w  $dst, $src\n\t"
 8099             "sbfmw $dst, $dst, #0, #15" %}
 8100 
 8101   ins_encode %{
 8102     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
 8103     __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
 8104   %}
 8105 
 8106   ins_pipe(ialu_reg);
 8107 %}
 8108 
 8109 // ============================================================================
 8110 // Zero Count Instructions
 8111 
 8112 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
 8113   match(Set dst (CountLeadingZerosI src));
 8114 
 8115   ins_cost(INSN_COST);
 8116   format %{ "clzw  $dst, $src" %}
 8117   ins_encode %{
 8118     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
 8119   %}
 8120 
 8121   ins_pipe(ialu_reg);
 8122 %}
 8123 
 8124 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
 8125   match(Set dst (CountLeadingZerosL src));
 8126 
 8127   ins_cost(INSN_COST);
 8128   format %{ "clz   $dst, $src" %}
 8129   ins_encode %{
 8130     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
 8131   %}
 8132 
 8133   ins_pipe(ialu_reg);
 8134 %}
 8135 
 8136 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
 8137   match(Set dst (CountTrailingZerosI src));
 8138 
 8139   ins_cost(INSN_COST * 2);
 8140   format %{ "rbitw  $dst, $src\n\t"
 8141             "clzw   $dst, $dst" %}
 8142   ins_encode %{
 8143     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
 8144     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
 8145   %}
 8146 
 8147   ins_pipe(ialu_reg);
 8148 %}
 8149 
 8150 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
 8151   match(Set dst (CountTrailingZerosL src));
 8152 
 8153   ins_cost(INSN_COST * 2);
 8154   format %{ "rbit   $dst, $src\n\t"
 8155             "clz    $dst, $dst" %}
 8156   ins_encode %{
 8157     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
 8158     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
 8159   %}
 8160 
 8161   ins_pipe(ialu_reg);
 8162 %}
 8163 
//---------- Population Count Instructions -------------------------------------
//
// There is no general-purpose-register popcount instruction, so the value
// is moved to a SIMD register and counted with CNT (per-byte popcount)
// followed by ADDV (horizontal add across the 8 bytes).

instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // movw rewrites $src in place to clear its upper 32 bits so the 64-bit
    // vector move does not count stray bits from the high half.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Popcount of an int loaded from memory: load straight into the SIMD
// register (ldrs), skipping the GP-register round trip.
instruct popCountI_mem(iRegINoSp dst, memory4 mem, vRegF tmp) %{
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Popcount of a long loaded from memory: load straight into the SIMD
// register (ldrd).
instruct popCountL_mem(iRegINoSp dst, memory8 mem, vRegD tmp) %{
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
 8249 
 8250 // ============================================================================
 8251 // MemBar Instruction
 8252 
 8253 instruct load_fence() %{
 8254   match(LoadFence);
 8255   ins_cost(VOLATILE_REF_COST);
 8256 
 8257   format %{ "load_fence" %}
 8258 
 8259   ins_encode %{
 8260     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
 8261   %}
 8262   ins_pipe(pipe_serial);
 8263 %}
 8264 
 8265 instruct unnecessary_membar_acquire() %{
 8266   predicate(unnecessary_acquire(n));
 8267   match(MemBarAcquire);
 8268   ins_cost(0);
 8269 
 8270   format %{ "membar_acquire (elided)" %}
 8271 
 8272   ins_encode %{
 8273     __ block_comment("membar_acquire (elided)");
 8274   %}
 8275 
 8276   ins_pipe(pipe_class_empty);
 8277 %}
 8278 
 8279 instruct membar_acquire() %{
 8280   match(MemBarAcquire);
 8281   ins_cost(VOLATILE_REF_COST);
 8282 
 8283   format %{ "membar_acquire\n\t"
 8284             "dmb ish" %}
 8285 
 8286   ins_encode %{
 8287     __ block_comment("membar_acquire");
 8288     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
 8289   %}
 8290 
 8291   ins_pipe(pipe_serial);
 8292 %}
 8293 
 8294 
 8295 instruct membar_acquire_lock() %{
 8296   match(MemBarAcquireLock);
 8297   ins_cost(VOLATILE_REF_COST);
 8298 
 8299   format %{ "membar_acquire_lock (elided)" %}
 8300 
 8301   ins_encode %{
 8302     __ block_comment("membar_acquire_lock (elided)");
 8303   %}
 8304 
 8305   ins_pipe(pipe_serial);
 8306 %}
 8307 
 8308 instruct store_fence() %{
 8309   match(StoreFence);
 8310   ins_cost(VOLATILE_REF_COST);
 8311 
 8312   format %{ "store_fence" %}
 8313 
 8314   ins_encode %{
 8315     __ membar(Assembler::LoadStore|Assembler::StoreStore);
 8316   %}
 8317   ins_pipe(pipe_serial);
 8318 %}
 8319 
 8320 instruct unnecessary_membar_release() %{
 8321   predicate(unnecessary_release(n));
 8322   match(MemBarRelease);
 8323   ins_cost(0);
 8324 
 8325   format %{ "membar_release (elided)" %}
 8326 
 8327   ins_encode %{
 8328     __ block_comment("membar_release (elided)");
 8329   %}
 8330   ins_pipe(pipe_serial);
 8331 %}
 8332 
 8333 instruct membar_release() %{
 8334   match(MemBarRelease);
 8335   ins_cost(VOLATILE_REF_COST);
 8336 
 8337   format %{ "membar_release\n\t"
 8338             "dmb ish" %}
 8339 
 8340   ins_encode %{
 8341     __ block_comment("membar_release");
 8342     __ membar(Assembler::LoadStore|Assembler::StoreStore);
 8343   %}
 8344   ins_pipe(pipe_serial);
 8345 %}
 8346 
 8347 instruct membar_storestore() %{
 8348   match(MemBarStoreStore);
 8349   match(StoreStoreFence);
 8350   ins_cost(VOLATILE_REF_COST);
 8351 
 8352   format %{ "MEMBAR-store-store" %}
 8353 
 8354   ins_encode %{
 8355     __ membar(Assembler::StoreStore);
 8356   %}
 8357   ins_pipe(pipe_serial);
 8358 %}
 8359 
 8360 instruct membar_release_lock() %{
 8361   match(MemBarReleaseLock);
 8362   ins_cost(VOLATILE_REF_COST);
 8363 
 8364   format %{ "membar_release_lock (elided)" %}
 8365 
 8366   ins_encode %{
 8367     __ block_comment("membar_release_lock (elided)");
 8368   %}
 8369 
 8370   ins_pipe(pipe_serial);
 8371 %}
 8372 
 8373 instruct unnecessary_membar_volatile() %{
 8374   predicate(unnecessary_volatile(n));
 8375   match(MemBarVolatile);
 8376   ins_cost(0);
 8377 
 8378   format %{ "membar_volatile (elided)" %}
 8379 
 8380   ins_encode %{
 8381     __ block_comment("membar_volatile (elided)");
 8382   %}
 8383 
 8384   ins_pipe(pipe_serial);
 8385 %}
 8386 
 8387 instruct membar_volatile() %{
 8388   match(MemBarVolatile);
 8389   ins_cost(VOLATILE_REF_COST*100);
 8390 
 8391   format %{ "membar_volatile\n\t"
 8392              "dmb ish"%}
 8393 
 8394   ins_encode %{
 8395     __ block_comment("membar_volatile");
 8396     __ membar(Assembler::StoreLoad);
 8397   %}
 8398 
 8399   ins_pipe(pipe_serial);
 8400 %}
 8401 
 8402 // ============================================================================
 8403 // Cast/Convert Instructions
 8404 
 8405 instruct castX2P(iRegPNoSp dst, iRegL src) %{
 8406   match(Set dst (CastX2P src));
 8407 
 8408   ins_cost(INSN_COST);
 8409   format %{ "mov $dst, $src\t# long -> ptr" %}
 8410 
 8411   ins_encode %{
 8412     if ($dst$$reg != $src$$reg) {
 8413       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 8414     }
 8415   %}
 8416 
 8417   ins_pipe(ialu_reg);
 8418 %}
 8419 
 8420 instruct castP2X(iRegLNoSp dst, iRegP src) %{
 8421   match(Set dst (CastP2X src));
 8422 
 8423   ins_cost(INSN_COST);
 8424   format %{ "mov $dst, $src\t# ptr -> long" %}
 8425 
 8426   ins_encode %{
 8427     if ($dst$$reg != $src$$reg) {
 8428       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 8429     }
 8430   %}
 8431 
 8432   ins_pipe(ialu_reg);
 8433 %}
 8434 
 8435 // Convert oop into int for vectors alignment masking
 8436 instruct convP2I(iRegINoSp dst, iRegP src) %{
 8437   match(Set dst (ConvL2I (CastP2X src)));
 8438 
 8439   ins_cost(INSN_COST);
 8440   format %{ "movw $dst, $src\t# ptr -> int" %}
 8441   ins_encode %{
 8442     __ movw($dst$$Register, $src$$Register);
 8443   %}
 8444 
 8445   ins_pipe(ialu_reg);
 8446 %}
 8447 
 8448 // Convert compressed oop into int for vectors alignment masking
 8449 // in case of 32bit oops (heap < 4Gb).
 8450 instruct convN2I(iRegINoSp dst, iRegN src)
 8451 %{
 8452   predicate(CompressedOops::shift() == 0);
 8453   match(Set dst (ConvL2I (CastP2X (DecodeN src))));
 8454 
 8455   ins_cost(INSN_COST);
 8456   format %{ "mov dst, $src\t# compressed ptr -> int" %}
 8457   ins_encode %{
 8458     __ movw($dst$$Register, $src$$Register);
 8459   %}
 8460 
 8461   ins_pipe(ialu_reg);
 8462 %}
 8463 

// Convert oop pointer into compressed form
// May-be-null case: encode_heap_oop needs a null check, and the predicate
// routes not-null oops to the cheaper rule below. Flags are clobbered.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Known-not-null case: no null check needed.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Decode that must preserve null (source may be null and not a constant).
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Known-not-null (or constant) decode: skips the null-preserving path.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
 8518 
// n.b. AArch64 implementations of encode_klass_not_null and
// decode_klass_not_null do not modify the flags register so, unlike
// Intel, we don't kill CR as a side effect here

instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    // The macro assembler provides a distinct in-place (single-register)
    // variant for the dst == src case.
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
 8556 
// The Cast*/CheckCastPP nodes below are compile-time type assertions only:
// source and destination share a register, size(0) means no code is
// emitted, and the encoding is empty.

instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castLL(iRegL dst)
%{
  match(Set dst (CastLL dst));

  size(0);
  format %{ "# castLL of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castFF(vRegF dst)
%{
  match(Set dst (CastFF dst));

  size(0);
  format %{ "# castFF of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castDD(vRegD dst)
%{
  match(Set dst (CastDD dst));

  size(0);
  format %{ "# castDD of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castVV(vReg dst)
%{
  match(Set dst (CastVV dst));

  size(0);
  format %{ "# castVV of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// CastVV on a predicate (governing) register rather than a vector register.
instruct castVVMask(pRegGov dst)
%{
  match(Set dst (CastVV dst));

  size(0);
  format %{ "# castVV of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
 8642 
 8643 // ============================================================================
 8644 // Atomic operation instructions
 8645 //
 8646 
 8647 // standard CompareAndSwapX when we are using barriers
 8648 // these have higher priority than the rules selected by a predicate
 8649 
 8650 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
 8651 // can't match them
 8652 
 8653 instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 8654 
 8655   match(Set res (CompareAndSwapB mem (Binary oldval newval)));
 8656   ins_cost(2 * VOLATILE_REF_COST);
 8657 
 8658   effect(KILL cr);
 8659 
 8660   format %{
 8661     "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 8662     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8663   %}
 8664 
 8665   ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
 8666             aarch64_enc_cset_eq(res));
 8667 
 8668   ins_pipe(pipe_slow);
 8669 %}
 8670 
 8671 instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 8672 
 8673   match(Set res (CompareAndSwapS mem (Binary oldval newval)));
 8674   ins_cost(2 * VOLATILE_REF_COST);
 8675 
 8676   effect(KILL cr);
 8677 
 8678   format %{
 8679     "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 8680     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8681   %}
 8682 
 8683   ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
 8684             aarch64_enc_cset_eq(res));
 8685 
 8686   ins_pipe(pipe_slow);
 8687 %}
 8688 
 8689 instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 8690 
 8691   match(Set res (CompareAndSwapI mem (Binary oldval newval)));
 8692   ins_cost(2 * VOLATILE_REF_COST);
 8693 
 8694   effect(KILL cr);
 8695 
 8696  format %{
 8697     "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 8698     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8699  %}
 8700 
 8701  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
 8702             aarch64_enc_cset_eq(res));
 8703 
 8704   ins_pipe(pipe_slow);
 8705 %}
 8706 
 8707 instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
 8708 
 8709   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
 8710   ins_cost(2 * VOLATILE_REF_COST);
 8711 
 8712   effect(KILL cr);
 8713 
 8714  format %{
 8715     "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
 8716     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8717  %}
 8718 
 8719  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
 8720             aarch64_enc_cset_eq(res));
 8721 
 8722   ins_pipe(pipe_slow);
 8723 %}
 8724 
 8725 instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
 8726 
 8727   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
 8728   predicate(n->as_LoadStore()->barrier_data() == 0);
 8729   ins_cost(2 * VOLATILE_REF_COST);
 8730 
 8731   effect(KILL cr);
 8732 
 8733  format %{
 8734     "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
 8735     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8736  %}
 8737 
 8738  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
 8739             aarch64_enc_cset_eq(res));
 8740 
 8741   ins_pipe(pipe_slow);
 8742 %}
 8743 
 8744 instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
 8745 
 8746   match(Set res (CompareAndSwapN mem (Binary oldval newval)));
 8747   ins_cost(2 * VOLATILE_REF_COST);
 8748 
 8749   effect(KILL cr);
 8750 
 8751  format %{
 8752     "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
 8753     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8754  %}
 8755 
 8756  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
 8757             aarch64_enc_cset_eq(res));
 8758 
 8759   ins_pipe(pipe_slow);
 8760 %}
 8761 
 8762 // alternative CompareAndSwapX when we are eliding barriers
 8763 
// Barrier-eliding variant of compareAndSwapB, selected when
// needs_acquiring_load_exclusive(n) holds: the acquiring cmpxchgb_acq
// encoding is used and the cost is halved (VOLATILE_REF_COST).
instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  // The cmpxchg/cset encodings clobber the condition flags.
  effect(KILL cr);

  format %{
    "cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8782 
// Barrier-eliding variant of compareAndSwapS, selected when
// needs_acquiring_load_exclusive(n) holds; uses the acquiring
// cmpxchgs_acq encoding at half the cost.
instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  // The cmpxchg/cset encodings clobber the condition flags.
  effect(KILL cr);

  format %{
    "cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8801 
 8802 instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 8803 
 8804   predicate(needs_acquiring_load_exclusive(n));
 8805   match(Set res (CompareAndSwapI mem (Binary oldval newval)));
 8806   ins_cost(VOLATILE_REF_COST);
 8807 
 8808   effect(KILL cr);
 8809 
 8810  format %{
 8811     "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 8812     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8813  %}
 8814 
 8815  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
 8816             aarch64_enc_cset_eq(res));
 8817 
 8818   ins_pipe(pipe_slow);
 8819 %}
 8820 
 8821 instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
 8822 
 8823   predicate(needs_acquiring_load_exclusive(n));
 8824   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
 8825   ins_cost(VOLATILE_REF_COST);
 8826 
 8827   effect(KILL cr);
 8828 
 8829  format %{
 8830     "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
 8831     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8832  %}
 8833 
 8834  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
 8835             aarch64_enc_cset_eq(res));
 8836 
 8837   ins_pipe(pipe_slow);
 8838 %}
 8839 
 8840 instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
 8841 
 8842   predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
 8843   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
 8844   ins_cost(VOLATILE_REF_COST);
 8845 
 8846   effect(KILL cr);
 8847 
 8848  format %{
 8849     "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
 8850     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8851  %}
 8852 
 8853  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
 8854             aarch64_enc_cset_eq(res));
 8855 
 8856   ins_pipe(pipe_slow);
 8857 %}
 8858 
 8859 instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
 8860 
 8861   predicate(needs_acquiring_load_exclusive(n));
 8862   match(Set res (CompareAndSwapN mem (Binary oldval newval)));
 8863   ins_cost(VOLATILE_REF_COST);
 8864 
 8865   effect(KILL cr);
 8866 
 8867  format %{
 8868     "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
 8869     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8870  %}
 8871 
 8872  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
 8873             aarch64_enc_cset_eq(res));
 8874 
 8875   ins_pipe(pipe_slow);
 8876 %}
 8877 
 8878 
 8879 // ---------------------------------------------------------------------
 8880 
 8881 // BEGIN This section of the file is automatically generated. Do not edit --------------
 8882 
 8883 // Sundry CAS operations.  Note that release is always true,
 8884 // regardless of the memory ordering of the CAS.  This is because we
 8885 // need the volatile case to be sequentially consistent but there is
 8886 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
 8887 // can't check the type of memory ordering here, so we always emit a
 8888 // STLXR.
 8889 
 8890 // This section is generated from cas.m4
 8891 
 8892 
 8893 // This pattern is generated automatically from cas.m4.
 8894 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Strong compareAndExchange on a byte: $res returns the value previously in
// memory, sign-extended to int by sxtbw; TEMP_DEF keeps $res disjoint from inputs.
// NOTE(review): format text says "weak" but the encoding passes /*weak*/ false
// (strong CAS) — the fix belongs in cas.m4, not in this generated section.
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 8910 
 8911 // This pattern is generated automatically from cas.m4.
 8912 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Strong compareAndExchange on a short: $res returns the previous memory
// value, sign-extended to int by sxthw.
// NOTE(review): format text says "weak" but the encoding passes /*weak*/ false
// (strong CAS) — the fix belongs in cas.m4.
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 8928 
 8929 // This pattern is generated automatically from cas.m4.
 8930 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Strong compareAndExchange on an int: $res returns the previous memory value.
// NOTE(review): format text says "weak" but the encoding passes /*weak*/ false
// (strong CAS) — the fix belongs in cas.m4.
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 8945 
 8946 // This pattern is generated automatically from cas.m4.
 8947 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Strong compareAndExchange on a long: $res returns the previous memory value.
// NOTE(review): format text says "weak" but the encoding passes /*weak*/ false
// (strong CAS) — the fix belongs in cas.m4.
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 8962 
 8963 // This pattern is generated automatically from cas.m4.
 8964 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Strong compareAndExchange on a narrow oop: $res returns the previous value.
// NOTE(review): format text says "weak" but the encoding passes /*weak*/ false
// (strong CAS) — the fix belongs in cas.m4.
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 8979 
 8980 // This pattern is generated automatically from cas.m4.
 8981 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Strong compareAndExchange on a pointer: $res returns the previous value.
// Matches only when the node carries no GC barrier data.
// NOTE(review): format text says "weak" but the encoding passes /*weak*/ false
// (strong CAS) — the fix belongs in cas.m4.
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 8997 
 8998 // This pattern is generated automatically from cas.m4.
 8999 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Acquire variant of compareAndExchangeB (/*acquire*/ true), selected by
// needs_acquiring_load_exclusive(n); half the cost of the plain rule.
// NOTE(review): format text says "weak" but /*weak*/ false is passed — fix in cas.m4.
instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 9016 
 9017 // This pattern is generated automatically from cas.m4.
 9018 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Acquire variant of compareAndExchangeS (/*acquire*/ true), selected by
// needs_acquiring_load_exclusive(n).
// NOTE(review): format text says "weak" but /*weak*/ false is passed — fix in cas.m4.
instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 9035 
 9036 // This pattern is generated automatically from cas.m4.
 9037 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Acquire variant of compareAndExchangeI (/*acquire*/ true), selected by
// needs_acquiring_load_exclusive(n).
// NOTE(review): format text says "weak" but /*weak*/ false is passed — fix in cas.m4.
instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 9053 
 9054 // This pattern is generated automatically from cas.m4.
 9055 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Acquire variant of compareAndExchangeL (/*acquire*/ true), selected by
// needs_acquiring_load_exclusive(n).
// NOTE(review): format text says "weak" but /*weak*/ false is passed — fix in cas.m4.
instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 9071 
 9072 // This pattern is generated automatically from cas.m4.
 9073 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Acquire variant of compareAndExchangeN (/*acquire*/ true), selected by
// needs_acquiring_load_exclusive(n).
// NOTE(review): format text says "weak" but /*weak*/ false is passed — fix in cas.m4.
instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 9089 
 9090 // This pattern is generated automatically from cas.m4.
 9091 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Acquire variant of compareAndExchangeP: needs acquiring load exclusive AND
// no GC barrier data on the node.
// NOTE(review): format text says "weak" but /*weak*/ false is passed — fix in cas.m4.
instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 9107 
 9108 // This pattern is generated automatically from cas.m4.
 9109 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Weak CAS on a byte: may fail spuriously (/*weak*/ true). $res is the
// success flag (csetw on EQ), not the old value; the old value is discarded (noreg).
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9126 
 9127 // This pattern is generated automatically from cas.m4.
 9128 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Weak CAS on a short: may fail spuriously; $res is the success flag
// (csetw on EQ), old value discarded (noreg).
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9145 
 9146 // This pattern is generated automatically from cas.m4.
 9147 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Weak CAS on an int: may fail spuriously; $res is the success flag
// (csetw on EQ), old value discarded (noreg).
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9164 
 9165 // This pattern is generated automatically from cas.m4.
 9166 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Weak CAS on a long: may fail spuriously; $res is the int success flag
// (csetw on EQ), old value discarded (noreg).
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9183 
 9184 // This pattern is generated automatically from cas.m4.
 9185 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Weak CAS on a narrow oop: may fail spuriously; $res is the success flag
// (csetw on EQ), old value discarded (noreg).
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9202 
 9203 // This pattern is generated automatically from cas.m4.
 9204 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Weak CAS on a pointer (no GC barrier data): may fail spuriously; $res is
// the success flag (csetw on EQ), old value discarded (noreg).
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9222 
 9223 // This pattern is generated automatically from cas.m4.
 9224 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Acquire variant of weakCompareAndSwapB (/*acquire*/ true), selected by
// needs_acquiring_load_exclusive(n); half the cost of the plain rule.
instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9242 
 9243 // This pattern is generated automatically from cas.m4.
 9244 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Acquire variant of weakCompareAndSwapS (/*acquire*/ true), selected by
// needs_acquiring_load_exclusive(n).
instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9262 
 9263 // This pattern is generated automatically from cas.m4.
 9264 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Acquire variant of weakCompareAndSwapI (/*acquire*/ true), selected by
// needs_acquiring_load_exclusive(n).
instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9282 
 9283 // This pattern is generated automatically from cas.m4.
 9284 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Acquire variant of weakCompareAndSwapL (/*acquire*/ true), selected by
// needs_acquiring_load_exclusive(n).
instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9302 
 9303 // This pattern is generated automatically from cas.m4.
 9304 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Acquire variant of weakCompareAndSwapN (/*acquire*/ true), selected by
// needs_acquiring_load_exclusive(n).
instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9322 
 9323 // This pattern is generated automatically from cas.m4.
 9324 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Acquire variant of weakCompareAndSwapP: needs acquiring load exclusive AND
// no GC barrier data on the node.
instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9342 
 9343 // END This section of the file is automatically generated. Do not edit --------------
 9344 // ---------------------------------------------------------------------
 9345 
// Atomic exchange of an int: stores $newv at [$mem] and returns the value
// previously there in $prev.
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9355 
// Atomic exchange of a long: stores $newv at [$mem] and returns the previous
// value in $prev.
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9365 
// Atomic exchange of a narrow oop (32-bit word form): returns the previous
// value in $prev.
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9375 
// Atomic exchange of a pointer; matches only when the node carries no GC
// barrier data. Returns the previous value in $prev.
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9386 
// Acquire variant of get_and_setI (atomic_xchgalw), selected by
// needs_acquiring_load_exclusive(n); half the cost of the plain rule.
instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetI mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9397 
// Acquire variant of get_and_setL (atomic_xchgal), selected by
// needs_acquiring_load_exclusive(n).
instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetL mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9408 
// Acquire variant of get_and_setN (atomic_xchgalw), selected by
// needs_acquiring_load_exclusive(n).
instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetN mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9419 
// Acquire variant of get_and_setP: needs acquiring load exclusive AND no GC
// barrier data on the node.
instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9430 
 9431 
// Fetch-and-add family (relaxed memory order). The result register
// receives the value previously at [mem]. The _no_res variants apply when
// the ideal node's result is unused (result_not_used()) and discard the
// old value by passing noreg; the *i variants fold an add/sub-encodable
// immediate increment. The +1 ins_cost on result-producing forms makes
// the result-discarding forms preferred when both match.

// 64-bit fetch-and-add, register increment.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 64-bit add-to-memory, result unused.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 64-bit fetch-and-add, immediate increment.
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 64-bit add-to-memory, immediate increment, result unused.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit fetch-and-add, register increment.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit add-to-memory, result unused.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit fetch-and-add, immediate increment.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit add-to-memory, immediate increment, result unused.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9515 
// Acquiring fetch-and-add family: mirrors the relaxed rules above but is
// selected (lower ins_cost) when needs_acquiring_load_exclusive(n) holds.
// The atomic_addal/atomic_addalw macro-assembler forms emit
// acquire+release atomics.

// 64-bit acquiring fetch-and-add, register increment.
instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 64-bit acquiring add-to-memory, result unused.
instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 64-bit acquiring fetch-and-add, immediate increment.
instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 64-bit acquiring add-to-memory, immediate increment, result unused.
instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit acquiring fetch-and-add, register increment.
instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit acquiring add-to-memory, result unused.
instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit acquiring fetch-and-add, immediate increment.
instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit acquiring add-to-memory, immediate increment, result unused.
instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9603 
// Manifest a CmpU result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Sequence: compare, set dst to 1 if not-equal, then negate dst if the
// unsigned-lower condition held. Clobbers the flags register.
instruct cmpU3_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg flags)
%{
  match(Set dst (CmpU3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmpw $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpU3(reg)"
  %}
  ins_encode %{
    __ cmpw($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}

// Same as above with an add/sub-encodable immediate second operand;
// subsw against zr performs the compare.
instruct cmpU3_reg_imm(iRegINoSp dst, iRegI src1, immIAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpU3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subsw zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpU3(imm)"
  %}
  ins_encode %{
    __ subsw(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}
 9645 
// Manifest a CmpUL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// 64-bit unsigned variant of cmpU3 above: 64-bit compare, then 32-bit
// cset/cneg on the int result. Clobbers the flags register.
instruct cmpUL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpUL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmp $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpUL3(reg)"
  %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}

// Immediate-operand variant; subs against zr performs the compare.
instruct cmpUL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpUL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subs zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpUL3(imm)"
  %}
  ins_encode %{
    __ subs(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}
 9687 
// Manifest a CmpL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Signed 64-bit variant: identical shape to cmpUL3 but negates on the
// signed less-than (LT) condition instead of unsigned lower (LO).
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmp $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lt\t# CmpL3(reg)"
  %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}

// Immediate-operand variant; subs against zr performs the compare.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subs zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lt\t# CmpL3(imm)"
  %}
  ins_encode %{
    __ subs(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
 9729 
 9730 // ============================================================================
 9731 // Conditional Move Instructions
 9732 
 9733 // n.b. we have identical rules for both a signed compare op (cmpOp)
 9734 // and an unsigned compare op (cmpOpU). it would be nice if we could
 9735 // define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
 9740 // which throws a ShouldNotHappen. So, we have to provide two flavours
 9741 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
 9742 
// CMoveI: conditional select of 32-bit ints. Note the operand order in
// csel: src2 is selected when the condition holds, src1 otherwise.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare flavour of the rule above (see block comment on why
// both flavours are needed).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// n.b. this is selected in preference to the rule above because it
// avoids loading constant 0 into a source register

// TODO
// we ought only to be able to cull one of these variants as the ideal
// transforms ought always to order the zero consistently (to left/right?)

// CMoveI with zero on the left: select src or the zero register.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of cmovI_zero_reg.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// CMoveI with zero on the right: select the zero register or src.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of cmovI_reg_zero.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// special case for creating a boolean 0 or 1

// n.b. this is selected in preference to the rule above because it
// avoids loading constants 0 and 1 into a source register

// Materialize 0/1 from the condition via csincw zr, zr (i.e. cset with
// the negated condition) — no source registers needed.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// Unsigned-compare flavour of cmovI_reg_zero_one.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
 9890 
// CMoveL: conditional select of 64-bit longs. Same shape as the CMoveI
// rules above but using the 64-bit csel.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare flavour of cmovL_reg_reg.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// CMoveL with zero on the right.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of cmovL_reg_zero.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// CMoveL with zero on the left.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of cmovL_zero_reg.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9988 
// CMoveP: conditional select of full-width pointers; same shape as the
// CMoveL rules above.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare flavour of cmovP_reg_reg.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// CMoveP with null (zero) on the right.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of cmovP_reg_zero.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// CMoveP with null (zero) on the left.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of cmovP_zero_reg.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10086 
// CMoveN: conditional select of compressed (narrow) oops; uses the
// 32-bit cselw since narrow oops are 32 bits.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10102 
// Unsigned-compare flavour of cmovN_reg_reg (compressed-oop conditional
// select). The disassembly format previously said "# signed, compressed
// ptr" — a copy-paste from the signed rule; corrected to "unsigned" to
// match this rule's cmpOpU/rFlagsRegU operands and the labelling of all
// sibling cmovU* rules.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10118 
// special cases where one arg is zero

// CMoveN with narrow null (zero) on the right.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of cmovN_reg_zero.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// CMoveN with narrow null (zero) on the left.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of cmovN_zero_reg.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10184 
// CMoveF: conditional select of single-precision floats via fcsels.
// As with the integer rules, src2 is selected when the condition holds.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}

// Unsigned-compare flavour of cmovF_reg.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10220 
// CMoveD: conditional select of double-precision values via fcseld.
// The disassembly format previously said "cmove float" — a copy-paste
// from the CMoveF rule; corrected to "cmove double" to match the vRegD
// operands and the fcseld encoding.
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10238 
// Unsigned-compare flavour of cmovD_reg (double-precision conditional
// select). The disassembly format previously said "cmove float" — a
// copy-paste from the CMoveF rule; corrected to "cmove double" to match
// the vRegD operands and the fcseld encoding.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10256 
10257 // ============================================================================
10258 // Arithmetic Instructions
10259 //
10260 
10261 // Integer Addition
10262 
10263 // TODO
10264 // these currently employ operations which do not set CR and hence are
10265 // not flagged as killing CR but we would like to isolate the cases
10266 // where we want to set flags from those where we don't. need to work
10267 // out how to do that.
10268 
// 32-bit integer add, register + register.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit integer add, register + add/sub-encodable immediate; encoding
// is shared with subtraction via the 0x0 opcode selector.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// As addI_reg_imm, but folds the ConvL2I of a long source into the
// 32-bit addw (which only reads the low word anyway).
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10311 
10312 // Pointer Addition
10313 instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
10314   match(Set dst (AddP src1 src2));
10315 
10316   ins_cost(INSN_COST);
10317   format %{ "add $dst, $src1, $src2\t# ptr" %}
10318 
10319   ins_encode %{
10320     __ add(as_Register($dst$$reg),
10321            as_Register($src1$$reg),
10322            as_Register($src2$$reg));
10323   %}
10324 
10325   ins_pipe(ialu_reg_reg);
10326 %}
10327 
// Pointer + sign-extended int offset: the ConvI2L is folded into the
// add's sxtw operand extension, saving a separate extend instruction.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}
10342 
// Pointer + (long << scale): folded into a single lea with a shifted index.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10357 
// Pointer + (sign-extended int << scale): lea with an sxtw-extended,
// scaled index register, folding both the ConvI2L and the shift.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10372 
// (long)(int src) << scale, emitted as a single sbfiz. The extracted
// field width is clamped to 32 bits (MIN2 below) since only the low 32
// bits of src are significant after the implicit sign extension.
// NOTE(review): cr is declared as an operand with no effect listed --
// see the CR/flags note at the top of this section.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN2(32, (int)((-$scale$$constant) & 63)));
  %}

  ins_pipe(ialu_reg_shift);
%}
10387 
10388 // Pointer Immediate Addition
10389 // n.b. this needs to be more expensive than using an indirect memory
10390 // operand
// Pointer + AddSub-range long immediate; shares the 64-bit add/sub
// immediate encoder, opcode 0x0 selects "add".
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10404 
10405 // Long Addition
// Long Addition (64-bit, register + register).
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10421 
// Long Immediate Addition. No constant pool entries are required.
// Long + AddSub-range immediate; opcode 0x0 selects "add" in the shared encoder.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10436 
10437 // Integer Subtraction
// Integer Subtraction (32-bit, register - register): matches SubI, emits subw.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10452 
10453 // Immediate Subtraction
// Integer - AddSub-range immediate; opcode 0x1 selects "sub" in the shared encoder.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10467 
10468 // Long Subtraction
// Long Subtraction (64-bit, register - register).
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10484 
// Long Immediate Subtraction. No constant pool entries are required.
// Long - AddSub-range immediate; opcode 0x1 selects "sub" in the shared encoder.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // Fixed: the format string was missing the space between the mnemonic
  // and the first operand ("sub$dst" -> "sub $dst").
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10499 
10500 // Integer Negation (special case for sub)
10501 
// Integer negation (0 - src), emitted as negw.
// NOTE(review): cr is declared as an operand with no effect listed --
// see the CR/flags note at the top of this section.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10515 
10516 // Long Negation
10517 
// Long negation (0 - src), emitted as neg.
// NOTE(review): cr is declared as an operand with no effect listed --
// see the CR/flags note at the top of this section.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10531 
10532 // Integer Multiply
10533 
// Integer Multiply (32-bit): matches MulI, emits mulw.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10548 
// 32x32 -> 64 signed multiply: a MulL of two sign-extended ints
// collapses to a single smull.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10563 
10564 // Long Multiply
10565 
// Long Multiply (low 64 bits of the product).
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10580 
// High 64 bits of the 128-bit signed product (smulh).
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10596 
// High 64 bits of the 128-bit unsigned product (umulh).
instruct umulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (UMulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "umulh   $dst, $src1, $src2\t# umulhi" %}

  ins_encode %{
    __ umulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10612 
10613 // Combined Integer Multiply & Add/Sub
10614 
// Fused multiply-add (32-bit): dst = src3 + src1 * src2 in one maddw.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Format now shows the w-form actually emitted below (maddw, not madd),
  // so the printed disassembly matches the generated instruction.
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10630 
// Fused multiply-subtract (32-bit): dst = src3 - src1 * src2 in one msubw.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Format now shows the w-form actually emitted below (msubw, not msub).
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10646 
10647 // Combined Integer Multiply & Neg
10648 
// Multiply-negate (32-bit): dst = -(src1 * src2) via mnegw.
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
  match(Set dst (MulI (SubI zero src1) src2));

  ins_cost(INSN_COST * 3);
  // Format now shows the w-form actually emitted below (mnegw, not mneg).
  format %{ "mnegw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mnegw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10663 
10664 // Combined Long Multiply & Add/Sub
10665 
// Fused multiply-add (64-bit): dst = src3 + src1 * src2 in one madd.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10681 
// Fused multiply-subtract (64-bit): dst = src3 - src1 * src2 in one msub.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10697 
10698 // Combined Long Multiply & Neg
10699 
// Multiply-negate (64-bit): dst = -(src1 * src2) via mneg.
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mneg(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10714 
10715 // Combine Integer Signed Multiply & Add/Sub/Neg Long
10716 
// dst = src3 + (long)src1 * (long)src2: the two sign extensions and the
// multiply-add collapse into a single smaddl.
instruct smaddL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (AddL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smaddl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smaddl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10732 
// dst = src3 - (long)src1 * (long)src2, collapsed into a single smsubl.
instruct smsubL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (SubL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smsubl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smsubl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10748 
// dst = -((long)src1 * (long)src2), collapsed into a single smnegl.
instruct smnegL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero (ConvI2L src1)) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smnegl  $dst, $src1, $src2" %}

  ins_encode %{
    __ smnegl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10763 
10764 // Combined Multiply-Add Shorts into Integer (dst = src1 * src2 + src3 * src4)
10765 
// dst = src1*src2 + src3*src4; the first product goes through rscratch1,
// then the second multiply-add folds it in with maddw.
instruct muladdS2I(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3, iRegIorL2I src4) %{
  match(Set dst (MulAddS2I (Binary src1 src2) (Binary src3 src4)));

  ins_cost(INSN_COST * 5);
  format %{ "mulw  rscratch1, $src1, $src2\n\t"
            "maddw $dst, $src3, $src4, rscratch1" %}

  ins_encode %{
    __ mulw(rscratch1, as_Register($src1$$reg), as_Register($src2$$reg));
    __ maddw(as_Register($dst$$reg), as_Register($src3$$reg), as_Register($src4$$reg), rscratch1); %}

  ins_pipe(imac_reg_reg);
%}
10779 
10780 // Integer Divide
10781 
// Integer Divide (32-bit signed): emitted via the shared sdivw encoder.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10791 
10792 // Long Divide
10793 
// Long Divide (64-bit signed): emitted via the shared sdiv encoder.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10803 
10804 // Integer Remainder
10805 
// Integer Remainder: computed as src1 - (src1 / src2) * src2 using an
// sdivw + msubw pair (AArch64 has no remainder instruction).
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10816 
10817 // Long Remainder
10818 
// Long Remainder: computed as src1 - (src1 / src2) * src2 using an
// sdiv + msub pair (AArch64 has no remainder instruction).
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Use "\n\t" like modI so the second line of the two-line format is
  // indented consistently in printed disassembly.
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10829 
10830 // Unsigned Integer Divide
10831 
// Unsigned Integer Divide (32-bit): emits udivw.
instruct UdivI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (UDivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "udivw  $dst, $src1, $src2" %}

  ins_encode %{
    __ udivw($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(idiv_reg_reg);
%}
10844 
// Unsigned Long Divide
10846 
// Unsigned Long Divide (64-bit): emits udiv.
instruct UdivL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (UDivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "udiv   $dst, $src1, $src2" %}

  ins_encode %{
    __ udiv($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(ldiv_reg_reg);
%}
10859 
10860 // Unsigned Integer Remainder
10861 
// Unsigned Integer Remainder: src1 - (src1 /u src2) * src2 via udivw + msubw.
instruct UmodI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (UModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "udivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode %{
    __ udivw(rscratch1, $src1$$Register, $src2$$Register);
    __ msubw($dst$$Register, rscratch1, $src2$$Register, $src1$$Register);
  %}

  ins_pipe(idiv_reg_reg);
%}
10876 
10877 // Unsigned Long Remainder
10878 
// Unsigned Long Remainder: src1 - (src1 /u src2) * src2 via udiv + msub.
instruct UModL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (UModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Use "\n\t" like UmodI so the second line of the two-line format is
  // indented consistently in printed disassembly.
  format %{ "udiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode %{
    __ udiv(rscratch1, $src1$$Register, $src2$$Register);
    __ msub($dst$$Register, rscratch1, $src2$$Register, $src1$$Register);
  %}

  ins_pipe(ldiv_reg_reg);
%}
10893 
10894 // Integer Shifts
10895 
10896 // Shift Left Register
// Shift left (32-bit) by a register amount: lslvw.
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10911 
10912 // Shift Left Immediate
// Shift left (32-bit) by an immediate; count masked to 0..31 as Java requires.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10927 
10928 // Shift Right Logical Register
// Logical shift right (32-bit) by a register amount: lsrvw.
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10943 
10944 // Shift Right Logical Immediate
// Logical shift right (32-bit) by an immediate; count masked to 0..31.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10959 
10960 // Shift Right Arithmetic Register
// Arithmetic shift right (32-bit) by a register amount: asrvw.
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10975 
10976 // Shift Right Arithmetic Immediate
// Arithmetic shift right (32-bit) by an immediate; count masked to 0..31.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10991 
10992 // Combined Int Mask and Right Shift (using UBFM)
10993 // TODO
10994 
10995 // Long Shifts
10996 
10997 // Shift Left Register
// Shift left (64-bit) by a register amount: lslv.
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11012 
11013 // Shift Left Immediate
// Shift left (64-bit) by an immediate; count masked to 0..63.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11028 
11029 // Shift Right Logical Register
// Logical shift right (64-bit) by a register amount: lsrv.
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11044 
11045 // Shift Right Logical Immediate
// Logical shift right (64-bit) by an immediate; count masked to 0..63.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11060 
11061 // A special-case pattern for card table stores.
// Logical shift right of a pointer's raw bits (CastP2X folded away);
// special-cased so card table address computations stay one instruction.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11076 
11077 // Shift Right Arithmetic Register
// Arithmetic shift right (64-bit) by a register amount: asrv.
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11092 
11093 // Shift Right Arithmetic Immediate
// Arithmetic shift right (64-bit) by an immediate; count masked to 0..63.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11108 
11109 // BEGIN This section of the file is automatically generated. Do not edit --------------
11110 // This section is generated from aarch64_ad.m4
11111 
11112 // This pattern is automatically generated from aarch64_ad.m4
11113 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated from aarch64_ad.m4 -- make changes in the m4
// source, not here. XorL with -1 is bitwise NOT, emitted as eon with zr.
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
11130 
11131 // This pattern is automatically generated from aarch64_ad.m4
11132 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated from aarch64_ad.m4 -- make changes in the m4
// source, not here. XorI with -1 is bitwise NOT, emitted as eonw with zr.
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
11149 
11150 // This pattern is automatically generated from aarch64_ad.m4
11151 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated from aarch64_ad.m4 -- make changes in the m4
// source, not here. 0 - (src1 >>> src2) folded into negw with an LSR operand.
instruct NegI_reg_URShift_reg(iRegINoSp dst,
                              immI0 zero, iRegIorL2I src1, immI src2) %{
  match(Set dst (SubI zero (URShiftI src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "negw  $dst, $src1, LSR $src2" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSR, $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11166 
11167 // This pattern is automatically generated from aarch64_ad.m4
11168 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated from aarch64_ad.m4 -- make changes in the m4
// source, not here. 0 - (src1 >> src2) folded into negw with an ASR operand.
instruct NegI_reg_RShift_reg(iRegINoSp dst,
                              immI0 zero, iRegIorL2I src1, immI src2) %{
  match(Set dst (SubI zero (RShiftI src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "negw  $dst, $src1, ASR $src2" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::ASR, $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11183 
11184 // This pattern is automatically generated from aarch64_ad.m4
11185 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated from aarch64_ad.m4 -- make changes in the m4
// source, not here. 0 - (src1 << src2) folded into negw with an LSL operand.
instruct NegI_reg_LShift_reg(iRegINoSp dst,
                              immI0 zero, iRegIorL2I src1, immI src2) %{
  match(Set dst (SubI zero (LShiftI src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "negw  $dst, $src1, LSL $src2" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSL, $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11200 
11201 // This pattern is automatically generated from aarch64_ad.m4
11202 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated from aarch64_ad.m4 -- make changes in the m4
// source, not here. 0 - (src1 >>> src2) folded into neg with an LSR operand.
instruct NegL_reg_URShift_reg(iRegLNoSp dst,
                              immL0 zero, iRegL src1, immI src2) %{
  match(Set dst (SubL zero (URShiftL src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "neg  $dst, $src1, LSR $src2" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSR, $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11217 
11218 // This pattern is automatically generated from aarch64_ad.m4
11219 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated from aarch64_ad.m4 -- make changes in the m4
// source, not here. 0 - (src1 >> src2) folded into neg with an ASR operand.
instruct NegL_reg_RShift_reg(iRegLNoSp dst,
                              immL0 zero, iRegL src1, immI src2) %{
  match(Set dst (SubL zero (RShiftL src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "neg  $dst, $src1, ASR $src2" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::ASR, $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11234 
11235 // This pattern is automatically generated from aarch64_ad.m4
11236 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated from aarch64_ad.m4 -- make changes in the m4
// source, not here. 0 - (src1 << src2) folded into neg with an LSL operand.
instruct NegL_reg_LShift_reg(iRegLNoSp dst,
                              immL0 zero, iRegL src1, immI src2) %{
  match(Set dst (SubL zero (LShiftL src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "neg  $dst, $src1, LSL $src2" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSL, $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11251 
11252 // This pattern is automatically generated from aarch64_ad.m4
11253 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated from aarch64_ad.m4 -- make changes in the m4
// source, not here. src1 & ~src2 folded into a single bicw.
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11269 
11270 // This pattern is automatically generated from aarch64_ad.m4
11271 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated from aarch64_ad.m4 -- make changes in the m4
// source, not here. src1 & ~src2 folded into a single bic.
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11287 
11288 // This pattern is automatically generated from aarch64_ad.m4
11289 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated from aarch64_ad.m4 -- make changes in the m4
// source, not here. src1 | ~src2 folded into a single ornw.
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11305 
11306 // This pattern is automatically generated from aarch64_ad.m4
11307 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated from aarch64_ad.m4 -- make changes in the m4
// source, not here. src1 | ~src2 folded into a single orn.
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11323 
11324 // This pattern is automatically generated from aarch64_ad.m4
11325 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated from aarch64_ad.m4 -- make changes in the m4
// source, not here. ~(src1 ^ src2) folded into a single eonw.
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11341 
11342 // This pattern is automatically generated from aarch64_ad.m4
11343 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11344 instruct XorL_reg_not_reg(iRegLNoSp dst,
11345                          iRegL src1, iRegL src2, immL_M1 m1) %{
11346   match(Set dst (XorL m1 (XorL src2 src1)));
11347   ins_cost(INSN_COST);
11348   format %{ "eon  $dst, $src1, $src2" %}
11349 
11350   ins_encode %{
11351     __ eon(as_Register($dst$$reg),
11352               as_Register($src1$$reg),
11353               as_Register($src2$$reg),
11354               Assembler::LSL, 0);
11355   %}
11356 
11357   ins_pipe(ialu_reg_reg);
11358 %}
11359 
11360 // This pattern is automatically generated from aarch64_ad.m4
11361 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- change aarch64_ad.m4 and regenerate.
// The following patterns fold NOT-of-a-shifted-value into a single BIC
// (and-not with shifted register operand).  The shift amount is masked to
// the operand width: & 0x1f for 32-bit w-forms, & 0x3f for 64-bit forms.
11362 // val & (-1 ^ (val >>> shift)) ==> bicw
11363 instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
11364                          iRegIorL2I src1, iRegIorL2I src2,
11365                          immI src3, immI_M1 src4) %{
11366   match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
11367   ins_cost(1.9 * INSN_COST);
11368   format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}
11369
11370   ins_encode %{
11371     __ bicw(as_Register($dst$$reg),
11372               as_Register($src1$$reg),
11373               as_Register($src2$$reg),
11374               Assembler::LSR,
11375               $src3$$constant & 0x1f);
11376   %}
11377
11378   ins_pipe(ialu_reg_reg_shift);
11379 %}
11380
11381 // This pattern is automatically generated from aarch64_ad.m4
11382 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11383 // val & (-1 ^ (val >>> shift)) ==> bic
11384 instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
11385                          iRegL src1, iRegL src2,
11386                          immI src3, immL_M1 src4) %{
11387   match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
11388   ins_cost(1.9 * INSN_COST);
11389   format %{ "bic  $dst, $src1, $src2, LSR $src3" %}
11390
11391   ins_encode %{
11392     __ bic(as_Register($dst$$reg),
11393               as_Register($src1$$reg),
11394               as_Register($src2$$reg),
11395               Assembler::LSR,
11396               $src3$$constant & 0x3f);
11397   %}
11398
11399   ins_pipe(ialu_reg_reg_shift);
11400 %}
11401
11402 // This pattern is automatically generated from aarch64_ad.m4
11403 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11404 // val & (-1 ^ (val >> shift)) ==> bicw
11405 instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
11406                          iRegIorL2I src1, iRegIorL2I src2,
11407                          immI src3, immI_M1 src4) %{
11408   match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
11409   ins_cost(1.9 * INSN_COST);
11410   format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}
11411
11412   ins_encode %{
11413     __ bicw(as_Register($dst$$reg),
11414               as_Register($src1$$reg),
11415               as_Register($src2$$reg),
11416               Assembler::ASR,
11417               $src3$$constant & 0x1f);
11418   %}
11419
11420   ins_pipe(ialu_reg_reg_shift);
11421 %}
11422
11423 // This pattern is automatically generated from aarch64_ad.m4
11424 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11425 // val & (-1 ^ (val >> shift)) ==> bic
11426 instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
11427                          iRegL src1, iRegL src2,
11428                          immI src3, immL_M1 src4) %{
11429   match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
11430   ins_cost(1.9 * INSN_COST);
11431   format %{ "bic  $dst, $src1, $src2, ASR $src3" %}
11432
11433   ins_encode %{
11434     __ bic(as_Register($dst$$reg),
11435               as_Register($src1$$reg),
11436               as_Register($src2$$reg),
11437               Assembler::ASR,
11438               $src3$$constant & 0x3f);
11439   %}
11440
11441   ins_pipe(ialu_reg_reg_shift);
11442 %}
11443
11444 // This pattern is automatically generated from aarch64_ad.m4
11445 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11446 // val & (-1 ^ (val ror shift)) ==> bicw
11447 instruct AndI_reg_RotateRight_not_reg(iRegINoSp dst,
11448                          iRegIorL2I src1, iRegIorL2I src2,
11449                          immI src3, immI_M1 src4) %{
11450   match(Set dst (AndI src1 (XorI(RotateRight src2 src3) src4)));
11451   ins_cost(1.9 * INSN_COST);
11452   format %{ "bicw  $dst, $src1, $src2, ROR $src3" %}
11453
11454   ins_encode %{
11455     __ bicw(as_Register($dst$$reg),
11456               as_Register($src1$$reg),
11457               as_Register($src2$$reg),
11458               Assembler::ROR,
11459               $src3$$constant & 0x1f);
11460   %}
11461
11462   ins_pipe(ialu_reg_reg_shift);
11463 %}
11464
11465 // This pattern is automatically generated from aarch64_ad.m4
11466 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11467 // val & (-1 ^ (val ror shift)) ==> bic
11468 instruct AndL_reg_RotateRight_not_reg(iRegLNoSp dst,
11469                          iRegL src1, iRegL src2,
11470                          immI src3, immL_M1 src4) %{
11471   match(Set dst (AndL src1 (XorL(RotateRight src2 src3) src4)));
11472   ins_cost(1.9 * INSN_COST);
11473   format %{ "bic  $dst, $src1, $src2, ROR $src3" %}
11474
11475   ins_encode %{
11476     __ bic(as_Register($dst$$reg),
11477               as_Register($src1$$reg),
11478               as_Register($src2$$reg),
11479               Assembler::ROR,
11480               $src3$$constant & 0x3f);
11481   %}
11482
11483   ins_pipe(ialu_reg_reg_shift);
11484 %}
11485
11486 // This pattern is automatically generated from aarch64_ad.m4
11487 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11488 // val & (-1 ^ (val << shift)) ==> bicw
11489 instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
11490                          iRegIorL2I src1, iRegIorL2I src2,
11491                          immI src3, immI_M1 src4) %{
11492   match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
11493   ins_cost(1.9 * INSN_COST);
11494   format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}
11495
11496   ins_encode %{
11497     __ bicw(as_Register($dst$$reg),
11498               as_Register($src1$$reg),
11499               as_Register($src2$$reg),
11500               Assembler::LSL,
11501               $src3$$constant & 0x1f);
11502   %}
11503
11504   ins_pipe(ialu_reg_reg_shift);
11505 %}
11506
11507 // This pattern is automatically generated from aarch64_ad.m4
11508 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11509 // val & (-1 ^ (val << shift)) ==> bic
11510 instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
11511                          iRegL src1, iRegL src2,
11512                          immI src3, immL_M1 src4) %{
11513   match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
11514   ins_cost(1.9 * INSN_COST);
11515   format %{ "bic  $dst, $src1, $src2, LSL $src3" %}
11516
11517   ins_encode %{
11518     __ bic(as_Register($dst$$reg),
11519               as_Register($src1$$reg),
11520               as_Register($src2$$reg),
11521               Assembler::LSL,
11522               $src3$$constant & 0x3f);
11523   %}
11524
11525   ins_pipe(ialu_reg_reg_shift);
11526 %}
11527 
11528 // This pattern is automatically generated from aarch64_ad.m4
11529 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- change aarch64_ad.m4 and regenerate.
// The following patterns fold NOT-of-a-shifted-value into a single EON
// (xor-not with shifted register operand); the XorI/XorL with an all-ones
// immediate (src4) is the NOT.  Shift amounts are masked to operand width
// (& 0x1f for w-forms, & 0x3f for 64-bit forms).
11530 // val ^ (-1 ^ (val >>> shift)) ==> eonw
11531 instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
11532                          iRegIorL2I src1, iRegIorL2I src2,
11533                          immI src3, immI_M1 src4) %{
11534   match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
11535   ins_cost(1.9 * INSN_COST);
11536   format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}
11537
11538   ins_encode %{
11539     __ eonw(as_Register($dst$$reg),
11540               as_Register($src1$$reg),
11541               as_Register($src2$$reg),
11542               Assembler::LSR,
11543               $src3$$constant & 0x1f);
11544   %}
11545
11546   ins_pipe(ialu_reg_reg_shift);
11547 %}
11548
11549 // This pattern is automatically generated from aarch64_ad.m4
11550 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11551 // val ^ (-1 ^ (val >>> shift)) ==> eon
11552 instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
11553                          iRegL src1, iRegL src2,
11554                          immI src3, immL_M1 src4) %{
11555   match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
11556   ins_cost(1.9 * INSN_COST);
11557   format %{ "eon  $dst, $src1, $src2, LSR $src3" %}
11558
11559   ins_encode %{
11560     __ eon(as_Register($dst$$reg),
11561               as_Register($src1$$reg),
11562               as_Register($src2$$reg),
11563               Assembler::LSR,
11564               $src3$$constant & 0x3f);
11565   %}
11566
11567   ins_pipe(ialu_reg_reg_shift);
11568 %}
11569
11570 // This pattern is automatically generated from aarch64_ad.m4
11571 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11572 // val ^ (-1 ^ (val >> shift)) ==> eonw
11573 instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
11574                          iRegIorL2I src1, iRegIorL2I src2,
11575                          immI src3, immI_M1 src4) %{
11576   match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
11577   ins_cost(1.9 * INSN_COST);
11578   format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}
11579
11580   ins_encode %{
11581     __ eonw(as_Register($dst$$reg),
11582               as_Register($src1$$reg),
11583               as_Register($src2$$reg),
11584               Assembler::ASR,
11585               $src3$$constant & 0x1f);
11586   %}
11587
11588   ins_pipe(ialu_reg_reg_shift);
11589 %}
11590
11591 // This pattern is automatically generated from aarch64_ad.m4
11592 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11593 // val ^ (-1 ^ (val >> shift)) ==> eon
11594 instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
11595                          iRegL src1, iRegL src2,
11596                          immI src3, immL_M1 src4) %{
11597   match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
11598   ins_cost(1.9 * INSN_COST);
11599   format %{ "eon  $dst, $src1, $src2, ASR $src3" %}
11600
11601   ins_encode %{
11602     __ eon(as_Register($dst$$reg),
11603               as_Register($src1$$reg),
11604               as_Register($src2$$reg),
11605               Assembler::ASR,
11606               $src3$$constant & 0x3f);
11607   %}
11608
11609   ins_pipe(ialu_reg_reg_shift);
11610 %}
11611
11612 // This pattern is automatically generated from aarch64_ad.m4
11613 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11614 // val ^ (-1 ^ (val ror shift)) ==> eonw
11615 instruct XorI_reg_RotateRight_not_reg(iRegINoSp dst,
11616                          iRegIorL2I src1, iRegIorL2I src2,
11617                          immI src3, immI_M1 src4) %{
11618   match(Set dst (XorI src4 (XorI(RotateRight src2 src3) src1)));
11619   ins_cost(1.9 * INSN_COST);
11620   format %{ "eonw  $dst, $src1, $src2, ROR $src3" %}
11621
11622   ins_encode %{
11623     __ eonw(as_Register($dst$$reg),
11624               as_Register($src1$$reg),
11625               as_Register($src2$$reg),
11626               Assembler::ROR,
11627               $src3$$constant & 0x1f);
11628   %}
11629
11630   ins_pipe(ialu_reg_reg_shift);
11631 %}
11632
11633 // This pattern is automatically generated from aarch64_ad.m4
11634 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11635 // val ^ (-1 ^ (val ror shift)) ==> eon
11636 instruct XorL_reg_RotateRight_not_reg(iRegLNoSp dst,
11637                          iRegL src1, iRegL src2,
11638                          immI src3, immL_M1 src4) %{
11639   match(Set dst (XorL src4 (XorL(RotateRight src2 src3) src1)));
11640   ins_cost(1.9 * INSN_COST);
11641   format %{ "eon  $dst, $src1, $src2, ROR $src3" %}
11642
11643   ins_encode %{
11644     __ eon(as_Register($dst$$reg),
11645               as_Register($src1$$reg),
11646               as_Register($src2$$reg),
11647               Assembler::ROR,
11648               $src3$$constant & 0x3f);
11649   %}
11650
11651   ins_pipe(ialu_reg_reg_shift);
11652 %}
11653
11654 // This pattern is automatically generated from aarch64_ad.m4
11655 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11656 // val ^ (-1 ^ (val << shift)) ==> eonw
11657 instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
11658                          iRegIorL2I src1, iRegIorL2I src2,
11659                          immI src3, immI_M1 src4) %{
11660   match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
11661   ins_cost(1.9 * INSN_COST);
11662   format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}
11663
11664   ins_encode %{
11665     __ eonw(as_Register($dst$$reg),
11666               as_Register($src1$$reg),
11667               as_Register($src2$$reg),
11668               Assembler::LSL,
11669               $src3$$constant & 0x1f);
11670   %}
11671
11672   ins_pipe(ialu_reg_reg_shift);
11673 %}
11674
11675 // This pattern is automatically generated from aarch64_ad.m4
11676 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11677 // val ^ (-1 ^ (val << shift)) ==> eon
11678 instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
11679                          iRegL src1, iRegL src2,
11680                          immI src3, immL_M1 src4) %{
11681   match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
11682   ins_cost(1.9 * INSN_COST);
11683   format %{ "eon  $dst, $src1, $src2, LSL $src3" %}
11684
11685   ins_encode %{
11686     __ eon(as_Register($dst$$reg),
11687               as_Register($src1$$reg),
11688               as_Register($src2$$reg),
11689               Assembler::LSL,
11690               $src3$$constant & 0x3f);
11691   %}
11692
11693   ins_pipe(ialu_reg_reg_shift);
11694 %}
11695 
11696 // This pattern is automatically generated from aarch64_ad.m4
11697 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- change aarch64_ad.m4 and regenerate.
// The following patterns fold NOT-of-a-shifted-value into a single ORN
// (or-not with shifted register operand).  Shift amounts are masked to
// operand width (& 0x1f for w-forms, & 0x3f for 64-bit forms).
11698 // val | (-1 ^ (val >>> shift)) ==> ornw
11699 instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
11700                          iRegIorL2I src1, iRegIorL2I src2,
11701                          immI src3, immI_M1 src4) %{
11702   match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
11703   ins_cost(1.9 * INSN_COST);
11704   format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}
11705
11706   ins_encode %{
11707     __ ornw(as_Register($dst$$reg),
11708               as_Register($src1$$reg),
11709               as_Register($src2$$reg),
11710               Assembler::LSR,
11711               $src3$$constant & 0x1f);
11712   %}
11713
11714   ins_pipe(ialu_reg_reg_shift);
11715 %}
11716
11717 // This pattern is automatically generated from aarch64_ad.m4
11718 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11719 // val | (-1 ^ (val >>> shift)) ==> orn
11720 instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
11721                          iRegL src1, iRegL src2,
11722                          immI src3, immL_M1 src4) %{
11723   match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
11724   ins_cost(1.9 * INSN_COST);
11725   format %{ "orn  $dst, $src1, $src2, LSR $src3" %}
11726
11727   ins_encode %{
11728     __ orn(as_Register($dst$$reg),
11729               as_Register($src1$$reg),
11730               as_Register($src2$$reg),
11731               Assembler::LSR,
11732               $src3$$constant & 0x3f);
11733   %}
11734
11735   ins_pipe(ialu_reg_reg_shift);
11736 %}
11737
11738 // This pattern is automatically generated from aarch64_ad.m4
11739 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11740 // val | (-1 ^ (val >> shift)) ==> ornw
11741 instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
11742                          iRegIorL2I src1, iRegIorL2I src2,
11743                          immI src3, immI_M1 src4) %{
11744   match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
11745   ins_cost(1.9 * INSN_COST);
11746   format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}
11747
11748   ins_encode %{
11749     __ ornw(as_Register($dst$$reg),
11750               as_Register($src1$$reg),
11751               as_Register($src2$$reg),
11752               Assembler::ASR,
11753               $src3$$constant & 0x1f);
11754   %}
11755
11756   ins_pipe(ialu_reg_reg_shift);
11757 %}
11758
11759 // This pattern is automatically generated from aarch64_ad.m4
11760 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11761 // val | (-1 ^ (val >> shift)) ==> orn
11762 instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
11763                          iRegL src1, iRegL src2,
11764                          immI src3, immL_M1 src4) %{
11765   match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
11766   ins_cost(1.9 * INSN_COST);
11767   format %{ "orn  $dst, $src1, $src2, ASR $src3" %}
11768
11769   ins_encode %{
11770     __ orn(as_Register($dst$$reg),
11771               as_Register($src1$$reg),
11772               as_Register($src2$$reg),
11773               Assembler::ASR,
11774               $src3$$constant & 0x3f);
11775   %}
11776
11777   ins_pipe(ialu_reg_reg_shift);
11778 %}
11779
11780 // This pattern is automatically generated from aarch64_ad.m4
11781 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11782 // val | (-1 ^ (val ror shift)) ==> ornw
11783 instruct OrI_reg_RotateRight_not_reg(iRegINoSp dst,
11784                          iRegIorL2I src1, iRegIorL2I src2,
11785                          immI src3, immI_M1 src4) %{
11786   match(Set dst (OrI src1 (XorI(RotateRight src2 src3) src4)));
11787   ins_cost(1.9 * INSN_COST);
11788   format %{ "ornw  $dst, $src1, $src2, ROR $src3" %}
11789
11790   ins_encode %{
11791     __ ornw(as_Register($dst$$reg),
11792               as_Register($src1$$reg),
11793               as_Register($src2$$reg),
11794               Assembler::ROR,
11795               $src3$$constant & 0x1f);
11796   %}
11797
11798   ins_pipe(ialu_reg_reg_shift);
11799 %}
11800
11801 // This pattern is automatically generated from aarch64_ad.m4
11802 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11803 // val | (-1 ^ (val ror shift)) ==> orn
11804 instruct OrL_reg_RotateRight_not_reg(iRegLNoSp dst,
11805                          iRegL src1, iRegL src2,
11806                          immI src3, immL_M1 src4) %{
11807   match(Set dst (OrL src1 (XorL(RotateRight src2 src3) src4)));
11808   ins_cost(1.9 * INSN_COST);
11809   format %{ "orn  $dst, $src1, $src2, ROR $src3" %}
11810
11811   ins_encode %{
11812     __ orn(as_Register($dst$$reg),
11813               as_Register($src1$$reg),
11814               as_Register($src2$$reg),
11815               Assembler::ROR,
11816               $src3$$constant & 0x3f);
11817   %}
11818
11819   ins_pipe(ialu_reg_reg_shift);
11820 %}
11821
11822 // This pattern is automatically generated from aarch64_ad.m4
11823 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11824 // val | (-1 ^ (val << shift)) ==> ornw
11825 instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
11826                          iRegIorL2I src1, iRegIorL2I src2,
11827                          immI src3, immI_M1 src4) %{
11828   match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
11829   ins_cost(1.9 * INSN_COST);
11830   format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}
11831
11832   ins_encode %{
11833     __ ornw(as_Register($dst$$reg),
11834               as_Register($src1$$reg),
11835               as_Register($src2$$reg),
11836               Assembler::LSL,
11837               $src3$$constant & 0x1f);
11838   %}
11839
11840   ins_pipe(ialu_reg_reg_shift);
11841 %}
11842
11843 // This pattern is automatically generated from aarch64_ad.m4
11844 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11845 // val | (-1 ^ (val << shift)) ==> orn
11846 instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
11847                          iRegL src1, iRegL src2,
11848                          immI src3, immL_M1 src4) %{
11849   match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
11850   ins_cost(1.9 * INSN_COST);
11851   format %{ "orn  $dst, $src1, $src2, LSL $src3" %}
11852
11853   ins_encode %{
11854     __ orn(as_Register($dst$$reg),
11855               as_Register($src1$$reg),
11856               as_Register($src2$$reg),
11857               Assembler::LSL,
11858               $src3$$constant & 0x3f);
11859   %}
11860
11861   ins_pipe(ialu_reg_reg_shift);
11862 %}
11863 
11864 // This pattern is automatically generated from aarch64_ad.m4
11865 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- change aarch64_ad.m4 and regenerate.
// The following patterns fuse a shift/rotate of the second operand into the
// AND itself, using the AArch64 shifted-register form of AND:
//   src1 & (src2 <shift-op> src3)  ==>  andw/andr dst, src1, src2, <op> src3
// Shift amounts are masked to operand width (& 0x1f w-forms, & 0x3f 64-bit).
11866 instruct AndI_reg_URShift_reg(iRegINoSp dst,
11867                          iRegIorL2I src1, iRegIorL2I src2,
11868                          immI src3) %{
11869   match(Set dst (AndI src1 (URShiftI src2 src3)));
11870
11871   ins_cost(1.9 * INSN_COST);
11872   format %{ "andw  $dst, $src1, $src2, LSR $src3" %}
11873
11874   ins_encode %{
11875     __ andw(as_Register($dst$$reg),
11876               as_Register($src1$$reg),
11877               as_Register($src2$$reg),
11878               Assembler::LSR,
11879               $src3$$constant & 0x1f);
11880   %}
11881
11882   ins_pipe(ialu_reg_reg_shift);
11883 %}
11884
11885 // This pattern is automatically generated from aarch64_ad.m4
11886 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11887 instruct AndL_reg_URShift_reg(iRegLNoSp dst,
11888                          iRegL src1, iRegL src2,
11889                          immI src3) %{
11890   match(Set dst (AndL src1 (URShiftL src2 src3)));
11891
11892   ins_cost(1.9 * INSN_COST);
11893   format %{ "andr  $dst, $src1, $src2, LSR $src3" %}
11894
11895   ins_encode %{
11896     __ andr(as_Register($dst$$reg),
11897               as_Register($src1$$reg),
11898               as_Register($src2$$reg),
11899               Assembler::LSR,
11900               $src3$$constant & 0x3f);
11901   %}
11902
11903   ins_pipe(ialu_reg_reg_shift);
11904 %}
11905
11906 // This pattern is automatically generated from aarch64_ad.m4
11907 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11908 instruct AndI_reg_RShift_reg(iRegINoSp dst,
11909                          iRegIorL2I src1, iRegIorL2I src2,
11910                          immI src3) %{
11911   match(Set dst (AndI src1 (RShiftI src2 src3)));
11912
11913   ins_cost(1.9 * INSN_COST);
11914   format %{ "andw  $dst, $src1, $src2, ASR $src3" %}
11915
11916   ins_encode %{
11917     __ andw(as_Register($dst$$reg),
11918               as_Register($src1$$reg),
11919               as_Register($src2$$reg),
11920               Assembler::ASR,
11921               $src3$$constant & 0x1f);
11922   %}
11923
11924   ins_pipe(ialu_reg_reg_shift);
11925 %}
11926
11927 // This pattern is automatically generated from aarch64_ad.m4
11928 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11929 instruct AndL_reg_RShift_reg(iRegLNoSp dst,
11930                          iRegL src1, iRegL src2,
11931                          immI src3) %{
11932   match(Set dst (AndL src1 (RShiftL src2 src3)));
11933
11934   ins_cost(1.9 * INSN_COST);
11935   format %{ "andr  $dst, $src1, $src2, ASR $src3" %}
11936
11937   ins_encode %{
11938     __ andr(as_Register($dst$$reg),
11939               as_Register($src1$$reg),
11940               as_Register($src2$$reg),
11941               Assembler::ASR,
11942               $src3$$constant & 0x3f);
11943   %}
11944
11945   ins_pipe(ialu_reg_reg_shift);
11946 %}
11947
11948 // This pattern is automatically generated from aarch64_ad.m4
11949 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11950 instruct AndI_reg_LShift_reg(iRegINoSp dst,
11951                          iRegIorL2I src1, iRegIorL2I src2,
11952                          immI src3) %{
11953   match(Set dst (AndI src1 (LShiftI src2 src3)));
11954
11955   ins_cost(1.9 * INSN_COST);
11956   format %{ "andw  $dst, $src1, $src2, LSL $src3" %}
11957
11958   ins_encode %{
11959     __ andw(as_Register($dst$$reg),
11960               as_Register($src1$$reg),
11961               as_Register($src2$$reg),
11962               Assembler::LSL,
11963               $src3$$constant & 0x1f);
11964   %}
11965
11966   ins_pipe(ialu_reg_reg_shift);
11967 %}
11968
11969 // This pattern is automatically generated from aarch64_ad.m4
11970 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11971 instruct AndL_reg_LShift_reg(iRegLNoSp dst,
11972                          iRegL src1, iRegL src2,
11973                          immI src3) %{
11974   match(Set dst (AndL src1 (LShiftL src2 src3)));
11975
11976   ins_cost(1.9 * INSN_COST);
11977   format %{ "andr  $dst, $src1, $src2, LSL $src3" %}
11978
11979   ins_encode %{
11980     __ andr(as_Register($dst$$reg),
11981               as_Register($src1$$reg),
11982               as_Register($src2$$reg),
11983               Assembler::LSL,
11984               $src3$$constant & 0x3f);
11985   %}
11986
11987   ins_pipe(ialu_reg_reg_shift);
11988 %}
11989
11990 // This pattern is automatically generated from aarch64_ad.m4
11991 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11992 instruct AndI_reg_RotateRight_reg(iRegINoSp dst,
11993                          iRegIorL2I src1, iRegIorL2I src2,
11994                          immI src3) %{
11995   match(Set dst (AndI src1 (RotateRight src2 src3)));
11996
11997   ins_cost(1.9 * INSN_COST);
11998   format %{ "andw  $dst, $src1, $src2, ROR $src3" %}
11999
12000   ins_encode %{
12001     __ andw(as_Register($dst$$reg),
12002               as_Register($src1$$reg),
12003               as_Register($src2$$reg),
12004               Assembler::ROR,
12005               $src3$$constant & 0x1f);
12006   %}
12007
12008   ins_pipe(ialu_reg_reg_shift);
12009 %}
12010
12011 // This pattern is automatically generated from aarch64_ad.m4
12012 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12013 instruct AndL_reg_RotateRight_reg(iRegLNoSp dst,
12014                          iRegL src1, iRegL src2,
12015                          immI src3) %{
12016   match(Set dst (AndL src1 (RotateRight src2 src3)));
12017
12018   ins_cost(1.9 * INSN_COST);
12019   format %{ "andr  $dst, $src1, $src2, ROR $src3" %}
12020
12021   ins_encode %{
12022     __ andr(as_Register($dst$$reg),
12023               as_Register($src1$$reg),
12024               as_Register($src2$$reg),
12025               Assembler::ROR,
12026               $src3$$constant & 0x3f);
12027   %}
12028
12029   ins_pipe(ialu_reg_reg_shift);
12030 %}
12031 
12032 // This pattern is automatically generated from aarch64_ad.m4
12033 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- change aarch64_ad.m4 and regenerate.
// The following patterns fuse a shift/rotate of the second operand into the
// XOR itself, using the AArch64 shifted-register form of EOR:
//   src1 ^ (src2 <shift-op> src3)  ==>  eorw/eor dst, src1, src2, <op> src3
// Shift amounts are masked to operand width (& 0x1f w-forms, & 0x3f 64-bit).
12034 instruct XorI_reg_URShift_reg(iRegINoSp dst,
12035                          iRegIorL2I src1, iRegIorL2I src2,
12036                          immI src3) %{
12037   match(Set dst (XorI src1 (URShiftI src2 src3)));
12038
12039   ins_cost(1.9 * INSN_COST);
12040   format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}
12041
12042   ins_encode %{
12043     __ eorw(as_Register($dst$$reg),
12044               as_Register($src1$$reg),
12045               as_Register($src2$$reg),
12046               Assembler::LSR,
12047               $src3$$constant & 0x1f);
12048   %}
12049
12050   ins_pipe(ialu_reg_reg_shift);
12051 %}
12052
12053 // This pattern is automatically generated from aarch64_ad.m4
12054 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12055 instruct XorL_reg_URShift_reg(iRegLNoSp dst,
12056                          iRegL src1, iRegL src2,
12057                          immI src3) %{
12058   match(Set dst (XorL src1 (URShiftL src2 src3)));
12059
12060   ins_cost(1.9 * INSN_COST);
12061   format %{ "eor  $dst, $src1, $src2, LSR $src3" %}
12062
12063   ins_encode %{
12064     __ eor(as_Register($dst$$reg),
12065               as_Register($src1$$reg),
12066               as_Register($src2$$reg),
12067               Assembler::LSR,
12068               $src3$$constant & 0x3f);
12069   %}
12070
12071   ins_pipe(ialu_reg_reg_shift);
12072 %}
12073
12074 // This pattern is automatically generated from aarch64_ad.m4
12075 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12076 instruct XorI_reg_RShift_reg(iRegINoSp dst,
12077                          iRegIorL2I src1, iRegIorL2I src2,
12078                          immI src3) %{
12079   match(Set dst (XorI src1 (RShiftI src2 src3)));
12080
12081   ins_cost(1.9 * INSN_COST);
12082   format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}
12083
12084   ins_encode %{
12085     __ eorw(as_Register($dst$$reg),
12086               as_Register($src1$$reg),
12087               as_Register($src2$$reg),
12088               Assembler::ASR,
12089               $src3$$constant & 0x1f);
12090   %}
12091
12092   ins_pipe(ialu_reg_reg_shift);
12093 %}
12094
12095 // This pattern is automatically generated from aarch64_ad.m4
12096 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12097 instruct XorL_reg_RShift_reg(iRegLNoSp dst,
12098                          iRegL src1, iRegL src2,
12099                          immI src3) %{
12100   match(Set dst (XorL src1 (RShiftL src2 src3)));
12101
12102   ins_cost(1.9 * INSN_COST);
12103   format %{ "eor  $dst, $src1, $src2, ASR $src3" %}
12104
12105   ins_encode %{
12106     __ eor(as_Register($dst$$reg),
12107               as_Register($src1$$reg),
12108               as_Register($src2$$reg),
12109               Assembler::ASR,
12110               $src3$$constant & 0x3f);
12111   %}
12112
12113   ins_pipe(ialu_reg_reg_shift);
12114 %}
12115
12116 // This pattern is automatically generated from aarch64_ad.m4
12117 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12118 instruct XorI_reg_LShift_reg(iRegINoSp dst,
12119                          iRegIorL2I src1, iRegIorL2I src2,
12120                          immI src3) %{
12121   match(Set dst (XorI src1 (LShiftI src2 src3)));
12122
12123   ins_cost(1.9 * INSN_COST);
12124   format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}
12125
12126   ins_encode %{
12127     __ eorw(as_Register($dst$$reg),
12128               as_Register($src1$$reg),
12129               as_Register($src2$$reg),
12130               Assembler::LSL,
12131               $src3$$constant & 0x1f);
12132   %}
12133
12134   ins_pipe(ialu_reg_reg_shift);
12135 %}
12136
12137 // This pattern is automatically generated from aarch64_ad.m4
12138 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12139 instruct XorL_reg_LShift_reg(iRegLNoSp dst,
12140                          iRegL src1, iRegL src2,
12141                          immI src3) %{
12142   match(Set dst (XorL src1 (LShiftL src2 src3)));
12143
12144   ins_cost(1.9 * INSN_COST);
12145   format %{ "eor  $dst, $src1, $src2, LSL $src3" %}
12146
12147   ins_encode %{
12148     __ eor(as_Register($dst$$reg),
12149               as_Register($src1$$reg),
12150               as_Register($src2$$reg),
12151               Assembler::LSL,
12152               $src3$$constant & 0x3f);
12153   %}
12154
12155   ins_pipe(ialu_reg_reg_shift);
12156 %}
12157
12158 // This pattern is automatically generated from aarch64_ad.m4
12159 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12160 instruct XorI_reg_RotateRight_reg(iRegINoSp dst,
12161                          iRegIorL2I src1, iRegIorL2I src2,
12162                          immI src3) %{
12163   match(Set dst (XorI src1 (RotateRight src2 src3)));
12164
12165   ins_cost(1.9 * INSN_COST);
12166   format %{ "eorw  $dst, $src1, $src2, ROR $src3" %}
12167
12168   ins_encode %{
12169     __ eorw(as_Register($dst$$reg),
12170               as_Register($src1$$reg),
12171               as_Register($src2$$reg),
12172               Assembler::ROR,
12173               $src3$$constant & 0x1f);
12174   %}
12175
12176   ins_pipe(ialu_reg_reg_shift);
12177 %}
12178
12179 // This pattern is automatically generated from aarch64_ad.m4
12180 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12181 instruct XorL_reg_RotateRight_reg(iRegLNoSp dst,
12182                          iRegL src1, iRegL src2,
12183                          immI src3) %{
12184   match(Set dst (XorL src1 (RotateRight src2 src3)));
12185
12186   ins_cost(1.9 * INSN_COST);
12187   format %{ "eor  $dst, $src1, $src2, ROR $src3" %}
12188
12189   ins_encode %{
12190     __ eor(as_Register($dst$$reg),
12191               as_Register($src1$$reg),
12192               as_Register($src2$$reg),
12193               Assembler::ROR,
12194               $src3$$constant & 0x3f);
12195   %}
12196
12197   ins_pipe(ialu_reg_reg_shift);
12198 %}
12199 
// Or (32/64-bit) with a shifted or rotated second operand.  Each rule below
// folds the shift into orr/orrw's shifted-register operand so shift + or
// becomes one instruction; shift amounts are masked to the operand width
// (0x1f for int, 0x3f for long).

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 | (src2 >>> src3), 32-bit.
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 | (src2 >>> src3), 64-bit.
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 | (src2 >> src3) (arithmetic shift), 32-bit.
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 | (src2 >> src3) (arithmetic shift), 64-bit.
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 | (src2 << src3), 32-bit.
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 | (src2 << src3), 64-bit.
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 | rotate_right(src2, src3), 32-bit.
instruct OrI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 | rotate_right(src2, src3), 64-bit.
instruct OrL_reg_RotateRight_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12367 
// Add (32/64-bit) with a shifted second operand.  Each rule folds the shift
// into add/addw's shifted-register operand; shift amounts are masked to the
// operand width (0x1f for int, 0x3f for long).

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 + (src2 >>> src3), 32-bit.
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 + (src2 >>> src3), 64-bit.
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 + (src2 >> src3) (arithmetic shift), 32-bit.
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 + (src2 >> src3) (arithmetic shift), 64-bit.
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 + (src2 << src3), 32-bit.
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 + (src2 << src3), 64-bit.
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12493 
// Subtract (32/64-bit) with a shifted second operand.  Each rule folds the
// shift into sub/subw's shifted-register operand; shift amounts are masked
// to the operand width (0x1f for int, 0x3f for long).

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (src2 >>> src3), 32-bit.
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (src2 >>> src3), 64-bit.
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (src2 >> src3) (arithmetic shift), 32-bit.
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (src2 >> src3) (arithmetic shift), 64-bit.
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (src2 << src3), 32-bit.
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (src2 << src3), 64-bit.
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12619 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// (src << lshift) >> rshift (arithmetic) collapses to one signed bitfield
// move: immr = (rshift - lshift) & 63 rotates the field into place and
// imms = 63 - lshift marks its most significant bit.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    // Shift counts are masked to the 64-bit operand width, as the JVM does.
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of sbfmL: field parameters computed modulo 32.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// (src << lshift) >>> rshift (logical) collapses to one unsigned bitfield
// move; r/s are computed exactly as in sbfmL above.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of ubfmL: field parameters computed modulo 32.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12711 
// Bitfield extract with shift & mask

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (src >>> rshift) & mask, where mask = 2^width - 1 (guaranteed by
// immI_bitmask), as a single ubfxw extracting <width> bits at bit <rshift>.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    // Field width recovered from the mask; valid because mask+1 is a power
    // of two by the immI_bitmask operand constraint.
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit variant of ubfxwI: predicate bounds rshift + width to 64.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));
  // Make sure we are not going to exceed what ubfx can do.
  predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12753 
12754 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// dst = (long)((src >>> rshift) & mask): the 64-bit ubfx both extracts the
// field and zero-fills the upper bits, so no separate i2l is needed.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12777 
12778 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// dst = (src & mask) << lshift as one ubfizw (bitfield insert in zeros);
// the predicate rejects fields that would not fit in 32 bits.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// 64-bit variant of ubfizwI: field must fit in 64 bits.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  predicate((exact_log2_long(n->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12822 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// dst = (long)((src & mask) << lshift) via ubfizw.  Note the predicate here
// bounds lshift + width by 31 (strictly tighter than the plain ubfizwI rule),
// keeping the 32-bit result non-negative so its zero extension is correct.
instruct ubfizwIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (LShiftI (AndI src mask) lshift)));
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// dst = (int)((src & mask) << lshift): the predicate's <= 31 bound keeps the
// inserted field inside the low 32 bits, so the l2i truncation is a no-op.
instruct ubfizLConvL2I(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
%{
  match(Set dst (ConvL2I (LShiftL (AndL src mask) lshift)));
  predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12866 
12867 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
// dst = ((long)(src & mask)) << lshift as a 64-bit ubfiz; the masked value is
// non-negative (immI_bitmask), so the intermediate i2l is a zero extension.
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// If there is a convert L to I block between and AndL and a LShiftI, we can also match ubfiz
// dst = ((int)(src & mask)) << lshift; the predicate's <= 31 bound keeps the
// field in the low 32 bits so the l2i truncation loses nothing.
instruct ubfizLConvL2Ix(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
%{
  match(Set dst (LShiftI (ConvL2I (AndL src mask)) lshift));
  predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 31)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12909 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Can skip int2long conversions after AND with small bitmask
// dst = (long)(src & msk): a ubfiz at lsb 0 with width log2(msk+1) both
// masks and zero-extends in one instruction (msk+1 is a power of two by
// the immI_bitmask operand constraint).
instruct ubfizIConvI2LAndI(iRegLNoSp dst, iRegI src, immI_bitmask msk)
%{
  match(Set dst (ConvI2L (AndI src msk)));
  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, 0, exact_log2($msk + 1) " %}
  ins_encode %{
    __ ubfiz(as_Register($dst$$reg), as_Register($src$$reg), 0, exact_log2($msk$$constant + 1));
  %}
  ins_pipe(ialu_reg_shift);
%}
12924 
12925 
// Rotations
//
// The four rules below recognize (a << lshift) op (b >>> rshift) where the
// shift counts sum to the operand width (the predicate checks
// (lshift + rshift) mod width == 0), which is exactly the extr/extrw
// extract-from-register-pair operation.

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (src1 << lshift) | (src2 >>> rshift), 64-bit, via extr.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit variant of extrOrL, emitted as extrw.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Same combination expressed with AddL instead of OrL; since the shifted
// halves select disjoint bits under the predicate, add and or coincide.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit variant of extrAddL, emitted as extrw.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12998 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate right by immediate: extrw with both source operands equal to src
// is a 32-bit rotate right (rotate count masked to 0x1f).
instruct rorI_imm(iRegINoSp dst, iRegI src, immI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ extrw(as_Register($dst$$reg), as_Register($src$$reg), as_Register($src$$reg),
               $shift$$constant & 0x1f);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit rotate right by immediate via extr (count masked to 0x3f).
instruct rorL_imm(iRegLNoSp dst, iRegL src, immI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ extr(as_Register($dst$$reg), as_Register($src$$reg), as_Register($src$$reg),
               $shift$$constant & 0x3f);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit rotate right by a register amount (rorvw variable rotate).
instruct rorI_reg(iRegINoSp dst, iRegI src, iRegI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg), as_Register($shift$$reg));
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit rotate right by a register amount (rorv variable rotate).
instruct rorL_reg(iRegLNoSp dst, iRegL src, iRegI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ rorv(as_Register($dst$$reg), as_Register($src$$reg), as_Register($shift$$reg));
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
13060 
13061 // This pattern is automatically generated from aarch64_ad.m4
13062 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13063 instruct rolI_reg(iRegINoSp dst, iRegI src, iRegI shift)
13064 %{
13065   match(Set dst (RotateLeft src shift));
13066 
13067   ins_cost(INSN_COST);
13068   format %{ "rol    $dst, $src, $shift" %}
13069 
13070   ins_encode %{
13071      __ subw(rscratch1, zr, as_Register($shift$$reg));
13072      __ rorvw(as_Register($dst$$reg), as_Register($src$$reg), rscratch1);
13073   %}
13074   ins_pipe(ialu_reg_reg_vshift);
13075 %}
13076 
13077 // This pattern is automatically generated from aarch64_ad.m4
13078 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13079 instruct rolL_reg(iRegLNoSp dst, iRegL src, iRegI shift)
13080 %{
13081   match(Set dst (RotateLeft src shift));
13082 
13083   ins_cost(INSN_COST);
13084   format %{ "rol    $dst, $src, $shift" %}
13085 
13086   ins_encode %{
13087      __ subw(rscratch1, zr, as_Register($shift$$reg));
13088      __ rorv(as_Register($dst$$reg), as_Register($src$$reg), rscratch1);
13089   %}
13090   ins_pipe(ialu_reg_reg_vshift);
13091 %}
13092 
13093 
13094 // Add/subtract (extended)
13095 
13096 // This pattern is automatically generated from aarch64_ad.m4
13097 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13098 instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
13099 %{
13100   match(Set dst (AddL src1 (ConvI2L src2)));
13101   ins_cost(INSN_COST);
13102   format %{ "add  $dst, $src1, $src2, sxtw" %}
13103 
13104    ins_encode %{
13105      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
13106             as_Register($src2$$reg), ext::sxtw);
13107    %}
13108   ins_pipe(ialu_reg_reg);
13109 %}
13110 
13111 // This pattern is automatically generated from aarch64_ad.m4
13112 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13113 instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
13114 %{
13115   match(Set dst (SubL src1 (ConvI2L src2)));
13116   ins_cost(INSN_COST);
13117   format %{ "sub  $dst, $src1, $src2, sxtw" %}
13118 
13119    ins_encode %{
13120      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
13121             as_Register($src2$$reg), ext::sxtw);
13122    %}
13123   ins_pipe(ialu_reg_reg);
13124 %}
13125 
13126 // This pattern is automatically generated from aarch64_ad.m4
13127 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13128 instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
13129 %{
13130   match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
13131   ins_cost(INSN_COST);
13132   format %{ "add  $dst, $src1, $src2, sxth" %}
13133 
13134    ins_encode %{
13135      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
13136             as_Register($src2$$reg), ext::sxth);
13137    %}
13138   ins_pipe(ialu_reg_reg);
13139 %}
13140 
13141 // This pattern is automatically generated from aarch64_ad.m4
13142 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13143 instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
13144 %{
13145   match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
13146   ins_cost(INSN_COST);
13147   format %{ "add  $dst, $src1, $src2, sxtb" %}
13148 
13149    ins_encode %{
13150      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
13151             as_Register($src2$$reg), ext::sxtb);
13152    %}
13153   ins_pipe(ialu_reg_reg);
13154 %}
13155 
13156 // This pattern is automatically generated from aarch64_ad.m4
13157 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13158 instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
13159 %{
13160   match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
13161   ins_cost(INSN_COST);
13162   format %{ "add  $dst, $src1, $src2, uxtb" %}
13163 
13164    ins_encode %{
13165      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
13166             as_Register($src2$$reg), ext::uxtb);
13167    %}
13168   ins_pipe(ialu_reg_reg);
13169 %}
13170 
13171 // This pattern is automatically generated from aarch64_ad.m4
13172 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13173 instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
13174 %{
13175   match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
13176   ins_cost(INSN_COST);
13177   format %{ "add  $dst, $src1, $src2, sxth" %}
13178 
13179    ins_encode %{
13180      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
13181             as_Register($src2$$reg), ext::sxth);
13182    %}
13183   ins_pipe(ialu_reg_reg);
13184 %}
13185 
13186 // This pattern is automatically generated from aarch64_ad.m4
13187 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13188 instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
13189 %{
13190   match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
13191   ins_cost(INSN_COST);
13192   format %{ "add  $dst, $src1, $src2, sxtw" %}
13193 
13194    ins_encode %{
13195      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
13196             as_Register($src2$$reg), ext::sxtw);
13197    %}
13198   ins_pipe(ialu_reg_reg);
13199 %}
13200 
13201 // This pattern is automatically generated from aarch64_ad.m4
13202 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13203 instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
13204 %{
13205   match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
13206   ins_cost(INSN_COST);
13207   format %{ "add  $dst, $src1, $src2, sxtb" %}
13208 
13209    ins_encode %{
13210      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
13211             as_Register($src2$$reg), ext::sxtb);
13212    %}
13213   ins_pipe(ialu_reg_reg);
13214 %}
13215 
13216 // This pattern is automatically generated from aarch64_ad.m4
13217 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13218 instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
13219 %{
13220   match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
13221   ins_cost(INSN_COST);
13222   format %{ "add  $dst, $src1, $src2, uxtb" %}
13223 
13224    ins_encode %{
13225      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
13226             as_Register($src2$$reg), ext::uxtb);
13227    %}
13228   ins_pipe(ialu_reg_reg);
13229 %}
13230 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section; comments added here will be lost on
// regeneration.
//
// Zero-extension written as an And with an all-ones low mask folds into
// the extended-register add/sub forms:
//   x & 0xff       -> uxtb,  x & 0xffff -> uxth,  x & 0xffffffff -> uxtw.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
13380 
13381 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section; comments added here will be lost on
// regeneration.
//
// Sign-extension written as (x << k) >> k followed by a further left shift
// by lshift2 folds into a single add/sub (extended register) with a shift
// amount. The immIExt operand type constrains lshift2 to what the
// extended-register encoding accepts — presumably 0..4, TODO confirm
// against the immIExt definition elsewhere in this file.
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13531 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section; comments added here will be lost on
// regeneration.
//
// Extended-register add/sub with a shift amount, for the remaining
// extension idioms: ConvI2L (sxtw) and And-with-low-mask (uxtb/uxth/uxtw),
// each followed by a left shift constrained by immIExt.
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13711 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section; comments added here will be lost on
// regeneration.
//
// Conditional-select building blocks. These have no match rule, only an
// effect() — they are not matched directly but are instantiated by expand
// rules (e.g. minI_reg_imm0 below uses cmovI_reg_imm0_lt). Each assumes
// flags in cr were set by a preceding compare.
instruct cmovI_reg_reg_lt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE src2, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 lt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             $src2$$Register,
             Assembler::LT);
  %}
  ins_pipe(icond_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct cmovI_reg_reg_gt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE src2, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 gt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             $src2$$Register,
             Assembler::GT);
  %}
  ins_pipe(icond_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (lt ? src1 : 0) — second operand is the zero register.
instruct cmovI_reg_imm0_lt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, zr lt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct cmovI_reg_imm0_gt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, zr gt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (le ? src1 : zr + 1) — CSINC with zr yields the constant 1 on the
// false path.
instruct cmovI_reg_imm1_le(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, $src1, zr le\t"  %}

  ins_encode %{
    __ csincw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LE);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct cmovI_reg_imm1_gt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, $src1, zr gt\t"  %}

  ins_encode %{
    __ csincw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (lt ? src1 : ~zr) — CSINV with zr yields the constant -1 on the
// false path.
instruct cmovI_reg_immM1_lt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csinvw $dst, $src1, zr lt\t"  %}

  ins_encode %{
    __ csinvw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct cmovI_reg_immM1_ge(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csinvw $dst, $src1, zr ge\t"  %}

  ins_encode %{
    __ csinvw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GE);
  %}
  ins_pipe(icond_reg);
%}
13847 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- fold any comment changes back into aarch64_ad.m4.
// MinI(src, 0): compare src against 0 (compI_reg_imm0, defined earlier in the
// file), then select src when LT else 0.
instruct minI_reg_imm0(iRegINoSp dst, iRegIorL2I src, immI0 imm)
%{
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Commuted form of minI_reg_imm0 (MinI is commutative).
instruct minI_imm0_reg(iRegINoSp dst, immI0 imm, iRegIorL2I src)
%{
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// MinI(src, 1): compare src against 0, then src when LE (src <= 0 < 1)
// else the constant 1 materialized by CSINC of zr.
instruct minI_reg_imm1(iRegINoSp dst, iRegIorL2I src, immI_1 imm)
%{
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_le(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Commuted form of minI_reg_imm1.
instruct minI_imm1_reg(iRegINoSp dst, immI_1 imm, iRegIorL2I src)
%{
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_le(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// MinI(src, -1): compare src against 0, then src when LT (src < 0 implies
// src <= -1) else the constant -1 materialized by CSINV of zr.
instruct minI_reg_immM1(iRegINoSp dst, iRegIorL2I src, immI_M1 imm)
%{
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Commuted form of minI_reg_immM1.
instruct minI_immM1_reg(iRegINoSp dst, immI_M1 imm, iRegIorL2I src)
%{
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_lt(dst, src, cr);
  %}
%}
13925 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- fold any comment changes back into aarch64_ad.m4.
// MaxI(src, 0): compare src against 0, then select src when GT else 0.
instruct maxI_reg_imm0(iRegINoSp dst, iRegIorL2I src, immI0 imm)
%{
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Commuted form of maxI_reg_imm0 (MaxI is commutative).
instruct maxI_imm0_reg(iRegINoSp dst, immI0 imm, iRegIorL2I src)
%{
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// MaxI(src, 1): compare src against 0, then src when GT (src >= 1)
// else the constant 1 materialized by CSINC of zr.
instruct maxI_reg_imm1(iRegINoSp dst, iRegIorL2I src, immI_1 imm)
%{
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Commuted form of maxI_reg_imm1.
instruct maxI_imm1_reg(iRegINoSp dst, immI_1 imm, iRegIorL2I src)
%{
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// MaxI(src, -1): compare src against 0, then src when GE (src >= 0 > -1)
// else the constant -1 materialized by CSINV of zr.
instruct maxI_reg_immM1(iRegINoSp dst, iRegIorL2I src, immI_M1 imm)
%{
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_ge(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Commuted form of maxI_reg_immM1.
instruct maxI_immM1_reg(iRegINoSp dst, immI_M1 imm, iRegIorL2I src)
%{
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_ge(dst, src, cr);
  %}
%}
14003 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- fold any comment changes back into aarch64_ad.m4.
// Reverse the bit order of a 32-bit value (Integer.reverse intrinsic) via RBIT (W form).
instruct bits_reverse_I(iRegINoSp dst, iRegIorL2I src)
%{
  match(Set dst (ReverseI src));
  ins_cost(INSN_COST);
  format %{ "rbitw  $dst, $src" %}
  ins_encode %{
    __ rbitw($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Reverse the bit order of a 64-bit value (Long.reverse intrinsic) via RBIT (X form).
instruct bits_reverse_L(iRegLNoSp dst, iRegL src)
%{
  match(Set dst (ReverseL src));
  ins_cost(INSN_COST);
  format %{ "rbit  $dst, $src" %}
  ins_encode %{
    __ rbit($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
14029 
14030 
14031 // END This section of the file is automatically generated. Do not edit --------------
14032 
14033 
14034 // ============================================================================
14035 // Floating Point Arithmetic Instructions
14036 
// Single-precision FP add: dst = src1 + src2 (FADDS).
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP add: dst = src1 + src2 (FADDD).
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Single-precision FP subtract: dst = src1 - src2 (FSUBS).
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP subtract: dst = src1 - src2 (FSUBD).
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Single-precision FP multiply: dst = src1 * src2 (FMULS).
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP multiply: dst = src1 * src2 (FMULD).
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
14126 
// src1 * src2 + src3
// Fused multiply-add, single precision (FMADDS). Guarded by UseFMA because
// the fused form differs from separate mul+add in rounding.
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 + src3
// Fused multiply-add, double precision (FMADDD).
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
14160 
// -src1 * src2 + src3
// Fused multiply-subtract, single precision (FMSUBS: src3 - src1*src2).
// Two match rules cover the negation appearing on either multiplicand.
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
// Fused multiply-subtract, double precision (FMSUBD).
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
14196 
// -src1 * src2 - src3
// Negated fused multiply-add, single precision (FNMADDS: -src3 - src1*src2).
// Both multiplicand-negation shapes are matched.
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
// Negated fused multiply-add, double precision (FNMADDD).
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
14232 
// src1 * src2 - src3
// Single precision (FNMSUBS: src1*src2 - src3).
// NOTE(review): the 'zero' operand is not used by the encoding; presumably a
// leftover from an earlier match shape -- confirm before removing.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
// Double precision. Same unused-'zero' note as mnsubF_reg_reg applies.
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
  // n.b. insn name should be fnmsubd; the assembler exposes the double form
  // under the name fnmsub.
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
14267 
14268 
// Math.max(FF)F
// Single-precision max via FMAXS.
instruct maxF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxF src1 src2));

  format %{ "fmaxs   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.min(FF)F
// Single-precision min via FMINS.
instruct minF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinF src1 src2));

  format %{ "fmins   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.max(DD)D
// Double-precision max via FMAXD.
instruct maxD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MaxD src1 src2));

  format %{ "fmaxd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Math.min(DD)D
// Double-precision min via FMIND.
instruct minD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MinD src1 src2));

  format %{ "fmind   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
14324 
14325 
// Single-precision FP divide (FDIVS). Costed higher than add/mul to reflect
// the long-latency divide unit.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// Double-precision FP divide (FDIVD). Costed higher than the single form.
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
14355 
// Single-precision FP negate (FNEGS).
// NOTE(review): format prints "fneg" although the emitted insn is fnegs --
// cosmetic mismatch in the disassembly annotation only.
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  format %{ "fneg   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double-precision FP negate (FNEGD).
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
14383 
// Integer abs: compare with zero then conditionally negate (CNEGW).
// KILL cr because cmpw clobbers the flags.
instruct absI_reg(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (AbsI src));

  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $src, zr\n\t"
            "cnegw $dst, $src, Assembler::LT\t# int abs"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cnegw(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}

// Long abs: 64-bit variant of absI_reg (CMP + CNEG).
instruct absL_reg(iRegLNoSp dst, iRegL src, rFlagsReg cr)
%{
  match(Set dst (AbsL src));

  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmp  $src, zr\n\t"
            "cneg $dst, $src, Assembler::LT\t# long abs"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cneg(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
14417 
// Single-precision FP absolute value (FABSS).
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double-precision FP absolute value (FABSD).
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
14443 
// Fused |src1 - src2|, single precision: matches Abs-of-Sub and emits a
// single FABD instead of fsubs + fabss.
instruct absdF_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AbsF (SubF src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "fabds   $dst, $src1, $src2" %}
  ins_encode %{
    __ fabds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Fused |src1 - src2|, double precision (FABD).
instruct absdD_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AbsD (SubD src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "fabdd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fabdd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
14471 
// Double-precision FP square root (FSQRTD).
// Fix: pipe class was fp_div_s (single-precision divide/sqrt pipe); use the
// double-precision pipe fp_div_d to match divD_reg_reg. Scheduling-model
// change only; the emitted instruction is unchanged.
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_div_d);
%}
14484 
// Single-precision FP square root (FSQRTS).
// Fix: pipe class was fp_div_d (double-precision divide/sqrt pipe); use the
// single-precision pipe fp_div_s to match divF_reg_reg. Scheduling-model
// change only; the emitted instruction is unchanged.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (SqrtF src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_div_s);
%}
14497 
// Math.rint, floor, ceil
// Double-precision rounding: selects the FRINT variant from the constant
// rounding mode (rint -> FRINTN, floor -> FRINTM, ceil -> FRINTP).
instruct roundD_reg(vRegD dst, vRegD src, immI rmode) %{
  match(Set dst (RoundDoubleMode src rmode));
  format %{ "frint  $dst, $src, $rmode" %}
  ins_encode %{
    switch ($rmode$$constant) {
      case RoundDoubleModeNode::rmode_rint:
        __ frintnd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_floor:
        __ frintmd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_ceil:
        __ frintpd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      default:
        // An unexpected mode must not silently emit no instruction.
        ShouldNotReachHere();
    }
  %}
  ins_pipe(fp_uop_d);
%}
14520 
// Math.copySign(double): build a sign-bit mask in dst (fnegd of +0.0 gives
// 0x8000...0), then BSL merges the sign bit of src2 with the magnitude of src1.
// TEMP_DEF dst because dst is written before all inputs are consumed.
instruct copySignD_reg(vRegD dst, vRegD src1, vRegD src2, vRegD zero) %{
  match(Set dst (CopySignD src1 (Binary src2 zero)));
  effect(TEMP_DEF dst, USE src1, USE src2, USE zero);
  format %{ "CopySignD  $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg),
                  zero = as_FloatRegister($zero$$reg);
    __ fnegd(dst, zero);
    __ bsl(dst, __ T8B, src2, src1);
  %}
  ins_pipe(fp_uop_d);
%}

// Math.copySign(float): materialize the float sign-bit mask 0x80000000 with
// MOVI (0x80 shifted left 24), then BSL as in the double form.
// NOTE(review): pipe class is fp_uop_d for a single-precision op -- presumably
// intentional since the NEON bsl operates on the full D register; confirm.
instruct copySignF_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (CopySignF src1 src2));
  effect(TEMP_DEF dst, USE src1, USE src2);
  format %{ "CopySignF  $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg);
    __ movi(dst, __ T2S, 0x80, 24);
    __ bsl(dst, __ T8B, src2, src1);
  %}
  ins_pipe(fp_uop_d);
%}
14549 
// Math.signum(double): branch-free select between src (for +-0.0/NaN) and
// +-1.0 (sign copied from src) using FACGT + USHR + BSL.
instruct signumD_reg(vRegD dst, vRegD src, vRegD zero, vRegD one) %{
  match(Set dst (SignumD src (Binary zero one)));
  effect(TEMP_DEF dst, USE src, USE zero, USE one);
  format %{ "signumD  $dst, $src" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg),
                  dst = as_FloatRegister($dst$$reg),
                  zero = as_FloatRegister($zero$$reg),
                  one = as_FloatRegister($one$$reg);
    __ facgtd(dst, src, zero); // dst=0 for +-0.0 and NaN. 0xFFF..F otherwise
    __ ushrd(dst, dst, 1);     // dst=0 for +-0.0 and NaN. 0x7FF..F otherwise
    // Bit selection instruction gets bit from "one" for each enabled bit in
    // "dst", otherwise gets a bit from "src". For "src" that contains +-0.0 or
    // NaN the whole "src" will be copied because "dst" is zero. For all other
    // "src" values dst is 0x7FF..F, which means only the sign bit is copied
    // from "src", and all other bits are copied from 1.0.
    __ bsl(dst, __ T8B, one, src);
  %}
  ins_pipe(fp_uop_d);
%}

// Math.signum(float): single-precision variant of signumD_reg (FACGTS,
// USHR on T2S, BSL).
instruct signumF_reg(vRegF dst, vRegF src, vRegF zero, vRegF one) %{
  match(Set dst (SignumF src (Binary zero one)));
  effect(TEMP_DEF dst, USE src, USE zero, USE one);
  format %{ "signumF  $dst, $src" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg),
                  dst = as_FloatRegister($dst$$reg),
                  zero = as_FloatRegister($zero$$reg),
                  one = as_FloatRegister($one$$reg);
    __ facgts(dst, src, zero);    // dst=0 for +-0.0 and NaN. 0xFFF..F otherwise
    __ ushr(dst, __ T2S, dst, 1); // dst=0 for +-0.0 and NaN. 0x7FF..F otherwise
    // Bit selection instruction gets bit from "one" for each enabled bit in
    // "dst", otherwise gets a bit from "src". For "src" that contains +-0.0 or
    // NaN the whole "src" will be copied because "dst" is zero. For all other
    // "src" values dst is 0x7FF..F, which means only the sign bit is copied
    // from "src", and all other bits are copied from 1.0.
    __ bsl(dst, __ T8B, one, src);
  %}
  ins_pipe(fp_uop_d);
%}
14591 
// Thread.onSpinWait intrinsic: defers to MacroAssembler::spin_wait(), whose
// expansion is configured by VM options.
instruct onspinwait() %{
  match(OnSpinWait);
  ins_cost(INSN_COST);

  format %{ "onspinwait" %}

  ins_encode %{
    __ spin_wait();
  %}
  ins_pipe(pipe_class_empty);
%}
14603 
14604 // ============================================================================
14605 // Logical Instructions
14606 
14607 // Integer Logical Instructions
14608 
14609 // And Instructions
14610 
14611 
// 32-bit register-register bitwise AND (ANDW).
// NOTE(review): the rFlagsReg cr operand is declared but unused by match,
// effect, and encoding -- presumably historical; confirm before removing.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14626 
// 32-bit AND with a logical immediate.
// Fix: format printed "andsw" (the flag-setting form) but the encoding emits
// the non-flag-setting andw -- disassembly annotation corrected to match.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14641 
14642 // Or Instructions
14643 
// 32-bit register-register bitwise OR (ORRW).
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit OR with a logical immediate (immILog guarantees the constant is
// encodable as an AArch64 bitmask immediate).
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14673 
14674 // Xor Instructions
14675 
// 32-bit register-register bitwise XOR (EORW).
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit XOR with a logical immediate.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14705 
14706 // Long Logical Instructions
14707 // TODO
14708 
// 64-bit register-register bitwise AND (AND).
// Fix: format annotation said "# int" (copy-paste from the int rules); these
// are long operations. Disassembly text change only.
// NOTE(review): the rFlagsReg cr operand is declared but unused -- presumably
// historical; confirm before removing.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 64-bit AND with a logical immediate (immLLog guarantees encodability as an
// AArch64 bitmask immediate).
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14738 
14739 // Or Instructions
14740 
// 64-bit register-register bitwise OR (ORR).
// Fix: format annotation said "# int"; these are long operations.
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 64-bit OR with a logical immediate.
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14770 
14771 // Xor Instructions
14772 
// 64-bit register-register bitwise XOR (EOR).
// Fix: format annotation said "# int"; these are long operations.
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 64-bit XOR with a logical immediate.
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14802 
// Sign-extending int -> long conversion.  SBFM with immr=0, imms=31 is
// the canonical encoding of the SXTW alias.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// Zero-extending (unsigned) int -> long: matches the idiom
// (AndL (ConvI2L src) 0xFFFFFFFF) produced by BigInteger-style code.
// UBFM 0,31 is the canonical encoding of the UXTW/zero-extend form.
// this pattern occurs in bigmath arithmetic
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// long -> int narrowing: a 32-bit register move keeps the low 32 bits
// (writing a W register zeroes the upper half of the X register).
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
14841 
// double -> float precision conversion (FCVT, double source form).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// float -> double precision conversion (FCVT, single source form).
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// float -> int: signed convert with round-toward-zero into a W register.
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// float -> long: signed convert with round-toward-zero into an X register.
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}

// float -> half-float (FP16) bit pattern, delivered in an int register.
// flt_to_flt16 converts in $tmp then moves the 16-bit result to $dst.
instruct convF2HF_reg_reg(iRegINoSp dst, vRegF src, vRegF tmp) %{
  match(Set dst (ConvF2HF src));
  format %{ "fcvt $tmp, $src\t# convert single to half precision\n\t"
            "smov $dst, $tmp\t# move result from $tmp to $dst"
  %}
  effect(TEMP tmp);
  ins_encode %{
      __ flt_to_flt16($dst$$Register, $src$$FloatRegister, $tmp$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}

// half-float (FP16) bit pattern in an int register -> float.
// flt16_to_flt moves the bits into $tmp then widens into $dst.
instruct convHF2F_reg_reg(vRegF dst, iRegINoSp src, vRegF tmp) %{
  match(Set dst (ConvHF2F src));
  format %{ "mov $tmp, $src\t# move source from $src to $tmp\n\t"
            "fcvt $dst, $tmp\t# convert half to single precision"
  %}
  effect(TEMP tmp);
  ins_encode %{
      __ flt16_to_flt($dst$$FloatRegister, $src$$Register, $tmp$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
14917 
// int -> float: signed convert from a W register (SCVTF, word/single form).
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// long -> float: signed convert from an X register (SCVTF, 64-bit/single form).
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

// double -> int: signed convert with round-toward-zero into a W register.
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// double -> long: signed convert with round-toward-zero into an X register.
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// int -> double: signed convert from a W register (SCVTF, word/double form).
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// long -> double: signed convert from an X register (SCVTF, 64-bit/double form).
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
14995 
// Math.round(double) -> long.  Delegates to the macro-assembler's
// java_round_double, which implements Java rounding semantics; it needs a
// scratch FP register and clobbers the flags, hence TEMP ftmp / KILL cr.
instruct round_double_reg(iRegLNoSp dst, vRegD src, vRegD ftmp, rFlagsReg cr)
%{
  match(Set dst (RoundD src));
  effect(TEMP_DEF dst, TEMP ftmp, KILL cr);
  format %{ "java_round_double $dst,$src"%}
  ins_encode %{
    __ java_round_double($dst$$Register, as_FloatRegister($src$$reg),
                         as_FloatRegister($ftmp$$reg));
  %}
  ins_pipe(pipe_slow);
%}

// Math.round(float) -> int.  Same structure as round_double_reg, using the
// single-precision macro-assembler helper.
instruct round_float_reg(iRegINoSp dst, vRegF src, vRegF ftmp, rFlagsReg cr)
%{
  match(Set dst (RoundF src));
  effect(TEMP_DEF dst, TEMP ftmp, KILL cr);
  format %{ "java_round_float $dst,$src"%}
  ins_encode %{
    __ java_round_float($dst$$Register, as_FloatRegister($src$$reg),
                        as_FloatRegister($ftmp$$reg));
  %}
  ins_pipe(pipe_slow);
%}
15019 
15020 // stack <-> reg and reg <-> reg shuffles with no conversion
15021 
// Raw-bits move: float spilled on the stack -> int register (no conversion,
// just a 32-bit load from the stack slot).
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  // NOTE(review): integer-side loads here use iload_reg_reg while the
  // FP-side loads below use pipe_class_memory — confirm this asymmetry
  // is intentional.
  ins_pipe(iload_reg_reg);

%}

// Raw-bits move: int spilled on the stack -> float register (32-bit FP load).
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Raw-bits move: double spilled on the stack -> long register (64-bit load).
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Raw-bits move: long spilled on the stack -> double register (64-bit FP load).
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
15093 
// Raw-bits move: float register -> int stack slot (32-bit FP store).
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Raw-bits move: int register -> float stack slot (32-bit store).
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}

// Raw-bits move: double register -> long stack slot (64-bit FP store).
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Fixed: format previously printed "strd $dst, $src", with the operands
  // swapped relative to the actual store (src register -> dst slot) and to
  // every sibling reg->stack pattern here.
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Raw-bits move: long register -> double stack slot (64-bit store).
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
15165 
// Raw-bits move: float register -> int register via FMOV (no conversion).
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// Raw-bits move: int register -> float register via FMOV.
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// Raw-bits move: double register -> long register via FMOV (64-bit).
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// Raw-bits move: long register -> double register via FMOV (64-bit).
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
15237 
15238 // ============================================================================
15239 // clearing of an array
15240 
// Zero a word-aligned region: count (in words) in r11, base in r10.
// zero_words may emit a call to a stub, so it can fail if the code cache
// is full; that failure is reported via a NULL return and recorded here.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base, KILL cr);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    // tpc == NULL means zero_words could not emit its stub call.
    address tpc = __ zero_words($base$$Register, $cnt$$Register);
    if (tpc == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}

// Constant-length variant: used only when the compile-time word count is
// below the block-zeroing threshold (see predicate), so an inline store
// sequence beats the DC ZVA / stub path.
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, iRegL_R11 temp, Universe dummy, rFlagsReg cr)
%{
  predicate((uint64_t)n->in(2)->get_long()
            < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
  match(Set dummy (ClearArray cnt base));
  effect(TEMP temp, USE_KILL base, KILL cr);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    address tpc = __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
    if (tpc == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
15280 
15281 // ============================================================================
15282 // Overflow Math Instructions
15283 
// Overflow check for int add: CMNW (adds to zr) sets V on signed overflow
// without producing the sum.
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Overflow check for int add with an add/sub-encodable immediate.
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for long add (64-bit CMN).
instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Overflow check for long add with immediate: ADDS discarding into zr
// (same effect as CMN with an immediate).
instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "adds  zr, $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ adds(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
15335 
// Overflow check for int subtract: CMPW sets V on signed overflow.
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Overflow check for int subtract with an add/sub-encodable immediate.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for long subtract (64-bit CMP).
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Overflow check for long subtract with immediate: SUBS discarding into zr.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ subs(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for int negation (0 - op1): compare zr against op1.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for long negation (0 - op1).
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
15413 
// Overflow check for int multiply.  AArch64 multiplies do not set flags,
// so the 64-bit product is computed with SMULL and compared against its
// own 32-bit sign extension: a mismatch means the product does not fit in
// an int.  The tail (movw/cselw/cmpw) then re-materializes that result as
// the V flag so downstream code can branch on VS/VC as usual.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused form: int multiply-overflow check feeding a branch directly.
// Only matches overflow/no_overflow tests (see predicate), so the V-flag
// materialization above is unnecessary — branch on the NE/EQ comparison.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // Map the overflow test onto the NE/EQ outcome of the comparison.
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}

// Overflow check for long multiply: compute low 64 bits (MUL) and high 64
// bits (SMULH); the product fits in a long iff the high half equals the
// sign extension of the low half (ASR #63).  Tail materializes V as above.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused form: long multiply-overflow check feeding a branch directly.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
15503 
15504 // ============================================================================
15505 // Compare Instructions
15506 
// Signed int compare, register-register.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed int compare against the constant zero.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare against an add/sub-encodable immediate (single insn).
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare against an arbitrary immediate; costs more because
// the constant may need to be materialized first (see ins_cost).
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15562 
15563 // Unsigned compare Instructions; really, same as signed compare
15564 // except it should only be used to feed an If or a CMovI which takes a
15565 // cmpOpU.
15566 
// Unsigned int compare, register-register.  Same CMPW encoding as the
// signed form; the rFlagsRegU result type steers consumers to unsigned
// condition codes.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned int compare against the constant zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an arbitrary immediate (constant may need
// materialization, hence the higher cost).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15622 
// Signed long compare, register-register (64-bit CMP).
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed long compare against the constant zero.
// NOTE(review): format prints "tst" but the encoding is the add/sub
// compare-with-immediate-0 form — confirm the mnemonic is intended.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an arbitrary immediate (constant may need
// materialization, hence the higher cost).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15678 
// Unsigned long compare, register-register.  Same 64-bit CMP encoding as
// the signed form; rFlagsRegU steers consumers to unsigned conditions.
instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned long compare against the constant zero.
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare against an add/sub-encodable immediate.
instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare against an arbitrary immediate.
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15734 
// Pointer compare, register-register (unsigned flags, as pointer order is
// unsigned).
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed-oop (narrow pointer) compare, register-register.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Pointer null test (compare against the null constant).
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Compressed-oop null test.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
15790 
15791 // FP comparisons
15792 //
15793 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
15794 // using normal cmpOp. See declaration of rFlagsReg for details.
15795 
// float compare, register-register (FCMPS sets the normal flags register).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// float compare against literal 0.0 (FCMPS zero form, no second register).
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
// Double-precision counterparts of the two float compares above.

// double compare, register-register (FCMPD).
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// double compare against literal 0.0 (FCMPD zero form).
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
15852 
15853 instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
15854 %{
15855   match(Set dst (CmpF3 src1 src2));
15856   effect(KILL cr);
15857 
15858   ins_cost(5 * INSN_COST);
15859   format %{ "fcmps $src1, $src2\n\t"
15860             "csinvw($dst, zr, zr, eq\n\t"
15861             "csnegw($dst, $dst, $dst, lt)"
15862   %}
15863 
15864   ins_encode %{
15865     Label done;
15866     FloatRegister s1 = as_FloatRegister($src1$$reg);
15867     FloatRegister s2 = as_FloatRegister($src2$$reg);
15868     Register d = as_Register($dst$$reg);
15869     __ fcmps(s1, s2);
15870     // installs 0 if EQ else -1
15871     __ csinvw(d, zr, zr, Assembler::EQ);
15872     // keeps -1 if less or unordered else installs 1
15873     __ csnegw(d, d, d, Assembler::LT);
15874     __ bind(done);
15875   %}
15876 
15877   ins_pipe(pipe_class_default);
15878 
15879 %}
15880 
15881 instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
15882 %{
15883   match(Set dst (CmpD3 src1 src2));
15884   effect(KILL cr);
15885 
15886   ins_cost(5 * INSN_COST);
15887   format %{ "fcmpd $src1, $src2\n\t"
15888             "csinvw($dst, zr, zr, eq\n\t"
15889             "csnegw($dst, $dst, $dst, lt)"
15890   %}
15891 
15892   ins_encode %{
15893     Label done;
15894     FloatRegister s1 = as_FloatRegister($src1$$reg);
15895     FloatRegister s2 = as_FloatRegister($src2$$reg);
15896     Register d = as_Register($dst$$reg);
15897     __ fcmpd(s1, s2);
15898     // installs 0 if EQ else -1
15899     __ csinvw(d, zr, zr, Assembler::EQ);
15900     // keeps -1 if less or unordered else installs 1
15901     __ csnegw(d, d, d, Assembler::LT);
15902     __ bind(done);
15903   %}
15904   ins_pipe(pipe_class_default);
15905 
15906 %}
15907 
// Three-way float compare against constant 0.0; uses the immediate-zero
// form of fcmps.  Result convention as in compF3_reg_reg.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
15934 
// Three-way double compare against constant 0.0; uses the immediate-zero
// form of fcmpd.  Result convention as in compD3_reg_reg.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
15960 
// dst = (p < q) ? -1 : 0 (signed 32-bit compare producing an all-ones
// or all-zeros mask).  Kills the flags.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    Register d = $dst$$Register;
    // Set flags from p - q, materialize LT as 0/1, then negate to 0/-1.
    __ cmpw($p$$Register, $q$$Register);
    __ csetw(d, Assembler::LT);
    __ subw(d, zr, d);
  %}

  ins_pipe(ialu_reg_reg);
%}
15981 
// dst = (src < 0) ? -1 : 0.  A single arithmetic right shift replicates
// the sign bit across the word, so no compare is needed.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw($dst$$Register, $src$$Register, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
15997 
15998 // ============================================================================
15999 // Max and Min
16000 
16001 // Like compI_reg_reg or compI_reg_immI0 but without match rule and second zero parameter.
16002 
// Flag-setting compare of $src against zero; produces no result register.
// Has no match rule (explicit DEF/DEF effects instead) because it exists
// only as a building block for expand rules.
instruct compI_reg_imm0(rFlagsReg cr, iRegI src)
%{
  effect(DEF cr, USE src);
  ins_cost(INSN_COST);
  format %{ "cmpw $src, 0" %}

  ins_encode %{
    __ cmpw($src$$Register, 0);
  %}
  ins_pipe(icmp_reg_imm);
%}
16014 
// Signed int minimum: expands into a flag-setting compare followed by a
// conditional move on LT, so no branch is emitted.
instruct minI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2)
%{
  match(Set dst (MinI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_lt(dst, src1, src2, cr);
  %}
%}
16026 
// Signed int maximum: expands into a flag-setting compare followed by a
// conditional move on GT, so no branch is emitted.
instruct maxI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2)
%{
  match(Set dst (MaxI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_gt(dst, src1, src2, cr);
  %}
%}
16038 
16039 
16040 // ============================================================================
16041 // Branch Instructions
16042 
16043 // Direct Branch.
// Unconditional branch to a label (Goto node).
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
16057 
16058 // Conditional Near Branch
// Conditional branch on the signed condition codes in cr.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
16078 
16079 // Conditional Near Branch Unsigned
// Conditional branch on the unsigned condition codes in cr.
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
16099 
16100 // Make use of CBZ and CBNZ.  These instructions, as well as being
16101 // shorter than (cmp; branch), have the additional benefit of not
16102 // killing the flags.
16103 
// Fused int compare-against-zero and branch: emits cbzw (EQ) or
// cbnzw (NE).  Per the note above, CBZ/CBNZ do not modify the flags.
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16120 
// Fused long compare-against-zero and branch: emits cbz (EQ) or cbnz (NE).
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16137 
// Fused pointer null-check and branch: emits cbz (EQ) or cbnz (NE).
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16154 
// Fused narrow-oop null-check and branch: compressed oops are 32 bits,
// so the word forms cbzw/cbnzw are used.
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16171 
// Null-check of a DecodeN'd oop: a decoded oop is null iff the narrow oop
// is zero, so the decode can be skipped and the 32-bit register tested
// directly with cbzw/cbnzw.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16188 
// Fused unsigned int compare-against-zero and branch.  Against a zero
// operand, unsigned "lower or same" (LS) is equivalent to EQ and
// "higher" to NE, so EQ/LS map to cbzw and the rest to cbnzw.
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16205 
// Fused unsigned long compare-against-zero and branch; same EQ/LS -> cbz,
// NE/HI -> cbnz mapping as the int variant above, using the 64-bit forms.
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16222 
16223 // Test bit and Branch
16224 
16225 // Patterns for short (< 32KiB) variants
// Branch on the sign of a long: x < 0 / x >= 0 is decided entirely by
// bit 63, so a single test-bit-and-branch suffices (LT -> bit set -> NE;
// GE -> bit clear -> EQ).
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16241 
// Branch on the sign of an int: decided entirely by bit 31, tested with
// a single tbz/tbnz (LT -> NE on the bit, GE -> EQ).
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16257 
// Branch on a single bit of a long: when the AND mask is a power of two,
// (x & mask) ==/!= 0 collapses to one test-bit-and-branch instruction.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // The predicate guarantees the mask is a power of two, so exact_log2
    // yields the tested bit's index.
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16274 
// Branch on a single bit of an int; see cmpL_branch_bit for the pattern.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16291 
16292 // And far variants
// Far variant of cmpL_branch_sign: same sign-bit test but the far form of
// tbr is used for targets beyond tbz/tbnz range (no ins_short_branch).
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16307 
// Far variant of cmpI_branch_sign (see above).
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16322 
// Far variant of cmpL_branch_bit (see above).
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16338 
// Far variant of cmpI_branch_bit (see above).
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16354 
16355 // Test bits
16356 
// Set flags from (op1 & op2) where op2 is a constant mask; the predicate
// restricts the mask to values encodable as a 64-bit logical immediate.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
16369 
// Set flags from (op1 & op2) where op2 is a constant mask; the predicate
// restricts the mask to values encodable as a 32-bit logical immediate.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  // Format says "tstw" to match the 32-bit instruction actually emitted
  // (and the sibling cmpI_and_reg rule below).
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
16382 
// Set flags from (op1 & op2) with a register mask (64-bit tst).
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
16393 
// Set flags from (op1 & op2) with a register mask (32-bit tstw).
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
16404 
16405 
16406 // Conditional Far Branch
16407 // Conditional Far Branch Unsigned
16408 // TODO: fixme
16409 
16410 // counted loop end branch near
// Conditional branch closing a counted loop; same encoding as branchCon.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
16426 
16427 // counted loop end branch far
16428 // TODO: fixme
16429 
16430 // ============================================================================
16431 // inlined locking and unlocking
16432 
// Inlined monitor enter; sets the flags for the caller to test.  All three
// temps are clobbered by MacroAssembler::fast_lock.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2, TEMP tmp3);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  // List all three temps as killed, matching the TEMP effects above.
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2,$tmp3" %}

  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_serial);
%}
16449 
// Inlined monitor exit; sets the flags for the caller to test.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register);
  %}

  ins_pipe(pipe_serial);
%}
16464 
16465 
16466 // ============================================================================
16467 // Safepoint Instructions
16468 
16469 // TODO
16470 // provide a near and far version of this code
16471 
// Safepoint poll: load from the polling page; a protected page faults and
// traps into the VM when a safepoint is pending.
instruct safePoint(rFlagsReg cr, iRegP poll)
%{
  match(SafePoint poll);
  effect(KILL cr);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
16485 
16486 
16487 // ============================================================================
16488 // Procedure Call/Return Instructions
16489 
16490 // Call Java Static Instruction
16491 
// Direct call to a statically-bound Java method.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode(aarch64_enc_java_static_call(meth),
             aarch64_enc_call_epilog);

  ins_pipe(pipe_class_call);
%}
16507 
16508 // TO HERE
16509 
16510 // Call Java Dynamic Instruction
// Dynamically-dispatched Java call (inline-cache based).
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode(aarch64_enc_java_dynamic_call(meth),
             aarch64_enc_call_epilog);

  ins_pipe(pipe_class_call);
%}
16526 
16527 // Call Runtime Instruction
16528 
// Call into the VM runtime.
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16543 
16544 // Call Runtime Instruction
16545 
// Call to a runtime leaf routine (no safepoint, no Java frame walking).
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16560 
16561 // Call Runtime Instruction
16562 
// Call to a runtime leaf routine that does not use floating point.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16577 
16578 // Tail Call; Jump from runtime stub to Java code.
16579 // Also known as an 'interprocedural jump'.
16580 // Target of jump will eventually return to caller.
16581 // TailJump below removes the return address.
16582 // Don't use rfp for 'jump_target' because a MachEpilogNode has already been
16583 // emitted just above the TailCall which has reset rfp to the caller state.
// Indirect tail call: jump through jump_target; method_ptr carries the
// callee Method* (see the comment above for why rfp is excluded).
instruct TailCalljmpInd(iRegPNoSpNoRfp jump_target, inline_cache_RegP method_ptr)
%{
  match(TailCall jump_target method_ptr);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_ptr holds method" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
16596 
// Indirect tail jump used for exception forwarding; ex_oop (r0) carries
// the exception oop to the target.
instruct TailjmpInd(iRegPNoSpNoRfp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
16609 
16610 // Create exception oop: created by stack-crawling runtime code.
16611 // Created exception is now available to this handler, and is setup
16612 // just prior to jumping to this handler. No code emitted.
16613 // TODO check
16614 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Zero-size marker: tells the register allocator the exception oop is
// already in r0 on entry to the handler; emits no instructions.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
16627 
16628 // Rethrow exception: The exception oop will come in the first
16629 // argument position. Then JUMP (not call) to the rethrow stub code.
// Jump (not call) to the shared rethrow stub; the exception oop arrives
// in the first argument register.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
16640 
16641 
16642 // Return Instruction
16643 // epilog node loads ret address into lr as part of frame pop
// Method return; lr was loaded by the epilog as part of the frame pop.
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
16654 
16655 // Die now.
// Halt node: stop the VM with a diagnostic message.  Emits nothing when
// the node is provably unreachable.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    if (is_reachable()) {
      __ stop(_halt_reason);
    }
  %}

  ins_pipe(pipe_class_default);
%}
16670 
16671 // ============================================================================
16672 // Partial Subtype Check
16673 //
// Search for the superklass in the secondary-supers (superklass) array of
// an instance of the subklass.  Set a hidden internal cache on a hit
// (cache is checked with exposed code in gen_subtype_check()).  Return NZ
// for a miss or zero for a hit.  The encoding ALSO sets flags.
16678 
// Partial subtype check via the shared encoding; opcode 0x1 forces the
// result register to zero on a hit (see the header comment above).
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
16693 
// Subtype check against a constant superklass using the secondary supers
// hash table (UseSecondarySupersTable).  Either inlines the table lookup
// or calls a per-slot stub; bails out of compilation if the code cache
// is full.
instruct partialSubtypeCheckConstSuper(iRegP_R4 sub, iRegP_R0 super_reg, immP super_con, vRegD_V0 vtemp, iRegP_R5 result,
                                       iRegP_R1 tempR1, iRegP_R2 tempR2, iRegP_R3 tempR3,
                                       rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub (Binary super_reg super_con)));
  predicate(UseSecondarySupersTable);
  effect(KILL cr, TEMP tempR1, TEMP tempR2, TEMP tempR3, TEMP vtemp);

  ins_cost(700);  // smaller than the next version
  format %{ "partialSubtypeCheck $result, $sub, super" %}

  ins_encode %{
    bool success = false;
    // The superklass's hash slot selects which lookup stub / table slot to use.
    u1 super_klass_slot = ((Klass*)$super_con$$constant)->hash_slot();
    if (InlineSecondarySupersTest) {
      success = __ lookup_secondary_supers_table($sub$$Register, $super_reg$$Register,
                                                 $tempR1$$Register, $tempR2$$Register, $tempR3$$Register,
                                                 $vtemp$$FloatRegister,
                                                 $result$$Register,
                                                 super_klass_slot);
    } else {
      address call = __ trampoline_call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_stub(super_klass_slot)));
      success = (call != nullptr);
    }
    if (!success) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
16726 
// Partial subtype check where only the flags are consumed (result compared
// against zero); opcode 0x0 skips zeroing the result register on a hit.
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
16741 
16742 // Intrisics for String.compareTo()
16743 
// String.compareTo intrinsic, both strings UTF-16 (UU); NEON path,
// selected only when SVE is unavailable (UseSVE == 0).
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, pnoreg, pnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16761 
// String.compareTo intrinsic, both strings Latin-1 (LL); NEON path.
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, pnoreg, pnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16778 
// String.compareTo intrinsic, UTF-16 vs Latin-1 (UL); NEON path — the
// mixed encoding needs three vector temps for widening.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, pnoreg, pnoreg, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16798 
// String.compareTo intrinsic, Latin-1 vs UTF-16 (LU); NEON path — mirror
// of the UL variant above.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, pnoreg, pnoreg, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
16818 
16819 // Note that Z registers alias the corresponding NEON registers, we declare the vector operands of
16820 // these string_compare variants as NEON register type for convenience so that the prototype of
16821 // string_compare can be shared with all variants.
16822 
// SVE variant of the LL compare (UseSVE > 0); uses two governing
// predicate temps.  See the note above on Z/NEON register aliasing.
instruct string_compareLL_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16845 
// String compare, SVE variant (UseSVE > 0), encoding LU.
// Same register layout as string_compareLL_sve; only the intrinsic encoding
// passed to string_compare differs.
instruct string_compareLU_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
16868 
// String compare, SVE variant (UseSVE > 0), encoding UL.
// Same register layout as string_compareLL_sve; only the intrinsic encoding
// passed to string_compare differs.
instruct string_compareUL_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16891 
// String compare, SVE variant (UseSVE > 0), encoding UU (both strings UTF16).
// Same register layout as string_compareLL_sve; only the intrinsic encoding
// passed to string_compare differs.
instruct string_compareUU_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16914 
// String indexOf with a variable-length needle, encoding UU.
// The -1 passed as icnt2 tells string_indexof that the needle length is not
// a compile-time constant and must be read from $cnt2 (contrast with the
// string_indexof_con* variants below, which pass the immediate length).
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                          iRegINoSp tmp3, iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
         TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16938 
// String indexOf with a variable-length needle, encoding LL.
// Identical structure to string_indexofUU; only the encoding passed to
// string_indexof differs.
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
         TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16962 
// String indexOf with a variable-length needle, encoding UL.
// Identical structure to string_indexofUU/LL; only the encoding differs.
// Fixes vs. previous revision: the format string said "cnt1" instead of
// "$cnt1", so the operand was never substituted in debug output (all sibling
// variants use "$cnt1"); also normalized spacing in the operand list and the
// effect() line wrapping to match the UU/LL variants.
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
         TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    // -1 => needle length is variable and read from $cnt2 at runtime.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16986 
// String indexOf with a constant-length needle, encoding UU.
// NOTE(review): the immI_le_4 operand type presumably restricts matching to
// needles of at most 4 chars -- confirm against the immI_le_4 definition.
// The constant length is forwarded as icnt2; zr is passed for the unused
// runtime-count and extra temp parameters.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
17008 
// String indexOf with a constant-length needle (<= 4 per immI_le_4),
// encoding LL. Same structure as string_indexof_conUU.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
17030 
// String indexOf with a constant-length needle, encoding UL.
// Note this variant matches only a length-1 needle (immI_1), unlike the
// UU/LL constant variants which accept immI_le_4.
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
17052 
// Find the first occurrence of a char in a UTF16 string, NEON variant
// (UseSVE == 0, encoding U). Result index is produced in R0.
instruct string_indexof_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                             iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                             iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  predicate((UseSVE == 0) && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "StringUTF16 IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
17071 
// Find the first occurrence of a char in a Latin1 string, NEON variant
// (UseSVE == 0, encoding L). Mirrors string_indexof_char but dispatches
// to the Latin1 macroassembler routine.
instruct stringL_indexof_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  predicate((UseSVE == 0) && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "StringLatin1 IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ stringL_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                            $result$$Register, $tmp1$$Register, $tmp2$$Register,
                            $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
17090 
// Find the first occurrence of a char in a Latin1 string, SVE variant
// (UseSVE > 0). ztmp1/ztmp2 are scalable vector temps; pgtmp/ptmp are
// predicate temps. The trailing 'true' selects the Latin1 (isL) path in
// string_indexof_char_sve.
instruct stringL_indexof_char_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                                  iRegI_R0 result, vecA ztmp1, vecA ztmp2,
                                  pRegGov pgtmp, pReg ptmp, rFlagsReg cr) %{
  predicate(UseSVE > 0 && ((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L);
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(TEMP ztmp1, TEMP ztmp2, TEMP pgtmp, TEMP ptmp, KILL cr);
  format %{ "StringLatin1 IndexOf char[] $str1,$cnt1,$ch -> $result # use sve" %}
  ins_encode %{
    __ string_indexof_char_sve($str1$$Register, $cnt1$$Register, $ch$$Register,
                               $result$$Register, $ztmp1$$FloatRegister,
                               $ztmp2$$FloatRegister, $pgtmp$$PRegister,
                               $ptmp$$PRegister, true /* isL */);
  %}
  ins_pipe(pipe_class_memory);
%}
17106 
// Find the first occurrence of a char in a UTF16 string, SVE variant
// (UseSVE > 0). Same shape as stringL_indexof_char_sve with isL == false.
instruct stringU_indexof_char_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                                  iRegI_R0 result, vecA ztmp1, vecA ztmp2,
                                  pRegGov pgtmp, pReg ptmp, rFlagsReg cr) %{
  predicate(UseSVE > 0 && ((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U);
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(TEMP ztmp1, TEMP ztmp2, TEMP pgtmp, TEMP ptmp, KILL cr);
  format %{ "StringUTF16 IndexOf char[] $str1,$cnt1,$ch -> $result # use sve" %}
  ins_encode %{
    __ string_indexof_char_sve($str1$$Register, $cnt1$$Register, $ch$$Register,
                               $result$$Register, $ztmp1$$FloatRegister,
                               $ztmp2$$FloatRegister, $pgtmp$$PRegister,
                               $ptmp$$PRegister, false /* isL */);
  %}
  ins_pipe(pipe_class_memory);
%}
17122 
// String equality for Latin1 strings (encoding LL); the trailing '1' is the
// element size in bytes passed to string_equals.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
17138 
// String equality for UTF16 strings (encoding UU); the trailing '2' is the
// element size in bytes passed to string_equals.
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
17154 
// Array equality for byte arrays (encoding LL); the trailing '1' is the
// element size in bytes. arrays_equals may emit a stub; a null return means
// the code cache is full and compilation must bail out.
// Fix vs. previous revision: compare against nullptr instead of NULL, for
// consistency with the rest of the file (see arrays_hashcode).
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                       vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3,
         TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         TEMP vtmp6, TEMP vtmp7, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result # KILL $ary1 $ary2 $tmp $tmp1 $tmp2 $tmp3 V0-V7 cr" %}
  ins_encode %{
    address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register,
                                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                                   $result$$Register, $tmp$$Register, 1);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
17179 
// Array equality for char arrays (encoding UU); the trailing '2' is the
// element size in bytes. A null return from arrays_equals means the code
// cache is full and compilation must bail out.
// Fix vs. previous revision: compare against nullptr instead of NULL, for
// consistency with the rest of the file (see arrays_hashcode).
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                       vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3,
         TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         TEMP vtmp6, TEMP vtmp7, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result # KILL $ary1 $ary2 $tmp $tmp1 $tmp2 $tmp3 V0-V7 cr" %}
  ins_encode %{
    address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register,
                                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                                   $result$$Register, $tmp$$Register, 2);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
17204 
// Vectorized Arrays.hashCode. basic_type selects the element type at encode
// time. NOTE(review): the vector temps are passed in the order v3,v2,v1,v0
// followed by v4..v9 -- presumably the order arrays_hashcode expects; confirm
// against the C2_MacroAssembler declaration before reordering.
instruct arrays_hashcode(iRegP_R1 ary, iRegI_R2 cnt, iRegI_R0 result, immI basic_type,
                         vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                         vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                         vRegD_V12 vtmp8, vRegD_V13 vtmp9, rFlagsReg cr)
%{
  match(Set result (VectorizedHashCode (Binary ary cnt) (Binary result basic_type)));
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5, TEMP vtmp6,
         TEMP vtmp7, TEMP vtmp8, TEMP vtmp9, USE_KILL ary, USE_KILL cnt, USE basic_type, KILL cr);

  format %{ "Array HashCode array[] $ary,$cnt,$result,$basic_type -> $result   // KILL all" %}
  ins_encode %{
    address tpc = __ arrays_hashcode($ary$$Register, $cnt$$Register, $result$$Register,
                                     $vtmp3$$FloatRegister, $vtmp2$$FloatRegister,
                                     $vtmp1$$FloatRegister, $vtmp0$$FloatRegister,
                                     $vtmp4$$FloatRegister, $vtmp5$$FloatRegister,
                                     $vtmp6$$FloatRegister, $vtmp7$$FloatRegister,
                                     $vtmp8$$FloatRegister, $vtmp9$$FloatRegister,
                                     (BasicType)$basic_type$$constant);
    // A null stub address means the code cache is full; abort the compile.
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
17230 
// Count the number of leading non-negative bytes in a byte array.
// A null return from count_positives means the code cache is full and
// compilation must bail out.
// Fix vs. previous revision: compare against nullptr instead of NULL, for
// consistency with the rest of the file (see arrays_hashcode).
instruct count_positives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (CountPositives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "count positives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    address tpc = __ count_positives($ary1$$Register, $len$$Register, $result$$Register);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe( pipe_slow );
%}
17245 
17246 // fast char[] to byte[] compression
17247 instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
17248                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
17249                          vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
17250                          iRegI_R0 result, rFlagsReg cr)
17251 %{
17252   match(Set result (StrCompressedCopy src (Binary dst len)));
17253   effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
17254          USE_KILL src, USE_KILL dst, USE len, KILL cr);
17255 
17256   format %{ "String Compress $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
17257   ins_encode %{
17258     __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
17259                            $result$$Register, $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
17260                            $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
17261                            $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
17262   %}
17263   ins_pipe(pipe_slow);
17264 %}
17265 
17266 // fast byte[] to char[] inflation
17267 instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len, iRegP_R3 tmp,
17268                         vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
17269                         vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, rFlagsReg cr)
17270 %{
17271   match(Set dummy (StrInflatedCopy src (Binary dst len)));
17272   effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3,
17273          TEMP vtmp4, TEMP vtmp5, TEMP vtmp6, TEMP tmp,
17274          USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);
17275 
17276   format %{ "String Inflate $src,$dst # KILL $tmp $src $dst $len V0-V6 cr" %}
17277   ins_encode %{
17278     address tpc = __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
17279                                         $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
17280                                         $vtmp2$$FloatRegister, $tmp$$Register);
17281     if (tpc == NULL) {
17282       ciEnv::current()->record_failure("CodeCache is full");
17283       return;
17284     }
17285   %}
17286   ins_pipe(pipe_class_memory);
17287 %}
17288 
17289 // encode char[] to byte[] in ISO_8859_1
17290 instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
17291                           vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
17292                           vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
17293                           iRegI_R0 result, rFlagsReg cr)
17294 %{
17295   predicate(!((EncodeISOArrayNode*)n)->is_ascii());
17296   match(Set result (EncodeISOArray src (Binary dst len)));
17297   effect(USE_KILL src, USE_KILL dst, USE len, KILL vtmp0, KILL vtmp1,
17298          KILL vtmp2, KILL vtmp3, KILL vtmp4, KILL vtmp5, KILL cr);
17299 
17300   format %{ "Encode ISO array $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
17301   ins_encode %{
17302     __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
17303                         $result$$Register, false,
17304                         $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
17305                         $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
17306                         $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
17307   %}
17308   ins_pipe(pipe_class_memory);
17309 %}
17310 
// encode char[] to byte[] in ASCII: same routine as encode_iso_array but
// with the ascii flag set to true; selected when the node's is_ascii() holds.
instruct encode_ascii_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                            vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                            vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                            iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((EncodeISOArrayNode*)n)->is_ascii());
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE len, KILL vtmp0, KILL vtmp1,
         KILL vtmp2, KILL vtmp3, KILL vtmp4, KILL vtmp5, KILL cr);

  format %{ "Encode ASCII array $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
                        $result$$Register, true,
                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                        $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                        $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
17331 
17332 //----------------------------- CompressBits/ExpandBits ------------------------
17333 
// 32-bit CompressBits (bit extract): moves src and mask into S lanes of
// vector temps, runs SVE BEXT on lane 0, and moves the result back to a GPR.
instruct compressBitsI_reg(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ S, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ S, 0, $mask$$Register);
    __ sve_bext($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17351 
// 32-bit CompressBits with a memory source and constant mask: loads the
// source directly from memory into a vector register and the mask from the
// constant pool ($constantaddress), then runs SVE BEXT as in the reg variant.
instruct compressBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits (LoadI mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrs   $tsrc, $mem\n\t"
            "ldrs   $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ ldrs($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bext($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17370 
// 64-bit CompressBits (bit extract): same shape as compressBitsI_reg but
// operating on D lanes.
instruct compressBitsL_reg(iRegLNoSp dst, iRegL src, iRegL mask,
                           vRegD tdst, vRegD tsrc, vRegD tmask) %{
  match(Set dst (CompressBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ D, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ D, 0, $mask$$Register);
    __ sve_bext($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17388 
// 64-bit CompressBits with a memory source and constant mask: loads the
// source from memory and the mask from the constant pool, then runs SVE BEXT
// on D lanes. (vRegF temps are fine here: V registers alias across sizes.)
instruct compressBitsL_memcon(iRegLNoSp dst, memory8 mem, immL mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits (LoadL mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrd   $tsrc, $mem\n\t"
            "ldrd   $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ ldrd($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bext($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17407 
// 32-bit ExpandBits (bit deposit): mirror of compressBitsI_reg using
// SVE BDEP instead of BEXT.
instruct expandBitsI_reg(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask,
                         vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ S, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ S, 0, $mask$$Register);
    __ sve_bdep($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17425 
// 32-bit ExpandBits with a memory source and constant mask: mirror of
// compressBitsI_memcon using SVE BDEP instead of BEXT.
instruct expandBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask,
                         vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits (LoadI mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrs   $tsrc, $mem\n\t"
            "ldrs   $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ ldrs($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bdep($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17444 
// ExpandBits (bit deposit) for a 64-bit value held in general-purpose
// registers. Same scheme as expandBitsI_reg but with D-sized (64-bit)
// lanes and long/double register classes throughout.
instruct expandBitsL_reg(iRegLNoSp dst, iRegL src, iRegL mask,
                         vRegD tdst, vRegD tsrc, vRegD tmask) %{
  match(Set dst (ExpandBits src mask));
  // All three vector temporaries are clobbered.
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    // Transfer source and mask into lane 0 (64-bit D element) of the temps.
    __ mov($tsrc$$FloatRegister, __ D, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ D, 0, $mask$$Register);
    // SVE2 bit-deposit; only lane 0 carries a meaningful value here.
    __ sve_bdep($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    // Extract lane 0 back to the general-purpose destination.
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17462 
17463 
// ExpandBits (bit deposit) of a 64-bit value loaded directly from memory,
// with a compile-time-constant mask materialized from the constant pool.
// Folding the LoadL into this rule avoids a GPR round-trip for the source.
//
// FIX: dst was declared iRegINoSp, but (ExpandBits (LoadL mem) mask)
// produces a long result; the destination must be a long register class,
// matching the sibling rule expandBitsL_reg.
instruct expandBitsL_memcon(iRegLNoSp dst, memory8 mem, immL mask,
                         vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits (LoadL mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrd   $tsrc, $mem\n\t"
            "ldrd   $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    // Load the 64-bit source straight into the vector temp (ldrd = 64-bit FP load).
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    // Materialize the immediate mask from the constant pool.
    __ ldrd($tmask$$FloatRegister, $constantaddress($mask));
    // SVE2 bit-deposit on D-sized lanes; only lane 0 is meaningful here.
    __ sve_bdep($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    // Move lane 0 of the result back to the general-purpose destination.
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17482 
17483 // ============================================================================
17484 // This name is KNOWN by the ADLC and cannot be changed.
// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
// for this instruction.
// Load of the current JavaThread pointer. The thread pointer permanently
// occupies the dedicated register described by thread_RegP (defined earlier
// in this file — presumably rthread; confirm against the register block),
// so the instruction emits no code: zero size, zero cost.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  // Free: the value is already in the dedicated register.
  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  // Emits no machine code.
  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
17501 
17502 //----------PEEPHOLE RULES-----------------------------------------------------
17503 // These must follow all instruction definitions as they use the names
17504 // defined in the instructions definitions.
17505 //
17506 // peepmatch ( root_instr_name [preceding_instruction]* );
17507 //
17508 // peepconstraint %{
17509 // (instruction_number.operand_name relational_op instruction_number.operand_name
17510 //  [, ...] );
17511 // // instruction numbers are zero-based using left to right order in peepmatch
17512 //
17513 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
17514 // // provide an instruction_number.operand_name for each operand that appears
17515 // // in the replacement instruction's match rule
17516 //
17517 // ---------VM FLAGS---------------------------------------------------------
17518 //
17519 // All peephole optimizations can be turned off using -XX:-OptoPeephole
17520 //
17521 // Each peephole rule is given an identifying number starting with zero and
17522 // increasing by one in the order seen by the parser.  An individual peephole
17523 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
17524 // on the command-line.
17525 //
17526 // ---------CURRENT LIMITATIONS----------------------------------------------
17527 //
17528 // Only match adjacent instructions in same basic block
17529 // Only equality constraints
17530 // Only constraints between operands, not (0.dest_reg == RAX_enc)
17531 // Only one replacement instruction
17532 //
17533 // ---------EXAMPLE----------------------------------------------------------
17534 //
17535 // // pertinent parts of existing instructions in architecture description
17536 // instruct movI(iRegINoSp dst, iRegI src)
17537 // %{
17538 //   match(Set dst (CopyI src));
17539 // %}
17540 //
17541 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
17542 // %{
17543 //   match(Set dst (AddI dst src));
17544 //   effect(KILL cr);
17545 // %}
17546 //
17547 // // Change (inc mov) to lea
17548 // peephole %{
17549 //   // increment preceded by register-register move
17550 //   peepmatch ( incI_iReg movI );
17551 //   // require that the destination register of the increment
17552 //   // match the destination register of the move
17553 //   peepconstraint ( 0.dst == 1.dst );
17554 //   // construct a replacement instruction that sets
17555 //   // the destination to ( move's source register + one )
17556 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
17557 // %}
17558 //
17559 
17560 // Implementation no longer uses movX instructions since
17561 // machine-independent system no longer uses CopyX nodes.
17562 //
17563 // peephole
17564 // %{
17565 //   peepmatch (incI_iReg movI);
17566 //   peepconstraint (0.dst == 1.dst);
17567 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17568 // %}
17569 
17570 // peephole
17571 // %{
17572 //   peepmatch (decI_iReg movI);
17573 //   peepconstraint (0.dst == 1.dst);
17574 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17575 // %}
17576 
17577 // peephole
17578 // %{
17579 //   peepmatch (addI_iReg_imm movI);
17580 //   peepconstraint (0.dst == 1.dst);
17581 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17582 // %}
17583 
17584 // peephole
17585 // %{
17586 //   peepmatch (incL_iReg movL);
17587 //   peepconstraint (0.dst == 1.dst);
17588 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17589 // %}
17590 
17591 // peephole
17592 // %{
17593 //   peepmatch (decL_iReg movL);
17594 //   peepconstraint (0.dst == 1.dst);
17595 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17596 // %}
17597 
17598 // peephole
17599 // %{
17600 //   peepmatch (addL_iReg_imm movL);
17601 //   peepconstraint (0.dst == 1.dst);
17602 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17603 // %}
17604 
17605 // peephole
17606 // %{
17607 //   peepmatch (addP_iReg_imm movP);
17608 //   peepconstraint (0.dst == 1.dst);
17609 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
17610 // %}
17611 
17612 // // Change load of spilled value to only a spill
17613 // instruct storeI(memory mem, iRegI src)
17614 // %{
17615 //   match(Set mem (StoreI mem src));
17616 // %}
17617 //
17618 // instruct loadI(iRegINoSp dst, memory mem)
17619 // %{
17620 //   match(Set dst (LoadI mem));
17621 // %}
17622 //
17623 
17624 //----------SMARTSPILL RULES---------------------------------------------------
17625 // These must follow all instruction definitions as they use the names
17626 // defined in the instructions definitions.
17627 
17628 // Local Variables:
17629 // mode: c++
17630 // End: