1 //
    2 // Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
    3 // Copyright (c) 2014, 2024, Red Hat, Inc. All rights reserved.
    4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    5 //
    6 // This code is free software; you can redistribute it and/or modify it
    7 // under the terms of the GNU General Public License version 2 only, as
    8 // published by the Free Software Foundation.
    9 //
   10 // This code is distributed in the hope that it will be useful, but WITHOUT
   11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   13 // version 2 for more details (a copy is included in the LICENSE file that
   14 // accompanied this code).
   15 //
   16 // You should have received a copy of the GNU General Public License version
   17 // 2 along with this work; if not, write to the Free Software Foundation,
   18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   19 //
   20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   21 // or visit www.oracle.com if you need additional information or have any
   22 // questions.
   23 //
   24 //
   25 
   26 // AArch64 Architecture Description File
   27 
   28 //----------REGISTER DEFINITION BLOCK------------------------------------------
   29 // This information is used by the matcher and the register allocator to
   30 // describe individual registers and classes of registers within the target
   31 // architecture.
   32 
   33 register %{
   34 //----------Architecture Description Register Definitions----------------------
   35 // General Registers
   36 // "reg_def"  name ( register save type, C convention save type,
   37 //                   ideal register type, encoding );
   38 // Register Save Types:
   39 //
   40 // NS  = No-Save:       The register allocator assumes that these registers
   41 //                      can be used without saving upon entry to the method, &
   42 //                      that they do not need to be saved at call sites.
   43 //
   44 // SOC = Save-On-Call:  The register allocator assumes that these registers
   45 //                      can be used without saving upon entry to the method,
   46 //                      but that they must be saved at call sites.
   47 //
   48 // SOE = Save-On-Entry: The register allocator assumes that these registers
   49 //                      must be saved before using them upon entry to the
   50 //                      method, but they do not need to be saved at call
   51 //                      sites.
   52 //
   53 // AS  = Always-Save:   The register allocator assumes that these registers
   54 //                      must be saved before using them upon entry to the
   55 //                      method, & that they must be saved at call sites.
   56 //
   57 // Ideal Register Type is used to determine how to save & restore a
   58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
   59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
   60 //
   61 // The encoding number is the actual bit-pattern placed into the opcodes.
   62 
   63 // We must define the 64 bit int registers in two 32 bit halves, the
   64 // real lower register and a virtual upper half register. upper halves
   65 // are used by the register allocator but are not actually supplied as
   66 // operands to memory ops.
   67 //
   68 // follow the C1 compiler in making registers
   69 //
   70 //   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
   72 //   r8-r9 non-allocatable (so we can use them as scratch regs)
   73 //
   74 // as regards Java usage. we don't use any callee save registers
   75 // because this makes it difficult to de-optimise a frame (see comment
   76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
   77 //
   78 
   79 // General Registers
   80 
// 64-bit general registers. Each register is described as a real low
// 32-bit half plus a virtual high half (see comment above); the
// fourth field (the encoding) matches the hardware register number.
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
reg_def R8      ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()         ); // rscratch1, non-allocatable
reg_def R8_H    ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()->next() );
reg_def R9      ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()         ); // rscratch2, non-allocatable
reg_def R9_H    ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()->next() );
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
// R18 is defined via r18_tls: it is a platform-reserved (TLS) register
// on some ABIs and is excluded from allocation there via R18_RESERVED
// (see the non-allocatable register classes below).
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18_tls->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18_tls->as_VMReg()->next());
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
// The remaining registers serve fixed VM or ABI roles (see trailing
// comments); r31 encodes as sp via r31_sp.
reg_def R27     ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
  145 
  146 // ----------------------------
  147 // Float/Double/Vector Registers
  148 // ----------------------------
  149 
  150 // Double Registers
  151 
  152 // The rules of ADL require that double registers be defined in pairs.
  153 // Each pair must be two 32-bit values, but not necessarily a pair of
  154 // single float registers. In each pair, ADLC-assigned register numbers
  155 // must be adjacent, with the lower number even. Finally, when the
  156 // CPU stores such a register pair to memory, the word associated with
  157 // the lower ADLC-assigned number must be stored to the lower address.
  158 
  159 // AArch64 has 32 floating-point registers. Each can store a vector of
  160 // single or double precision floating-point values up to 8 * 32
  161 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
  162 // use the first float or double element of the vector.
  163 
// For Java use, float registers v0-v15 are always save-on-call (whereas
// the platform ABI treats v8-v15 as callee save). Float registers
// v16-v31 are SOC as per the platform spec.
  167 
  168 // For SVE vector registers, we simply extend vector register size to 8
  169 // 'logical' slots. This is nominally 256 bits but it actually covers
  170 // all possible 'physical' SVE vector register lengths from 128 ~ 2048
  171 // bits. The 'physical' SVE vector register length is detected during
  172 // startup, so the register allocator is able to identify the correct
  173 // number of bytes needed for an SVE spill/unspill.
  174 // Note that a vector register with 4 slots denotes a 128-bit NEON
  175 // register allowing it to be distinguished from the corresponding SVE
  176 // vector register when the SVE vector length is 128 bits.
  177 
// SIMD/FP registers v0-v31. Each is described with four logical 32-bit
// slots (V*, V*_H, V*_J, V*_K) covering the 128-bit NEON view; SVE
// registers use 8 logical slots as described in the comment above.
  reg_def V0   ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(3) );

// v8-v15: the platform ABI preserves only the low 64 bits of these
// registers across calls, so the low two slots are SOE while the
// _J/_K (upper-half) slots remain SOC.
  reg_def V8   ( SOC, SOE, Op_RegF, 8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOE, Op_RegF, 8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOE, Op_RegF, 9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOE, Op_RegF, 9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(3) );

  reg_def V10   ( SOC, SOE, Op_RegF, 10, v10->as_VMReg()          );
  reg_def V10_H ( SOC, SOE, Op_RegF, 10, v10->as_VMReg()->next()  );
  reg_def V10_J ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2) );
  reg_def V10_K ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3) );

  reg_def V11   ( SOC, SOE, Op_RegF, 11, v11->as_VMReg()          );
  reg_def V11_H ( SOC, SOE, Op_RegF, 11, v11->as_VMReg()->next()  );
  reg_def V11_J ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2) );
  reg_def V11_K ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3) );

  reg_def V12   ( SOC, SOE, Op_RegF, 12, v12->as_VMReg()          );
  reg_def V12_H ( SOC, SOE, Op_RegF, 12, v12->as_VMReg()->next()  );
  reg_def V12_J ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2) );
  reg_def V12_K ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3) );

  reg_def V13   ( SOC, SOE, Op_RegF, 13, v13->as_VMReg()          );
  reg_def V13_H ( SOC, SOE, Op_RegF, 13, v13->as_VMReg()->next()  );
  reg_def V13_J ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2) );
  reg_def V13_K ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3) );

  reg_def V14   ( SOC, SOE, Op_RegF, 14, v14->as_VMReg()          );
  reg_def V14_H ( SOC, SOE, Op_RegF, 14, v14->as_VMReg()->next()  );
  reg_def V14_J ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2) );
  reg_def V14_K ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3) );

  reg_def V15   ( SOC, SOE, Op_RegF, 15, v15->as_VMReg()          );
  reg_def V15_H ( SOC, SOE, Op_RegF, 15, v15->as_VMReg()->next()  );
  reg_def V15_J ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2) );
  reg_def V15_K ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3) );

// v16-v31 are fully caller-saved (SOC in all slots).
  reg_def V16   ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()          );
  reg_def V16_H ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next()  );
  reg_def V16_J ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2) );
  reg_def V16_K ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3) );

  reg_def V17   ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()          );
  reg_def V17_H ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next()  );
  reg_def V17_J ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2) );
  reg_def V17_K ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3) );

  reg_def V18   ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()          );
  reg_def V18_H ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next()  );
  reg_def V18_J ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2) );
  reg_def V18_K ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3) );

  reg_def V19   ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()          );
  reg_def V19_H ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next()  );
  reg_def V19_J ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2) );
  reg_def V19_K ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3) );

  reg_def V20   ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()          );
  reg_def V20_H ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next()  );
  reg_def V20_J ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2) );
  reg_def V20_K ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3) );

  reg_def V21   ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()          );
  reg_def V21_H ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next()  );
  reg_def V21_J ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2) );
  reg_def V21_K ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3) );

  reg_def V22   ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()          );
  reg_def V22_H ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next()  );
  reg_def V22_J ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2) );
  reg_def V22_K ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3) );

  reg_def V23   ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()          );
  reg_def V23_H ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next()  );
  reg_def V23_J ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2) );
  reg_def V23_K ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3) );

  reg_def V24   ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()          );
  reg_def V24_H ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next()  );
  reg_def V24_J ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2) );
  reg_def V24_K ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3) );

  reg_def V25   ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()          );
  reg_def V25_H ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next()  );
  reg_def V25_J ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2) );
  reg_def V25_K ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3) );

  reg_def V26   ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()          );
  reg_def V26_H ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next()  );
  reg_def V26_J ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2) );
  reg_def V26_K ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3) );

  reg_def V27   ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()          );
  reg_def V27_H ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next()  );
  reg_def V27_J ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2) );
  reg_def V27_K ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3) );

  reg_def V28   ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()          );
  reg_def V28_H ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next()  );
  reg_def V28_J ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2) );
  reg_def V28_K ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3) );

  reg_def V29   ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()          );
  reg_def V29_H ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next()  );
  reg_def V29_J ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2) );
  reg_def V29_K ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3) );

  reg_def V30   ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()          );
  reg_def V30_H ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next()  );
  reg_def V30_J ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2) );
  reg_def V30_K ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3) );

  reg_def V31   ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()          );
  reg_def V31_H ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next()  );
  reg_def V31_J ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2) );
  reg_def V31_K ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3) );
  337 
  338 // ----------------------------
  339 // SVE Predicate Registers
  340 // ----------------------------
// 16 SVE predicate registers, all caller-saved. p7 is kept aside as
// the all-true predicate (see alloc_class chunk2 below).
  reg_def P0 (SOC, SOC, Op_RegVectMask, 0, p0->as_VMReg());
  reg_def P1 (SOC, SOC, Op_RegVectMask, 1, p1->as_VMReg());
  reg_def P2 (SOC, SOC, Op_RegVectMask, 2, p2->as_VMReg());
  reg_def P3 (SOC, SOC, Op_RegVectMask, 3, p3->as_VMReg());
  reg_def P4 (SOC, SOC, Op_RegVectMask, 4, p4->as_VMReg());
  reg_def P5 (SOC, SOC, Op_RegVectMask, 5, p5->as_VMReg());
  reg_def P6 (SOC, SOC, Op_RegVectMask, 6, p6->as_VMReg());
  reg_def P7 (SOC, SOC, Op_RegVectMask, 7, p7->as_VMReg());
  reg_def P8 (SOC, SOC, Op_RegVectMask, 8, p8->as_VMReg());
  reg_def P9 (SOC, SOC, Op_RegVectMask, 9, p9->as_VMReg());
  reg_def P10 (SOC, SOC, Op_RegVectMask, 10, p10->as_VMReg());
  reg_def P11 (SOC, SOC, Op_RegVectMask, 11, p11->as_VMReg());
  reg_def P12 (SOC, SOC, Op_RegVectMask, 12, p12->as_VMReg());
  reg_def P13 (SOC, SOC, Op_RegVectMask, 13, p13->as_VMReg());
  reg_def P14 (SOC, SOC, Op_RegVectMask, 14, p14->as_VMReg());
  reg_def P15 (SOC, SOC, Op_RegVectMask, 15, p15->as_VMReg());
  357 
  358 // ----------------------------
  359 // Special Registers
  360 // ----------------------------
  361 
// the AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. the FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
  367 
// Condition-flags pseudo register. It has no addressable encoding, so
// VMRegImpl::Bad() is used; slot 32 places it just past the 32 general
// registers.
reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
  369 
  370 // Specify priority of register selection within phases of register
  371 // allocation.  Highest priority is first.  A useful heuristic is to
  372 // give registers a low priority when they are required by machine
  373 // instructions, like EAX and EDX on I486, and choose no-save registers
  374 // before save-on-call, & save-on-call before save-on-entry.  Registers
  375 // which participate in fixed calling sequences should come last.
  376 // Registers which are used as pairs must fall on an even boundary.
  377 
// General-register allocation order, highest priority first: scratch
// volatiles before argument registers (which participate in fixed
// calling sequences), then callee-saved registers, and finally the
// fixed-function / non-allocatable registers.
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
    R8, R8_H,   // rscratch1
    R9, R9_H,   // rscratch2
);
  420 
// FP/SIMD-register allocation order, highest priority first: the fully
// caller-saved v16-v31, then the argument registers v0-v7, then the
// (low-64-bit) callee-saved v8-v15.
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);
  461 
// Predicate-register allocation order. p7 is listed last so it is
// chosen only as a last resort: it is preserved as the all-true
// predicate.
alloc_class chunk2 (
    // Governing predicates for load/store and arithmetic
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,

    // Extra predicates
    P8,
    P9,
    P10,
    P11,
    P12,
    P13,
    P14,
    P15,

    // Preserved for all-true predicate
    P7,
);
  485 
// The condition flags get their own allocation chunk.
alloc_class chunk3(RFLAGS);
  487 
  488 //----------Architecture Description Register Classes--------------------------
  489 // Several register classes are automatically defined based upon information in
  490 // this architecture description.
  491 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
  492 // 2) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
  493 //
  494 
// Class for all 32 bit general purpose registers
// (R8/R9 -- rscratch1/rscratch2 -- are deliberately omitted)
reg_class all_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30,
    R31
);
  528 
  529 
// Class for all 32 bit integer registers (excluding SP which
// will never be used as an integer register)
// Backed by a C++ mask variable rather than a static register list.
reg_class any_reg32 %{
  return _ANY_REG32_mask;
%}
  535 
// Singleton classes pinning a 32-bit value to one specific register.

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);

// Singleton class for R31 int register
reg_class int_r31_reg(R31);
  550 
// Class for all 64 bit general purpose registers
// (R8/R9 -- rscratch1/rscratch2 -- are deliberately omitted)
reg_class all_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
  584 
// Class for all long integer registers (including SP)
// Backed by a C++ mask variable rather than a static register list.
reg_class any_reg %{
  return _ANY_REG_mask;
%}
  589 
// Class for non-allocatable 32 bit registers
// NOTE(review): fp (R29) is not listed here -- presumably its
// allocatability is handled elsewhere; confirm against callers.
reg_class non_allocatable_reg32(
#ifdef R18_RESERVED
    // See comment in register_aarch64.hpp
    R18,                        // tls on Windows
#endif
    R28,                        // thread
    R30,                        // lr
    R31                         // sp
);
  600 
// Class for non-allocatable 64 bit registers
// NOTE(review): fp (R29) is not listed here -- presumably its
// allocatability is handled elsewhere; confirm against callers.
reg_class non_allocatable_reg(
#ifdef R18_RESERVED
    // See comment in register_aarch64.hpp
    R18, R18_H,                 // tls on Windows, platform register on macOS
#endif
    R28, R28_H,                 // thread
    R30, R30_H,                 // lr
    R31, R31_H                  // sp
);
  611 
// Class for all non-special integer registers
// Backed by a C++ mask variable computed from the register sets above.
reg_class no_special_reg32 %{
  return _NO_SPECIAL_REG32_mask;
%}

// Class for all non-special long integer registers
// Backed by a C++ mask variable computed from the register sets above.
reg_class no_special_reg %{
  return _NO_SPECIAL_REG_mask;
%}
  621 
// Singleton 64-bit classes: each pins a long/pointer value to one
// specific register required by a calling or runtime convention.

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register (r12)
reg_class method_reg(
    R12, R12_H
);

// Class for thread register (r28)
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register (r29)
reg_class fp_reg(
    R29, R29_H
);

// Class for link register (r30)
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register (r31)
reg_class sp_reg(
  R31, R31_H
);
  686 
// Pointer register classes, each backed by a C++ mask variable.

// Class for all pointer registers
reg_class ptr_reg %{
  return _PTR_REG_mask;
%}

// Class for all non_special pointer registers
reg_class no_special_ptr_reg %{
  return _NO_SPECIAL_PTR_REG_mask;
%}

// Class for all non_special pointer registers (excluding rfp)
reg_class no_special_no_rfp_ptr_reg %{
  return _NO_SPECIAL_NO_RFP_PTR_REG_mask;
%}
  701 
// Class for all float registers
// (only the lowest 32-bit slot of each v-register is used)
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);
  737 
// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers
// (each double occupies the V* and V*_H 32-bit slots)
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
  775 
// Class for all SVE vector registers.
// (all four logical slots of each v-register; SVE spill size is
// determined at startup, see comment near the V register definitions)
reg_class vectora_reg (
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,
);
  811 
// Class for all 64bit vector registers
// (two allocator slots per register, as for double_reg)
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
  847 
// Class for all 128bit vector registers
// (four allocator slots per register: Vn, Vn_H, Vn_J, Vn_K)
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
  883 
// Per-register singleton classes, used to pin an operand to one
// specific vector register (v0..v31). Each class names exactly one
// register (two allocator slots: Vn, Vn_H).

// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Class for 128 bit register v4
reg_class v4_reg(
    V4, V4_H
);

// Class for 128 bit register v5
reg_class v5_reg(
    V5, V5_H
);

// Class for 128 bit register v6
reg_class v6_reg(
    V6, V6_H
);

// Class for 128 bit register v7
reg_class v7_reg(
    V7, V7_H
);

// Class for 128 bit register v8
reg_class v8_reg(
    V8, V8_H
);

// Class for 128 bit register v9
reg_class v9_reg(
    V9, V9_H
);

// Class for 128 bit register v10
reg_class v10_reg(
    V10, V10_H
);

// Class for 128 bit register v11
reg_class v11_reg(
    V11, V11_H
);

// Class for 128 bit register v12
reg_class v12_reg(
    V12, V12_H
);

// Class for 128 bit register v13
reg_class v13_reg(
    V13, V13_H
);

// Class for 128 bit register v14
reg_class v14_reg(
    V14, V14_H
);

// Class for 128 bit register v15
reg_class v15_reg(
    V15, V15_H
);

// Class for 128 bit register v16
reg_class v16_reg(
    V16, V16_H
);

// Class for 128 bit register v17
reg_class v17_reg(
    V17, V17_H
);

// Class for 128 bit register v18
reg_class v18_reg(
    V18, V18_H
);

// Class for 128 bit register v19
reg_class v19_reg(
    V19, V19_H
);

// Class for 128 bit register v20
reg_class v20_reg(
    V20, V20_H
);

// Class for 128 bit register v21
reg_class v21_reg(
    V21, V21_H
);

// Class for 128 bit register v22
reg_class v22_reg(
    V22, V22_H
);

// Class for 128 bit register v23
reg_class v23_reg(
    V23, V23_H
);

// Class for 128 bit register v24
reg_class v24_reg(
    V24, V24_H
);

// Class for 128 bit register v25
reg_class v25_reg(
    V25, V25_H
);

// Class for 128 bit register v26
reg_class v26_reg(
    V26, V26_H
);

// Class for 128 bit register v27
reg_class v27_reg(
    V27, V27_H
);

// Class for 128 bit register v28
reg_class v28_reg(
    V28, V28_H
);

// Class for 128 bit register v29
reg_class v29_reg(
    V29, V29_H
);

// Class for 128 bit register v30
reg_class v30_reg(
    V30, V30_H
);

// Class for 128 bit register v31
reg_class v31_reg(
    V31, V31_H
);
 1043 
// Class for all SVE predicate registers.
// p0-p6 and p8-p15 are allocatable; p7 is deliberately excluded
// (see comment in the list).
reg_class pr_reg (
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    // P7, non-allocatable, preserved with all elements preset to TRUE.
    P8,
    P9,
    P10,
    P11,
    P12,
    P13,
    P14,
    P15
);
 1063 
// Class for SVE governing predicate registers, which are used
// to determine the active elements of a predicated instruction.
// Only p0-p6 are made available as governing predicates here;
// p7 is reserved (all elements preset to TRUE).
reg_class gov_pr (
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    // P7, non-allocatable, preserved with all elements preset to TRUE.
);
 1076 
// Singleton classes pinning an operand to predicate register p0 or p1
reg_class p0_reg(P0);
reg_class p1_reg(P1);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);
 1082 
 1083 %}
 1084 
 1085 //----------DEFINITION BLOCK---------------------------------------------------
 1086 // Define name --> value mappings to inform the ADLC of an integer valued name
 1087 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 1088 // Format:
 1089 //        int_def  <name>         ( <int_value>, <expression>);
 1090 // Generated Code in ad_<arch>.hpp
 1091 //        #define  <name>   (<expression>)
 1092 //        // value == <int_value>
 1093 // Generated code in ad_<arch>.cpp adlc_verification()
 1094 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 1095 //
 1096 
 1097 // we follow the ppc-aix port in using a simple cost model which ranks
 1098 // register operations as cheap, memory ops as more expensive and
 1099 // branches as most expensive. the first two have a low as well as a
 1100 // normal cost. huge cost appears to be a way of saying don't do
 1101 // something
 1102 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches cost twice a plain instruction.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  // Calls are costed like branches.
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile memory references are an order of magnitude more expensive.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 1110 
 1111 
 1112 //----------SOURCE BLOCK-------------------------------------------------------
 1113 // This is a block of C++ code which provides values, functions, and
 1114 // definitions necessary in the rest of the architecture description
 1115 
 1116 source_hpp %{
 1117 
 1118 #include "asm/macroAssembler.hpp"
 1119 #include "gc/shared/barrierSetAssembler.hpp"
 1120 #include "gc/shared/cardTable.hpp"
 1121 #include "gc/shared/cardTableBarrierSet.hpp"
 1122 #include "gc/shared/collectedHeap.hpp"
 1123 #include "opto/addnode.hpp"
 1124 #include "opto/convertnode.hpp"
 1125 #include "runtime/objectMonitor.hpp"
 1126 
// Register masks derived from the adlc-generated classes; defined in
// the source block below and populated at startup by reg_mask_init().
extern RegMask _ANY_REG32_mask;
extern RegMask _ANY_REG_mask;
extern RegMask _PTR_REG_mask;
extern RegMask _NO_SPECIAL_REG32_mask;
extern RegMask _NO_SPECIAL_REG_mask;
extern RegMask _NO_SPECIAL_PTR_REG_mask;
extern RegMask _NO_SPECIAL_NO_RFP_PTR_REG_mask;
 1134 
// Platform hooks consulted by Compile::shorten_branches; AArch64 uses
// no call trampoline stubs, so both queries report zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
 1152 
// Platform hooks for sizing and emitting the exception and
// deoptimization handler stubs.
class HandlerImpl {

 public:

  static int emit_exception_handler(C2_MacroAssembler *masm);
  static int emit_deopt_handler(C2_MacroAssembler* masm);

  // the exception handler is just a far branch to the code stub
  static uint size_exception_handler() {
    return MacroAssembler::far_codestub_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    return NativeInstruction::instruction_size + MacroAssembler::far_codestub_branch_size();
  }
};
 1169 
// Platform-dependent node flags: AArch64 defines no extra flags
// beyond the shared Node flag set.
class Node::PD {
public:
  enum NodeFlags {
    _last_flag = Node::_last_flag
  };
};
 1176 
  // Return true if opcode is one of the CompareAndSwapX/atomic node
  // opcodes (see the definition in the source block for the exact set).
  bool is_CAS(int opcode, bool maybe_volatile);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);

  // Convert BoolTest condition to Assembler condition.
  // Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
  Assembler::Condition to_assembler_cond(BoolTest::mask cond);
 1199 %}
 1200 
 1201 source %{
 1202 
 1203   // Derived RegMask with conditionally allocatable registers
 1204 
  // No platform-specific mach node analysis is performed on AArch64.
  void PhaseOutput::pd_perform_mach_node_analysis() {
  }
 1207 
  // Mach nodes carry no special alignment requirement (1 = byte aligned).
  int MachNode::pd_alignment_required() const {
    return 1;
  }
 1211 
  // Mach nodes never require padding before them on AArch64.
  int MachNode::compute_padding(int current_offset) const {
    return 0;
  }
 1215 
  // Definitions of the derived register masks declared in source_hpp;
  // populated once at startup by reg_mask_init() below.
  RegMask _ANY_REG32_mask;
  RegMask _ANY_REG_mask;
  RegMask _PTR_REG_mask;
  RegMask _NO_SPECIAL_REG32_mask;
  RegMask _NO_SPECIAL_REG_mask;
  RegMask _NO_SPECIAL_PTR_REG_mask;
  RegMask _NO_SPECIAL_NO_RFP_PTR_REG_mask;
 1223 
  // Populate the derived RegMask objects defined above. Runs once at
  // startup, after the adlc-generated masks are available.
  void reg_mask_init() {
    // We derive below RegMask(s) from the ones which are auto-generated from
    // adlc register classes to make AArch64 rheapbase (r27) and rfp (r29)
    // registers conditionally reserved.

    // any 32-bit register except the stack pointer
    _ANY_REG32_mask = _ALL_REG32_mask;
    _ANY_REG32_mask.Remove(OptoReg::as_OptoReg(r31_sp->as_VMReg()));

    _ANY_REG_mask = _ALL_REG_mask;

    _PTR_REG_mask = _ALL_REG_mask;

    // strip the registers the allocator must never hand out
    _NO_SPECIAL_REG32_mask = _ALL_REG32_mask;
    _NO_SPECIAL_REG32_mask.SUBTRACT(_NON_ALLOCATABLE_REG32_mask);

    _NO_SPECIAL_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    _NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    // r27 is not allocatable when compressed oops is on and heapbase is not
    // zero; compressed klass pointers don't use r27 after JDK-8234794
    if (UseCompressedOops && (CompressedOops::base() != nullptr)) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
      _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
      _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
    }

    // r29 is not allocatable when PreserveFramePointer is on
    if (PreserveFramePointer) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
      _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
      _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
    }

    // the no-rfp variant excludes rfp (r29) unconditionally
    _NO_SPECIAL_NO_RFP_PTR_REG_mask = _NO_SPECIAL_PTR_REG_mask;
    _NO_SPECIAL_NO_RFP_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
  }
 1263 
  // Optimization of volatile gets and puts
 1265   // -------------------------------------
 1266   //
 1267   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
 1268   // use to implement volatile reads and writes. For a volatile read
 1269   // we simply need
 1270   //
 1271   //   ldar<x>
 1272   //
 1273   // and for a volatile write we need
 1274   //
 1275   //   stlr<x>
 1276   //
 1277   // Alternatively, we can implement them by pairing a normal
 1278   // load/store with a memory barrier. For a volatile read we need
 1279   //
 1280   //   ldr<x>
 1281   //   dmb ishld
 1282   //
 1283   // for a volatile write
 1284   //
 1285   //   dmb ish
 1286   //   str<x>
 1287   //   dmb ish
 1288   //
 1289   // We can also use ldaxr and stlxr to implement compare and swap CAS
 1290   // sequences. These are normally translated to an instruction
 1291   // sequence like the following
 1292   //
 1293   //   dmb      ish
 1294   // retry:
 1295   //   ldxr<x>   rval raddr
 1296   //   cmp       rval rold
 1297   //   b.ne done
 1298   //   stlxr<x>  rval, rnew, rold
 1299   //   cbnz      rval retry
 1300   // done:
 1301   //   cset      r0, eq
 1302   //   dmb ishld
 1303   //
 1304   // Note that the exclusive store is already using an stlxr
 1305   // instruction. That is required to ensure visibility to other
 1306   // threads of the exclusive write (assuming it succeeds) before that
 1307   // of any subsequent writes.
 1308   //
 1309   // The following instruction sequence is an improvement on the above
 1310   //
 1311   // retry:
 1312   //   ldaxr<x>  rval raddr
 1313   //   cmp       rval rold
 1314   //   b.ne done
 1315   //   stlxr<x>  rval, rnew, rold
 1316   //   cbnz      rval retry
 1317   // done:
 1318   //   cset      r0, eq
 1319   //
 1320   // We don't need the leading dmb ish since the stlxr guarantees
 1321   // visibility of prior writes in the case that the swap is
 1322   // successful. Crucially we don't have to worry about the case where
 1323   // the swap is not successful since no valid program should be
 1324   // relying on visibility of prior changes by the attempting thread
 1325   // in the case where the CAS fails.
 1326   //
 1327   // Similarly, we don't need the trailing dmb ishld if we substitute
 1328   // an ldaxr instruction since that will provide all the guarantees we
 1329   // require regarding observation of changes made by other threads
 1330   // before any change to the CAS address observed by the load.
 1331   //
 1332   // In order to generate the desired instruction sequence we need to
 1333   // be able to identify specific 'signature' ideal graph node
  // sequences which i) occur as a translation of volatile reads or
 1335   // writes or CAS operations and ii) do not occur through any other
 1336   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
 1338   // sequences to the desired machine code sequences. Selection of the
 1339   // alternative rules can be implemented by predicates which identify
 1340   // the relevant node sequences.
 1341   //
 1342   // The ideal graph generator translates a volatile read to the node
 1343   // sequence
 1344   //
 1345   //   LoadX[mo_acquire]
 1346   //   MemBarAcquire
 1347   //
 1348   // As a special case when using the compressed oops optimization we
 1349   // may also see this variant
 1350   //
 1351   //   LoadN[mo_acquire]
 1352   //   DecodeN
 1353   //   MemBarAcquire
 1354   //
 1355   // A volatile write is translated to the node sequence
 1356   //
 1357   //   MemBarRelease
 1358   //   StoreX[mo_release] {CardMark}-optional
 1359   //   MemBarVolatile
 1360   //
 1361   // n.b. the above node patterns are generated with a strict
 1362   // 'signature' configuration of input and output dependencies (see
 1363   // the predicates below for exact details). The card mark may be as
 1364   // simple as a few extra nodes or, in a few GC configurations, may
 1365   // include more complex control flow between the leading and
 1366   // trailing memory barriers. However, whatever the card mark
 1367   // configuration these signatures are unique to translated volatile
 1368   // reads/stores -- they will not appear as a result of any other
 1369   // bytecode translation or inlining nor as a consequence of
 1370   // optimizing transforms.
 1371   //
 1372   // We also want to catch inlined unsafe volatile gets and puts and
 1373   // be able to implement them using either ldar<x>/stlr<x> or some
 1374   // combination of ldr<x>/stlr<x> and dmb instructions.
 1375   //
 1376   // Inlined unsafe volatiles puts manifest as a minor variant of the
 1377   // normal volatile put node sequence containing an extra cpuorder
 1378   // membar
 1379   //
 1380   //   MemBarRelease
 1381   //   MemBarCPUOrder
 1382   //   StoreX[mo_release] {CardMark}-optional
 1383   //   MemBarCPUOrder
 1384   //   MemBarVolatile
 1385   //
 1386   // n.b. as an aside, a cpuorder membar is not itself subject to
 1387   // matching and translation by adlc rules.  However, the rule
 1388   // predicates need to detect its presence in order to correctly
 1389   // select the desired adlc rules.
 1390   //
 1391   // Inlined unsafe volatile gets manifest as a slightly different
 1392   // node sequence to a normal volatile get because of the
 1393   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
 1395   // MemBarAcquire, possibly through an optional DecodeN, is still
 1396   // present
 1397   //
 1398   //   MemBarCPUOrder
 1399   //        ||       \\
 1400   //   MemBarCPUOrder LoadX[mo_acquire]
 1401   //        ||            |
 1402   //        ||       {DecodeN} optional
 1403   //        ||       /
 1404   //     MemBarAcquire
 1405   //
 1406   // In this case the acquire membar does not directly depend on the
 1407   // load. However, we can be sure that the load is generated from an
 1408   // inlined unsafe volatile get if we see it dependent on this unique
 1409   // sequence of membar nodes. Similarly, given an acquire membar we
 1410   // can know that it was added because of an inlined unsafe volatile
 1411   // get if it is fed and feeds a cpuorder membar and if its feed
 1412   // membar also feeds an acquiring load.
 1413   //
 1414   // Finally an inlined (Unsafe) CAS operation is translated to the
 1415   // following ideal graph
 1416   //
 1417   //   MemBarRelease
 1418   //   MemBarCPUOrder
 1419   //   CompareAndSwapX {CardMark}-optional
 1420   //   MemBarCPUOrder
 1421   //   MemBarAcquire
 1422   //
 1423   // So, where we can identify these volatile read and write
 1424   // signatures we can choose to plant either of the above two code
 1425   // sequences. For a volatile read we can simply plant a normal
 1426   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
 1427   // also choose to inhibit translation of the MemBarAcquire and
 1428   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
 1429   //
 1430   // When we recognise a volatile store signature we can choose to
  // plant a dmb ish as a translation for the MemBarRelease, a
 1432   // normal str<x> and then a dmb ish for the MemBarVolatile.
 1433   // Alternatively, we can inhibit translation of the MemBarRelease
 1434   // and MemBarVolatile and instead plant a simple stlr<x>
 1435   // instruction.
 1436   //
 1437   // when we recognise a CAS signature we can choose to plant a dmb
 1438   // ish as a translation for the MemBarRelease, the conventional
 1439   // macro-instruction sequence for the CompareAndSwap node (which
 1440   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
 1441   // Alternatively, we can elide generation of the dmb instructions
 1442   // and plant the alternative CompareAndSwap macro-instruction
 1443   // sequence (which uses ldaxr<x>).
 1444   //
 1445   // Of course, the above only applies when we see these signature
 1446   // configurations. We still want to plant dmb instructions in any
 1447   // other cases where we may see a MemBarAcquire, MemBarRelease or
 1448   // MemBarVolatile. For example, at the end of a constructor which
 1449   // writes final/volatile fields we will see a MemBarRelease
 1450   // instruction and this needs a 'dmb ish' lest we risk the
 1451   // constructed object being visible without making the
 1452   // final/volatile field writes visible.
 1453   //
 1454   // n.b. the translation rules below which rely on detection of the
 1455   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
 1456   // If we see anything other than the signature configurations we
 1457   // always just translate the loads and stores to ldr<x> and str<x>
 1458   // and translate acquire, release and volatile membars to the
 1459   // relevant dmb instructions.
 1460   //
 1461 
 1462   // is_CAS(int opcode, bool maybe_volatile)
 1463   //
 1464   // return true if opcode is one of the possible CompareAndSwapX
 1465   // values otherwise false.
 1466 
  // Return true if opcode is one of the possible CompareAndSwapX
  // values otherwise false. Strong CAS and get-and-set/get-and-add
  // forms always count; compare-and-exchange and weak CAS forms count
  // only when maybe_volatile is true.
  bool is_CAS(int opcode, bool maybe_volatile)
  {
    switch(opcode) {
      // We handle these
    case Op_CompareAndSwapI:
    case Op_CompareAndSwapL:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_CompareAndSwapB:
    case Op_CompareAndSwapS:
    case Op_GetAndSetI:
    case Op_GetAndSetL:
    case Op_GetAndSetP:
    case Op_GetAndSetN:
    case Op_GetAndAddI:
    case Op_GetAndAddL:
      return true;
      // these count as CAS only when the caller tolerates the
      // possibly-volatile forms
    case Op_CompareAndExchangeI:
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeB:
    case Op_CompareAndExchangeS:
    case Op_CompareAndExchangeL:
    case Op_CompareAndExchangeP:
    case Op_WeakCompareAndSwapB:
    case Op_WeakCompareAndSwapS:
    case Op_WeakCompareAndSwapI:
    case Op_WeakCompareAndSwapL:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      return maybe_volatile;
    default:
      return false;
    }
  }
 1507 
 1508   // helper to determine the maximum number of Phi nodes we may need to
 1509   // traverse when searching from a card mark membar for the merge mem
 1510   // feeding a trailing membar or vice versa
 1511 
 1512 // predicates controlling emit of ldr<x>/ldar<x>
 1513 
// Return true when an acquire membar can be elided because the memory
// operation it trails will itself be emitted in an acquiring form
// (see the volatile-translation commentary above).
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  MemBarNode* mb = barrier->as_MemBar();

  // a membar marked as trailing a load is always redundant here
  if (mb->trailing_load()) {
    return true;
  }

  // a membar trailing a load-store is redundant only when the
  // load-store is one of the (possibly volatile) CAS forms
  if (mb->trailing_load_store()) {
    Node* load_store = mb->in(MemBarNode::Precedent);
    assert(load_store->is_LoadStore(), "unexpected graph shape");
    return is_CAS(load_store->Opcode(), true);
  }

  return false;
}
 1532 
 1533 bool needs_acquiring_load(const Node *n)
 1534 {
 1535   assert(n->is_Load(), "expecting a load");
 1536   LoadNode *ld = n->as_Load();
 1537   return ld->is_acquire();
 1538 }
 1539 
 1540 bool unnecessary_release(const Node *n)
 1541 {
 1542   assert((n->is_MemBar() &&
 1543           n->Opcode() == Op_MemBarRelease),
 1544          "expecting a release membar");
 1545 
 1546   MemBarNode *barrier = n->as_MemBar();
 1547   if (!barrier->leading()) {
 1548     return false;
 1549   } else {
 1550     Node* trailing = barrier->trailing_membar();
 1551     MemBarNode* trailing_mb = trailing->as_MemBar();
 1552     assert(trailing_mb->trailing(), "Not a trailing membar?");
 1553     assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
 1554 
 1555     Node* mem = trailing_mb->in(MemBarNode::Precedent);
 1556     if (mem->is_Store()) {
 1557       assert(mem->as_Store()->is_release(), "");
 1558       assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
 1559       return true;
 1560     } else {
 1561       assert(mem->is_LoadStore(), "");
 1562       assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
 1563       return is_CAS(mem->Opcode(), true);
 1564     }
 1565   }
 1566   return false;
 1567 }
 1568 
// Return true when a MemBarVolatile can be elided because it trails a
// releasing store (which will be emitted as stlr<x>).
bool unnecessary_volatile(const Node *n)
{
  // assert n->is_MemBar();
  MemBarNode *mbvol = n->as_MemBar();

  bool release = mbvol->trailing_store();
  assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
#ifdef ASSERT
  // cross-check the leading/trailing membar pairing
  if (release) {
    Node* leading = mbvol->leading_membar();
    assert(leading->Opcode() == Op_MemBarRelease, "");
    assert(leading->as_MemBar()->leading_store(), "");
    assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
  }
#endif

  return release;
}
 1587 
 1588 // predicates controlling emit of str<x>/stlr<x>
 1589 
 1590 bool needs_releasing_store(const Node *n)
 1591 {
 1592   // assert n->is_Store();
 1593   StoreNode *st = n->as_Store();
 1594   return st->trailing_membar() != nullptr;
 1595 }
 1596 
 1597 // predicate controlling translation of CAS
 1598 //
 1599 // returns true if CAS needs to use an acquiring load otherwise false
 1600 
 1601 bool needs_acquiring_load_exclusive(const Node *n)
 1602 {
 1603   assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
 1604   LoadStoreNode* ldst = n->as_LoadStore();
 1605   if (is_CAS(n->Opcode(), false)) {
 1606     assert(ldst->trailing_membar() != nullptr, "expected trailing membar");
 1607   } else {
 1608     return ldst->trailing_membar() != nullptr;
 1609   }
 1610 
 1611   // so we can just return true here
 1612   return true;
 1613 }
 1614 
 1615 #define __ masm->
 1616 
 1617 // advance declarations for helper functions to convert register
 1618 // indices to register objects
 1619 
 1620 // the ad file has to provide implementations of certain methods
 1621 // expected by the generic code
 1622 //
 1623 // REQUIRED FUNCTIONALITY
 1624 
 1625 //=============================================================================
 1626 
 1627 // !!!!! Special hack to get all types of calls to specify the byte offset
 1628 //       from the start of the call to the point where the return address
 1629 //       will point.
 1630 
 1631 int MachCallStaticJavaNode::ret_addr_offset()
 1632 {
 1633   // call should be a simple bl
 1634   int off = 4;
 1635   return off;
 1636 }
 1637 
int MachCallDynamicJavaNode::ret_addr_offset()
{
  // four 4-byte instructions precede the return address
  return 16; // movz, movk, movk, bl
}
 1642 
 1643 int MachCallRuntimeNode::ret_addr_offset() {
 1644   // for generated stubs the call will be
 1645   //   bl(addr)
 1646   // or with far branches
 1647   //   bl(trampoline_stub)
 1648   // for real runtime callouts it will be six instructions
 1649   // see aarch64_enc_java_to_runtime
 1650   //   adr(rscratch2, retaddr)
 1651   //   str(rscratch2, Address(rthread, JavaThread::last_Java_pc_offset()));
 1652   //   lea(rscratch1, RuntimeAddress(addr)
 1653   //   blr(rscratch1)
 1654   CodeBlob *cb = CodeCache::find_blob(_entry_point);
 1655   if (cb) {
 1656     return 1 * NativeInstruction::instruction_size;
 1657   } else {
 1658     return 6 * NativeInstruction::instruction_size;
 1659   }
 1660 }
 1661 
 1662 //=============================================================================
 1663 
 1664 #ifndef PRODUCT
// Debug-output form for a breakpoint node.
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
 1668 #endif
 1669 
// A breakpoint is emitted as a single brk #0 instruction.
void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  __ brk(0);
}
 1673 
// Size comes from the generic mach node size machinery.
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 1677 
 1678 //=============================================================================
 1679 
 1680 #ifndef PRODUCT
  // Debug-output form for a nop padding node.
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
 1684 #endif
 1685 
 1686   void MachNopNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc*) const {
 1687     for (int i = 0; i < _count; i++) {
 1688       __ nop();
 1689     }
 1690   }
 1691 
  // _count nops of instruction_size bytes each.
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
 1695 
 1696 //=============================================================================
// The constant table base node consumes no output register.
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
 1698 
// With absolute addressing the constant table base needs no offset.
int ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}
 1702 
// No post-register-allocation expansion of the constant base node.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
// Never called: requires_postalloc_expand() above returns false.
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
 1707 
// The constant base node emits no code on AArch64.
void MachConstantBaseNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {
  // Empty encoding
}
 1711 
// Matches the empty encoding above.
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}
 1715 
 1716 #ifndef PRODUCT
 1717 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
 1718   st->print("-- \t// MachConstantBaseNode (empty encoding)");
 1719 }
 1720 #endif
 1721 
#ifndef PRODUCT
// Pretty-print the method prolog: stack bang, ROP protection, frame push,
// and the nmethod entry-barrier pseudo-code.
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  if (C->output()->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  if (VM_Version::use_rop_protection()) {
    st->print("ldr  zr, [lr]\n\t");
    st->print("paciaz\n\t");
  }
  // Small frames fit a single immediate subtract; larger ones push rfp/lr
  // first and then lower sp via a scratch register.
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
  // Methods (not stubs) carry an nmethod entry-barrier guard check.
  if (C->stub_function() == nullptr) {
    st->print("\n\t");
    st->print("ldr  rscratch1, [guard]\n\t");
    st->print("dmb ishld\n\t");
    st->print("ldr  rscratch2, [rthread, #thread_disarmed_guard_value_offset]\n\t");
    st->print("cmp  rscratch1, rscratch2\n\t");
    st->print("b.eq skip");
    st->print("\n\t");
    st->print("blr #nmethod_entry_barrier_stub\n\t");
    st->print("b skip\n\t");
    st->print("guard: int\n\t");
    st->print("\n\t");
    st->print("skip:\n\t");
  }
}
#endif
 1761 
// Emit the method prolog: optional clinit barrier, SVE ptrue refresh,
// stack-overflow bang, frame construction, and the nmethod entry barrier.
void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;

  // n.b. frame size includes space for return pc and rfp
  const int framesize = C->output()->frame_size_in_bytes();

  if (C->clinit_barrier_on_entry()) {
    assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");

    Label L_skip_barrier;

    // Re-dispatch through the wrong-method stub until the holder class
    // has finished initialization.
    __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
    __ bind(L_skip_barrier);
  }

  if (C->max_vector_size() > 0) {
    __ reinitialize_ptrue();
  }

  int bangsize = C->output()->bang_size_in_bytes();
  if (C->output()->need_stack_bang(bangsize))
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (C->stub_function() == nullptr) {
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
    // Dummy labels for just measuring the code size
    Label dummy_slow_path;
    Label dummy_continuation;
    Label dummy_guard;
    Label* slow_path = &dummy_slow_path;
    Label* continuation = &dummy_continuation;
    Label* guard = &dummy_guard;
    if (!Compile::current()->output()->in_scratch_emit_size()) {
      // Use real labels from actual stub when not emitting code for the purpose of measuring its size
      C2EntryBarrierStub* stub = new (Compile::current()->comp_arena()) C2EntryBarrierStub();
      Compile::current()->output()->add_stub(stub);
      slow_path = &stub->entry();
      continuation = &stub->continuation();
      guard = &stub->guard();
    }
    // In the C2 code, we move the non-hot part of nmethod entry barriers out-of-line to a stub.
    bs->nmethod_entry_barrier(masm, slow_path, continuation, guard);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->output()->set_frame_complete(__ offset());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    ConstantTable& constant_table = C->output()->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
 1823 
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

// Number of relocatable values in the prolog; none are emitted here.
int MachPrologNode::reloc() const
{
  return 0;
}
 1834 
 1835 //=============================================================================
 1836 
#ifndef PRODUCT
// Pretty-print the method epilog: frame pop, ROP authentication, and the
// return-polling sequence.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  // Mirror of the small/large frame shapes chosen by the prolog.
  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }
  if (VM_Version::use_rop_protection()) {
    st->print("autiaz\n\t");
    st->print("ldr  zr, [lr]\n\t");
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# test polling word\n\t");
    st->print("ldr  rscratch1, [rthread],#%d\n\t", in_bytes(JavaThread::polling_word_offset()));
    st->print("cmp  sp, rscratch1\n\t");
    st->print("bhi #slow_path");
  }
}
#endif
 1867 
// Emit the method epilog: tear down the frame, optionally check the
// reserved stack area, and perform the return safepoint poll.
void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    Label dummy_label;
    Label* code_stub = &dummy_label;
    // Use the dummy label when only measuring code size; otherwise wire up
    // a real out-of-line safepoint-poll stub.
    if (!C->output()->in_scratch_emit_size()) {
      C2SafepointPollStub* stub = new (C->comp_arena()) C2SafepointPollStub(__ offset());
      C->output()->add_stub(stub);
      code_stub = &stub->entry();
    }
    __ relocate(relocInfo::poll_return_type);
    __ safepoint_poll(*code_stub, true /* at_return */, true /* in_nmethod */);
  }
}
 1890 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

// Use the default pipeline description for the epilog.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
 1904 
 1905 //=============================================================================
 1906 
 1907 static enum RC rc_class(OptoReg::Name reg) {
 1908 
 1909   if (reg == OptoReg::Bad) {
 1910     return rc_bad;
 1911   }
 1912 
 1913   // we have 32 int registers * 2 halves
 1914   int slots_of_int_registers = Register::number_of_registers * Register::max_slots_per_register;
 1915 
 1916   if (reg < slots_of_int_registers) {
 1917     return rc_int;
 1918   }
 1919 
 1920   // we have 32 float register * 8 halves
 1921   int slots_of_float_registers = FloatRegister::number_of_registers * FloatRegister::max_slots_per_register;
 1922   if (reg < slots_of_int_registers + slots_of_float_registers) {
 1923     return rc_float;
 1924   }
 1925 
 1926   int slots_of_predicate_registers = PRegister::number_of_registers * PRegister::max_slots_per_register;
 1927   if (reg < slots_of_int_registers + slots_of_float_registers + slots_of_predicate_registers) {
 1928     return rc_predicate;
 1929   }
 1930 
 1931   // Between predicate regs & stack is the flags.
 1932   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
 1933 
 1934   return rc_stack;
 1935 }
 1936 
// Shared worker for MachSpillCopyNode. When 'masm' is non-null it emits the
// move/spill/unspill code; when 'st' is non-null it prints the listing.
// Handles gpr/fpr/predicate registers, NEON and SVE vectors, and stack slots.
// Returns 0 (callers use MachNode::size() for sizing).
uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
  Compile* C = ra_->C;

  // Get registers to move.
  OptoReg::Name src_hi = ra_->get_reg_second(in(1));
  OptoReg::Name src_lo = ra_->get_reg_first(in(1));
  OptoReg::Name dst_hi = ra_->get_reg_second(this);
  OptoReg::Name dst_lo = ra_->get_reg_first(this);

  enum RC src_hi_rc = rc_class(src_hi);
  enum RC src_lo_rc = rc_class(src_lo);
  enum RC dst_hi_rc = rc_class(dst_hi);
  enum RC dst_lo_rc = rc_class(dst_lo);

  assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");

  if (src_hi != OptoReg::Bad && !bottom_type()->isa_vectmask()) {
    assert((src_lo&1)==0 && src_lo+1==src_hi &&
           (dst_lo&1)==0 && dst_lo+1==dst_hi,
           "expected aligned-adjacent pairs");
  }

  if (src_lo == dst_lo && src_hi == dst_hi) {
    return 0;            // Self copy, no move.
  }

  bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
              (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
  int src_offset = ra_->reg2offset(src_lo);
  int dst_offset = ra_->reg2offset(dst_lo);

  if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
    // Vector copy/spill: scalable (SVE, Op_VecA) vs fixed-width (NEON).
    uint ireg = ideal_reg();
    if (ireg == Op_VecA && masm) {
      int sve_vector_reg_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
      if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
        // stack->stack
        __ spill_copy_sve_vector_stack_to_stack(src_offset, dst_offset,
                                                sve_vector_reg_size_in_bytes);
      } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
        __ spill_sve_vector(as_FloatRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo),
                            sve_vector_reg_size_in_bytes);
      } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
        __ unspill_sve_vector(as_FloatRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo),
                              sve_vector_reg_size_in_bytes);
      } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
        // Register-to-register SVE move via ORR with itself.
        __ sve_orr(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                   as_FloatRegister(Matcher::_regEncode[src_lo]),
                   as_FloatRegister(Matcher::_regEncode[src_lo]));
      } else {
        ShouldNotReachHere();
      }
    } else if (masm) {
      assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
      assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
      if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
        // stack->stack
        assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
        if (ireg == Op_VecD) {
          __ unspill(rscratch1, true, src_offset);
          __ spill(rscratch1, true, dst_offset);
        } else {
          __ spill_copy128(src_offset, dst_offset);
        }
      } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
        __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
               ireg == Op_VecD ? __ T8B : __ T16B,
               as_FloatRegister(Matcher::_regEncode[src_lo]));
      } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
        __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
                 ireg == Op_VecD ? __ D : __ Q,
                 ra_->reg2offset(dst_lo));
      } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
        __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                   ireg == Op_VecD ? __ D : __ Q,
                   ra_->reg2offset(src_lo));
      } else {
        ShouldNotReachHere();
      }
    }
  } else if (masm) {
    // Scalar (or predicate) copy: dispatch on the source register class.
    switch (src_lo_rc) {
    case rc_int:
      if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
        if (is64) {
            __ mov(as_Register(Matcher::_regEncode[dst_lo]),
                   as_Register(Matcher::_regEncode[src_lo]));
        } else {
            __ movw(as_Register(Matcher::_regEncode[dst_lo]),
                    as_Register(Matcher::_regEncode[src_lo]));
        }
      } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
        if (is64) {
            __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_Register(Matcher::_regEncode[src_lo]));
        } else {
            __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_Register(Matcher::_regEncode[src_lo]));
        }
      } else {                    // gpr --> stack spill
        assert(dst_lo_rc == rc_stack, "spill to bad register class");
        __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
      }
      break;
    case rc_float:
      if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
        if (is64) {
            __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        } else {
            __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        }
      } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
        if (is64) {
            __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        } else {
            __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        }
      } else {                    // fpr --> stack spill
        assert(dst_lo_rc == rc_stack, "spill to bad register class");
        __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
                 is64 ? __ D : __ S, dst_offset);
      }
      break;
    case rc_stack:
      if (dst_lo_rc == rc_int) {  // stack --> gpr load
        __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
      } else if (dst_lo_rc == rc_float) { // stack --> fpr load
        __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                   is64 ? __ D : __ S, src_offset);
      } else if (dst_lo_rc == rc_predicate) {
        __ unspill_sve_predicate(as_PRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo),
                                 Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
      } else {                    // stack --> stack copy
        assert(dst_lo_rc == rc_stack, "spill to bad register class");
        if (ideal_reg() == Op_RegVectMask) {
          __ spill_copy_sve_predicate_stack_to_stack(src_offset, dst_offset,
                                                     Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
        } else {
          // Bounce through rscratch1 for a plain stack-to-stack move.
          __ unspill(rscratch1, is64, src_offset);
          __ spill(rscratch1, is64, dst_offset);
        }
      }
      break;
    case rc_predicate:
      if (dst_lo_rc == rc_predicate) {
        __ sve_mov(as_PRegister(Matcher::_regEncode[dst_lo]), as_PRegister(Matcher::_regEncode[src_lo]));
      } else if (dst_lo_rc == rc_stack) {
        __ spill_sve_predicate(as_PRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo),
                               Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
      } else {
        assert(false, "bad src and dst rc_class combination.");
        ShouldNotReachHere();
      }
      break;
    default:
      assert(false, "bad rc_class for spill");
      ShouldNotReachHere();
    }
  }

  // Optional listing output describing the copy and its size.
  if (st) {
    st->print("spill ");
    if (src_lo_rc == rc_stack) {
      st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
    } else {
      st->print("%s -> ", Matcher::regName[src_lo]);
    }
    if (dst_lo_rc == rc_stack) {
      st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
    } else {
      st->print("%s", Matcher::regName[dst_lo]);
    }
    if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
      int vsize = 0;
      switch (ideal_reg()) {
      case Op_VecD:
        vsize = 64;
        break;
      case Op_VecX:
        vsize = 128;
        break;
      case Op_VecA:
        vsize = Matcher::scalable_vector_reg_size(T_BYTE) * 8;
        break;
      default:
        assert(false, "bad register type for spill");
        ShouldNotReachHere();
      }
      st->print("\t# vector spill size = %d", vsize);
    } else if (ideal_reg() == Op_RegVectMask) {
      assert(Matcher::supports_scalable_vector(), "bad register type for spill");
      int vsize = Matcher::scalable_predicate_reg_slots() * 32;
      st->print("\t# predicate spill size = %d", vsize);
    } else {
      st->print("\t# spill size = %d", is64 ? 64 : 32);
    }
  }

  return 0;

}
 2142 
#ifndef PRODUCT
// Print the spill copy; before register allocation only node indices exist.
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    implementation(nullptr, ra_, false, st);
}
#endif

void MachSpillCopyNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  implementation(masm, ra_, false, nullptr);
}

uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  // Variable size; measure the emitted code.
  return MachNode::size(ra_);
}
 2159 
 2160 //=============================================================================
 2161 
 2162 #ifndef PRODUCT
 2163 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 2164   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 2165   int reg = ra_->get_reg_first(this);
 2166   st->print("add %s, rsp, #%d]\t# box lock",
 2167             Matcher::regName[reg], offset);
 2168 }
 2169 #endif
 2170 
// Materialize the address of the on-stack lock box into the result register.
void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  // This add will handle any 24-bit signed offset. 24 bits allows an
  // 8 megabyte stack frame.
  __ add(as_Register(reg), sp, offset);
}
 2179 
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());

  // One instruction when the offset fits an add immediate; otherwise the
  // macro assembler expands the add into two instructions.
  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    return NativeInstruction::instruction_size;
  } else {
    return 2 * NativeInstruction::instruction_size;
  }
}
 2190 
 2191 //=============================================================================
 2192 
 2193 #ifndef PRODUCT
 2194 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 2195 {
 2196   st->print_cr("# MachUEPNode");
 2197   if (UseCompressedClassPointers) {
 2198     st->print_cr("\tldrw rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 2199     st->print_cr("\tldrw r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
 2200     st->print_cr("\tcmpw rscratch1, r10");
 2201   } else {
 2202     st->print_cr("\tldr rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 2203     st->print_cr("\tldr r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
 2204     st->print_cr("\tcmp rscratch1, r10");
 2205   }
 2206   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
 2207 }
 2208 #endif
 2209 
// Emit the inline-cache check at the unverified entry point.
void MachUEPNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
{
  __ ic_check(InteriorEntryAlignment);
}

uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  // Variable size (depends on alignment padding); measure the emitted code.
  return MachNode::size(ra_);
}
 2219 
 2220 // REQUIRED EMIT CODE
 2221 
 2222 //=============================================================================
 2223 
 2224 // Emit exception handler code.
// Emit exception handler code.
// Returns the offset of the handler within the stub section, or 0 if the
// code cache is full.
int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  address base = __ start_a_stub(size_exception_handler());
  if (base == nullptr) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
 2242 
 2243 // Emit deopt handler code.
// Emit deopt handler code.
// Returns the offset of the handler within the stub section, or 0 if the
// code cache is full.
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  address base = __ start_a_stub(size_deopt_handler());
  if (base == nullptr) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // Load lr with the address of this handler so the unpack blob can find it.
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset == (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
 2262 
 2263 // REQUIRED MATCHER CODE
 2264 
 2265 //=============================================================================
 2266 
// Identify extra cases that we might want to provide match rules for vector
// nodes and other intrinsics guarded with vector length (vlen) and element
// type (bt). Returns false when a rule exists but the current CPU features
// or VM flags do not support it.
bool Matcher::match_rule_supported(int opcode) {
  if (!has_match_rule(opcode))
    return false;

  switch (opcode) {
    case Op_OnSpinWait:
      return VM_Version::supports_on_spin_wait();
    case Op_CacheWB:
    case Op_CacheWBPreSync:
    case Op_CacheWBPostSync:
      // Data-cache maintenance requires DC CVAP/CVADP support.
      if (!VM_Version::supports_data_cache_line_flush()) {
        return false;
      }
      break;
    case Op_ExpandBits:
    case Op_CompressBits:
      // BEXT/BDEP are provided by the SVE2 bit-permute extension.
      if (!VM_Version::supports_svebitperm()) {
        return false;
      }
      break;
    case Op_FmaF:
    case Op_FmaD:
    case Op_FmaVF:
    case Op_FmaVD:
      if (!UseFMA) {
        return false;
      }
      break;
    case Op_FmaHF:
      // UseFMA flag also needs to be checked along with FEAT_FP16
      if (!UseFMA || !is_feat_fp16_supported()) {
        return false;
      }
      break;
    case Op_AddHF:
    case Op_SubHF:
    case Op_MulHF:
    case Op_DivHF:
    case Op_MinHF:
    case Op_MaxHF:
    case Op_SqrtHF:
      // Half-precision floating point scalar operations require FEAT_FP16
      // to be available. FEAT_FP16 is enabled if both "fphp" and "asimdhp"
      // features are supported.
      if (!is_feat_fp16_supported()) {
        return false;
      }
      break;
  }

  return true; // Per default match rules are supported.
}
 2319 
// Register mask for SVE predicate (governing) registers.
const RegMask* Matcher::predicate_reg_mask(void) {
  return &_PR_REG_mask;
}

// Vector calling convention is available whenever the Vector API is enabled.
bool Matcher::supports_vector_calling_convention(void) {
  return EnableVectorSupport;
}
 2327 
// Vector values are returned in V0; the pair's high half depends on how
// many slots the ideal vector register occupies.
OptoRegPair Matcher::vector_return_value(uint ideal_reg) {
  assert(EnableVectorSupport, "sanity");
  int lo = V0_num;
  int hi = V0_H_num;
  if (ideal_reg == Op_VecX || ideal_reg == Op_VecA) {
    hi = V0_K_num;
  }
  return OptoRegPair(hi, lo);
}
 2337 
 2338 // Is this branch offset short enough that a short branch can be used?
 2339 //
 2340 // NOTE: If the platform does not provide any short branch variants, then
 2341 //       this method should return false for offset 0.
 2342 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
 2343   // The passed offset is relative to address of the branch.
 2344 
 2345   return (-32768 <= offset && offset < 32768);
 2346 }
 2347 
// Vector width in bytes for the given element type, or 0 when vectors of
// that type are not profitable (fewer than two elements, or under 4 bytes).
int Matcher::vector_width_in_bytes(BasicType bt) {
  // The MaxVectorSize should have been set by detecting SVE max vector register size.
  int size = MIN2((UseSVE > 0) ? (int)FloatRegister::sve_vl_max : (int)FloatRegister::neon_vl, (int)MaxVectorSize);
  // Minimum 2 values in vector
  if (size < 2*type2aelembytes(bt)) size = 0;
  // But never < 4
  if (size < 4) size = 0;
  return size;
}
 2358 
// Limits on vector size (number of elements) loaded into vector.
int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
 2363 
 2364 int Matcher::min_vector_size(const BasicType bt) {
 2365   int max_size = max_vector_size(bt);
 2366   // Limit the min vector size to 8 bytes.
 2367   int size = 8 / type2aelembytes(bt);
 2368   if (bt == T_BYTE) {
 2369     // To support vector api shuffle/rearrange.
 2370     size = 4;
 2371   } else if (bt == T_BOOLEAN) {
 2372     // To support vector api load/store mask.
 2373     size = 2;
 2374   }
 2375   if (size < 2) size = 2;
 2376   return MIN2(size, max_size);
 2377 }
 2378 
// Auto-vectorization uses the same element limit as the Vector API.
int Matcher::max_vector_size_auto_vectorization(const BasicType bt) {
  return Matcher::max_vector_size(bt);
}

// Actual max scalable vector register length.
int Matcher::scalable_vector_reg_size(const BasicType bt) {
  return Matcher::max_vector_size(bt);
}
 2387 
// Vector ideal reg.
// Select the ideal register kind for a vector of 'len' bytes: scalable
// SVE (VecA) when the length exceeds NEON capacity, else VecD/VecX.
uint Matcher::vector_ideal_reg(int len) {
  if (UseSVE > 0 && FloatRegister::neon_vl < len && len <= FloatRegister::sve_vl_max) {
    return Op_VecA;
  }
  switch(len) {
    // For 16-bit/32-bit mask vector, reuse VecD.
    case  2:
    case  4:
    case  8: return Op_VecD;
    case 16: return Op_VecX;
  }
  ShouldNotReachHere();
  return 0;
}
 2403 
// Replace a generic vector operand with the concrete operand class that
// matches the given ideal vector register kind.
MachOper* Matcher::pd_specialize_generic_vector_operand(MachOper* generic_opnd, uint ideal_reg, bool is_temp) {
  assert(Matcher::is_generic_vector(generic_opnd), "not generic");
  switch (ideal_reg) {
    case Op_VecA: return new vecAOper();
    case Op_VecD: return new vecDOper();
    case Op_VecX: return new vecXOper();
  }
  ShouldNotReachHere();
  return nullptr;
}
 2414 
// AArch64 does not model any mach nodes as pure register-to-register moves.
bool Matcher::is_reg2reg_move(MachNode* m) {
  return false;
}

// A generic vector operand is identified by the VREG opcode.
bool Matcher::is_generic_vector(MachOper* opnd)  {
  return opnd->opcode() == VREG;
}
 2422 
// Return whether or not this register is ever used as an argument.
// This function is used on startup to build the trampoline stubs in
// generateOptoStub.  Registers not mentioned will be killed by the VM
// call in the trampoline, and arguments in those registers not be
// available to the callee.
bool Matcher::can_be_java_arg(int reg)
{
  // Java arguments live in r0-r7 and v0-v7 (both halves of each),
  // per the AArch64 calling convention.
  return
    reg ==  R0_num || reg == R0_H_num ||
    reg ==  R1_num || reg == R1_H_num ||
    reg ==  R2_num || reg == R2_H_num ||
    reg ==  R3_num || reg == R3_H_num ||
    reg ==  R4_num || reg == R4_H_num ||
    reg ==  R5_num || reg == R5_H_num ||
    reg ==  R6_num || reg == R6_H_num ||
    reg ==  R7_num || reg == R7_H_num ||
    reg ==  V0_num || reg == V0_H_num ||
    reg ==  V1_num || reg == V1_H_num ||
    reg ==  V2_num || reg == V2_H_num ||
    reg ==  V3_num || reg == V3_H_num ||
    reg ==  V4_num || reg == V4_H_num ||
    reg ==  V5_num || reg == V5_H_num ||
    reg ==  V6_num || reg == V6_H_num ||
    reg ==  V7_num || reg == V7_H_num;
}

// Any argument register may be spilled.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
 2453 
// Integer register pressure threshold used by the register allocator;
// -XX:INTPRESSURE overrides the computed default.
uint Matcher::int_pressure_limit()
{
  // JDK-8183543: When taking the number of available registers as int
  // register pressure threshold, the jtreg test:
  // test/hotspot/jtreg/compiler/regalloc/TestC2IntPressure.java
  // failed due to C2 compilation failure with
  // "COMPILE SKIPPED: failed spill-split-recycle sanity check".
  //
  // A derived pointer is live at CallNode and then is flagged by RA
  // as a spilled LRG. Spilling heuristics(Spill-USE) explicitly skip
  // derived pointers and lastly fail to spill after reaching maximum
  // number of iterations. Lowering the default pressure threshold to
  // (_NO_SPECIAL_REG32_mask.Size() minus 1) forces CallNode to become
  // a high register pressure area of the code so that split_DEF can
  // generate DefinitionSpillCopy for the derived pointer.
  uint default_int_pressure_threshold = _NO_SPECIAL_REG32_mask.Size() - 1;
  if (!PreserveFramePointer) {
    // When PreserveFramePointer is off, frame pointer is allocatable,
    // but different from other SOC registers, it is excluded from
    // fatproj's mask because its save type is No-Save. Decrease 1 to
    // ensure high pressure at fatproj when PreserveFramePointer is off.
    // See check_pressure_at_fatproj().
    default_int_pressure_threshold--;
  }
  return (INTPRESSURE == -1) ? default_int_pressure_threshold : INTPRESSURE;
}
 2480 
// Float register pressure threshold; -XX:FLOATPRESSURE overrides the default.
uint Matcher::float_pressure_limit()
{
  // _FLOAT_REG_mask is generated by adlc from the float_reg register class.
  return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.Size() : FLOATPRESSURE;
}
 2486 
 2487 bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
 2488   return false;
 2489 }
 2490 
 2491 RegMask Matcher::divI_proj_mask() {
 2492   ShouldNotReachHere();
 2493   return RegMask();
 2494 }
 2495 
 2496 // Register for MODI projection of divmodI.
 2497 RegMask Matcher::modI_proj_mask() {
 2498   ShouldNotReachHere();
 2499   return RegMask();
 2500 }
 2501 
// Register for DIVL projection of divmodL.
// Unreachable on AArch64 (ShouldNotReachHere); see divI_proj_mask above.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
 2507 
// Register for MODL projection of divmodL.
// Unreachable on AArch64 (ShouldNotReachHere); see divI_proj_mask above.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
 2513 
// Mask of the register used to save SP across MethodHandle invokes:
// on AArch64 this is the frame pointer's register mask.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
 2517 
 2518 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
 2519   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
 2520     Node* u = addp->fast_out(i);
 2521     if (u->is_LoadStore()) {
 2522       // On AArch64, LoadStoreNodes (i.e. compare and swap
 2523       // instructions) only take register indirect as an operand, so
 2524       // any attempt to use an AddPNode as an input to a LoadStoreNode
 2525       // must fail.
 2526       return false;
 2527     }
 2528     if (u->is_Mem()) {
 2529       int opsize = u->as_Mem()->memory_size();
 2530       assert(opsize > 0, "unexpected memory operand size");
 2531       if (u->as_Mem()->memory_size() != (1<<shift)) {
 2532         return false;
 2533       }
 2534     }
 2535   }
 2536   return true;
 2537 }
 2538 
// Convert BoolTest condition to Assembler condition.
// Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
Assembler::Condition to_assembler_cond(BoolTest::mask cond) {
  Assembler::Condition result;
  switch(cond) {
    case BoolTest::eq:
      result = Assembler::EQ; break;
    case BoolTest::ne:
      result = Assembler::NE; break;
    case BoolTest::le:
      result = Assembler::LE; break;
    case BoolTest::ge:
      result = Assembler::GE; break;
    case BoolTest::lt:
      result = Assembler::LT; break;
    case BoolTest::gt:
      result = Assembler::GT; break;
    // Unsigned comparisons map to the unsigned condition codes.
    case BoolTest::ule:
      result = Assembler::LS; break;
    case BoolTest::uge:
      result = Assembler::HS; break;
    case BoolTest::ult:
      result = Assembler::LO; break;
    case BoolTest::ugt:
      result = Assembler::HI; break;
    case BoolTest::overflow:
      result = Assembler::VS; break;
    case BoolTest::no_overflow:
      result = Assembler::VC; break;
    default:
      ShouldNotReachHere();
      return Assembler::Condition(-1);
  }

  // Check conversion: cross-check the mapping above against the operand
  // classes it replicates (assert-only, so debug builds only).
  if (cond & BoolTest::unsigned_compare) {
    assert(cmpOpUOper((BoolTest::mask)((int)cond & ~(BoolTest::unsigned_compare))).ccode() == result, "Invalid conversion");
  } else {
    assert(cmpOpOper(cond).ccode() == result, "Invalid conversion");
  }

  return result;
}
 2582 
// Binary src (Replicate con)
// Returns true when 'm' is a (Replicate con) input of binary vector node 'n'
// and the replicated int/long constant is encodable as an immediate of the
// corresponding SVE instruction, so the matcher may clone 'm' into 'n'.
static bool is_valid_sve_arith_imm_pattern(Node* n, Node* m) {
  if (n == nullptr || m == nullptr) {
    return false;
  }

  // Only relevant when generating SVE code and 'm' replicates a scalar.
  if (UseSVE == 0 || m->Opcode() != Op_Replicate) {
    return false;
  }

  // The replicated value must be an int or long constant.
  Node* imm_node = m->in(1);
  if (!imm_node->is_Con()) {
    return false;
  }

  const Type* t = imm_node->bottom_type();
  if (!(t->isa_int() || t->isa_long())) {
    return false;
  }

  switch (n->Opcode()) {
  case Op_AndV:
  case Op_OrV:
  case Op_XorV: {
    // Bitwise ops use the SVE logical-immediate encoding, validated against
    // the element size of 'n'.
    Assembler::SIMD_RegVariant T = Assembler::elemType_to_regVariant(Matcher::vector_element_basic_type(n));
    uint64_t value = t->isa_long() ? (uint64_t)imm_node->get_long() : (uint64_t)imm_node->get_int();
    return Assembler::operand_valid_for_sve_logical_immediate(Assembler::regVariant_to_elemBits(T), value);
  }
  case Op_AddVB:
    // Byte add: the immediate must fit in [-255, 255].
    return (imm_node->get_int() <= 255 && imm_node->get_int() >= -255);
  case Op_AddVS:
  case Op_AddVI:
    // Short/int add: validated against the SVE add/sub immediate encoding.
    return Assembler::operand_valid_for_sve_add_sub_immediate((int64_t)imm_node->get_int());
  case Op_AddVL:
    return Assembler::operand_valid_for_sve_add_sub_immediate(imm_node->get_long());
  default:
    return false;
  }
}
 2622 
 2623 // (XorV src (Replicate m1))
 2624 // (XorVMask src (MaskAll m1))
 2625 static bool is_vector_bitwise_not_pattern(Node* n, Node* m) {
 2626   if (n != nullptr && m != nullptr) {
 2627     return (n->Opcode() == Op_XorV || n->Opcode() == Op_XorVMask) &&
 2628            VectorNode::is_all_ones_vector(m);
 2629   }
 2630   return false;
 2631 }
 2632 
 2633 // Should the matcher clone input 'm' of node 'n'?
 2634 bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
 2635   if (is_vshift_con_pattern(n, m) ||
 2636       is_vector_bitwise_not_pattern(n, m) ||
 2637       is_valid_sve_arith_imm_pattern(n, m) ||
 2638       is_encode_and_store_pattern(n, m)) {
 2639     mstack.push(m, Visit);
 2640     return true;
 2641   }
 2642   return false;
 2643 }
 2644 
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {

  // Loads and stores with indirect memory input (e.g., volatile loads and
  // stores) do not subsume the input into complex addressing expressions. If
  // the addressing expression is input to at least one such load or store, do
  // not clone the addressing expression. Query needs_acquiring_load and
  // needs_releasing_store as a proxy for indirect memory input, as it is not
  // possible to directly query for indirect memory input at this stage.
  for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
    Node* n = m->fast_out(i);
    if (n->is_Load() && needs_acquiring_load(n)) {
      return false;
    }
    if (n->is_Store() && needs_releasing_store(n)) {
      return false;
    }
  }

  // Simple base+offset expressions are handled by the shared helper.
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  // (AddP base address (LShiftL (ConvI2L idx) con)): fold the shifted,
  // possibly sign-extended index into the addressing mode, provided every
  // memory use of this AddP accesses exactly (1 << con) bytes.
  Node *off = m->in(AddPNode::Offset);
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    // NOTE(review): test_set here vs. plain set above -- the returned prior
    // value is intentionally ignored.
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    // (AddP base address (ConvI2L idx)): fold the sign-extended index into
    // the addressing mode.
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
 2702 
// Emit a volatile access: volatile loads/stores support only plain
// register-indirect addressing (no index, scale, or displacement), which
// the guarantees below enforce before INSN is emitted against [BASE].
// Note: the SCRATCH argument is currently unused by the expansion.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
 2710 
 2711 
 2712 static Address mem2address(int opcode, Register base, int index, int size, int disp)
 2713   {
 2714     Address::extend scale;
 2715 
 2716     // Hooboy, this is fugly.  We need a way to communicate to the
 2717     // encoder that the index needs to be sign extended, so we have to
 2718     // enumerate all the cases.
 2719     switch (opcode) {
 2720     case INDINDEXSCALEDI2L:
 2721     case INDINDEXSCALEDI2LN:
 2722     case INDINDEXI2L:
 2723     case INDINDEXI2LN:
 2724       scale = Address::sxtw(size);
 2725       break;
 2726     default:
 2727       scale = Address::lsl(size);
 2728     }
 2729 
 2730     if (index == -1) {
 2731       return Address(base, disp);
 2732     } else {
 2733       assert(disp == 0, "unsupported address mode: disp = %d", disp);
 2734       return Address(base, as_Register(index), scale);
 2735     }
 2736   }
 2737 
 2738 
// Pointer-to-member types for the MacroAssembler load/store emitters that
// the loadStore() helpers below invoke.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_insn2)(Register Rt, Register adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
 2744 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  // Emits 'insn' for a general-register access of 'size_in_memory' bytes
  // at the address described by (opcode, base, index, scale, disp).
  // Base+offset addresses are legitimized first, which may clobber
  // rscratch1 (hence the assertions that it differs from base and reg).
  static void loadStore(C2_MacroAssembler* masm, mem_insn insn,
                        Register reg, int opcode,
                        Register base, int index, int scale, int disp,
                        int size_in_memory)
  {
    Address addr = mem2address(opcode, base, index, scale, disp);
    if (addr.getMode() == Address::base_plus_offset) {
      /* Fix up any out-of-range offsets. */
      assert_different_registers(rscratch1, base);
      assert_different_registers(rscratch1, reg);
      addr = __ legitimize_address(addr, size_in_memory, rscratch1);
    }
    (masm->*insn)(reg, addr);
  }
 2762 
  // Float/double flavour of loadStore.  Computes the index-extend mode
  // inline (only the scaled I2L operand forms need sign extension here)
  // and legitimizes base+disp addresses, which may clobber rscratch1.
  static void loadStore(C2_MacroAssembler* masm, mem_float_insn insn,
                        FloatRegister reg, int opcode,
                        Register base, int index, int size, int disp,
                        int size_in_memory)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      // Fix up any out-of-range offsets.
      assert_different_registers(rscratch1, base);
      Address addr = Address(base, disp);
      addr = __ legitimize_address(addr, size_in_memory, rscratch1);
      (masm->*insn)(reg, addr);
    } else {
      // Register-indexed form: a displacement cannot be combined with it.
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm->*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
 2790 
  // Vector flavour of loadStore: emits 'insn' with SIMD register variant T.
  // Unlike the scalar overloads, no out-of-range offset fixup is performed
  // here -- assumes 'disp' is directly encodable (NOTE(review): confirm
  // callers guarantee this).
  static void loadStore(C2_MacroAssembler* masm, mem_vector_insn insn,
                        FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                        int opcode, Register base, int index, int size, int disp)
  {
    if (index == -1) {
      (masm->*insn)(reg, T, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode");
      (masm->*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
    }
  }
 2802 
 2803 %}
 2804 
 2805 
 2806 
 2807 //----------ENCODING BLOCK-----------------------------------------------------
 2808 // This block specifies the encoding classes used by the compiler to
 2809 // output byte streams.  Encoding classes are parameterized macros
 2810 // used by Machine Instruction Nodes in order to generate the bit
 2811 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  Four interfaces are
// currently supported: REG_INTER, CONST_INTER, MEMORY_INTER, and
// COND_INTER.  REG_INTER causes an operand to generate a function
 2815 // which returns its register number when queried.  CONST_INTER causes
 2816 // an operand to generate a function which returns the value of the
 2817 // constant when queried.  MEMORY_INTER causes an operand to generate
 2818 // four functions which return the Base Register, the Index Register,
 2819 // the Scale Value, and the Offset Value of the operand when queried.
 2820 // COND_INTER causes an operand to generate six functions which return
 2821 // the encoding code (ie - encoding bits for the instruction)
 2822 // associated with each basic boolean condition for a conditional
 2823 // instruction.
 2824 //
 2825 // Instructions specify two basic values for encoding.  Again, a
 2826 // function is available to check if the constant displacement is an
 2827 // oop. They use the ins_encode keyword to specify their encoding
 2828 // classes (which must be a sequence of enc_class names, and their
 2829 // parameters, specified in the encoding block), and they use the
 2830 // opcode keyword to specify, in order, their primary, secondary, and
 2831 // tertiary opcode.  Only the opcode sections which a particular
 2832 // instruction needs for encoding need to be specified.
 2833 encode %{
 2834   // Build emit functions for each basic byte or larger field in the
 2835   // intel encoding scheme (opcode, rm, sib, immediate), and call them
 2836   // from C++ code in the enc_class source block.  Emit functions will
 2837   // live in the main source block for now.  In future, we can
 2838   // generalize this by adding a syntax that specifies the sizes of
 2839   // fields in an order, so that the adlc can build the emit functions
 2840   // automagically
 2841 
  // catch all for unimplemented encodings: any instruction still using this
  // encoder stops the VM via MacroAssembler::unimplemented at runtime.
  enc_class enc_unimplemented %{
    __ unimplemented("C2 catch all");
  %}
 2846 
 2847   // BEGIN Non-volatile memory access
 2848 
 2849   // This encoding class is generated automatically from ad_encode.m4.
 2850   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2851   enc_class aarch64_enc_ldrsbw(iRegI dst, memory1 mem) %{
 2852     Register dst_reg = as_Register($dst$$reg);
 2853     loadStore(masm, &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
 2854                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2855   %}
 2856 
 2857   // This encoding class is generated automatically from ad_encode.m4.
 2858   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2859   enc_class aarch64_enc_ldrsb(iRegI dst, memory1 mem) %{
 2860     Register dst_reg = as_Register($dst$$reg);
 2861     loadStore(masm, &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
 2862                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2863   %}
 2864 
 2865   // This encoding class is generated automatically from ad_encode.m4.
 2866   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2867   enc_class aarch64_enc_ldrb(iRegI dst, memory1 mem) %{
 2868     Register dst_reg = as_Register($dst$$reg);
 2869     loadStore(masm, &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
 2870                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2871   %}
 2872 
 2873   // This encoding class is generated automatically from ad_encode.m4.
 2874   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2875   enc_class aarch64_enc_ldrb(iRegL dst, memory1 mem) %{
 2876     Register dst_reg = as_Register($dst$$reg);
 2877     loadStore(masm, &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
 2878                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2879   %}
 2880 
 2881   // This encoding class is generated automatically from ad_encode.m4.
 2882   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2883   enc_class aarch64_enc_ldrshw(iRegI dst, memory2 mem) %{
 2884     Register dst_reg = as_Register($dst$$reg);
 2885     loadStore(masm, &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
 2886                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2887   %}
 2888 
 2889   // This encoding class is generated automatically from ad_encode.m4.
 2890   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2891   enc_class aarch64_enc_ldrsh(iRegI dst, memory2 mem) %{
 2892     Register dst_reg = as_Register($dst$$reg);
 2893     loadStore(masm, &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
 2894                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2895   %}
 2896 
 2897   // This encoding class is generated automatically from ad_encode.m4.
 2898   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2899   enc_class aarch64_enc_ldrh(iRegI dst, memory2 mem) %{
 2900     Register dst_reg = as_Register($dst$$reg);
 2901     loadStore(masm, &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
 2902                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2903   %}
 2904 
 2905   // This encoding class is generated automatically from ad_encode.m4.
 2906   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2907   enc_class aarch64_enc_ldrh(iRegL dst, memory2 mem) %{
 2908     Register dst_reg = as_Register($dst$$reg);
 2909     loadStore(masm, &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
 2910                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2911   %}
 2912 
 2913   // This encoding class is generated automatically from ad_encode.m4.
 2914   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2915   enc_class aarch64_enc_ldrw(iRegI dst, memory4 mem) %{
 2916     Register dst_reg = as_Register($dst$$reg);
 2917     loadStore(masm, &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
 2918                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2919   %}
 2920 
 2921   // This encoding class is generated automatically from ad_encode.m4.
 2922   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2923   enc_class aarch64_enc_ldrw(iRegL dst, memory4 mem) %{
 2924     Register dst_reg = as_Register($dst$$reg);
 2925     loadStore(masm, &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
 2926                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2927   %}
 2928 
 2929   // This encoding class is generated automatically from ad_encode.m4.
 2930   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2931   enc_class aarch64_enc_ldrsw(iRegL dst, memory4 mem) %{
 2932     Register dst_reg = as_Register($dst$$reg);
 2933     loadStore(masm, &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
 2934                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2935   %}
 2936 
 2937   // This encoding class is generated automatically from ad_encode.m4.
 2938   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2939   enc_class aarch64_enc_ldr(iRegL dst, memory8 mem) %{
 2940     Register dst_reg = as_Register($dst$$reg);
 2941     loadStore(masm, &MacroAssembler::ldr, dst_reg, $mem->opcode(),
 2942                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 2943   %}
 2944 
 2945   // This encoding class is generated automatically from ad_encode.m4.
 2946   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2947   enc_class aarch64_enc_ldrs(vRegF dst, memory4 mem) %{
 2948     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
 2949     loadStore(masm, &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
 2950                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2951   %}
 2952 
 2953   // This encoding class is generated automatically from ad_encode.m4.
 2954   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2955   enc_class aarch64_enc_ldrd(vRegD dst, memory8 mem) %{
 2956     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
 2957     loadStore(masm, &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
 2958                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 2959   %}
 2960 
 2961   // This encoding class is generated automatically from ad_encode.m4.
 2962   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2963   enc_class aarch64_enc_strb(iRegI src, memory1 mem) %{
 2964     Register src_reg = as_Register($src$$reg);
 2965     loadStore(masm, &MacroAssembler::strb, src_reg, $mem->opcode(),
 2966                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2967   %}
 2968 
 2969   // This encoding class is generated automatically from ad_encode.m4.
 2970   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2971   enc_class aarch64_enc_strb0(memory1 mem) %{
 2972     loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(),
 2973                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2974   %}
 2975 
 2976   // This encoding class is generated automatically from ad_encode.m4.
 2977   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2978   enc_class aarch64_enc_strh(iRegI src, memory2 mem) %{
 2979     Register src_reg = as_Register($src$$reg);
 2980     loadStore(masm, &MacroAssembler::strh, src_reg, $mem->opcode(),
 2981                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2982   %}
 2983 
 2984   // This encoding class is generated automatically from ad_encode.m4.
 2985   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2986   enc_class aarch64_enc_strh0(memory2 mem) %{
 2987     loadStore(masm, &MacroAssembler::strh, zr, $mem->opcode(),
 2988                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2989   %}
 2990 
 2991   // This encoding class is generated automatically from ad_encode.m4.
 2992   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2993   enc_class aarch64_enc_strw(iRegI src, memory4 mem) %{
 2994     Register src_reg = as_Register($src$$reg);
 2995     loadStore(masm, &MacroAssembler::strw, src_reg, $mem->opcode(),
 2996                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2997   %}
 2998 
 2999   // This encoding class is generated automatically from ad_encode.m4.
 3000   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3001   enc_class aarch64_enc_strw0(memory4 mem) %{
 3002     loadStore(masm, &MacroAssembler::strw, zr, $mem->opcode(),
 3003                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 3004   %}
 3005 
 3006   // This encoding class is generated automatically from ad_encode.m4.
 3007   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3008   enc_class aarch64_enc_str(iRegL src, memory8 mem) %{
 3009     Register src_reg = as_Register($src$$reg);
 3010     // we sometimes get asked to store the stack pointer into the
 3011     // current thread -- we cannot do that directly on AArch64
 3012     if (src_reg == r31_sp) {
 3013       assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
 3014       __ mov(rscratch2, sp);
 3015       src_reg = rscratch2;
 3016     }
 3017     loadStore(masm, &MacroAssembler::str, src_reg, $mem->opcode(),
 3018                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 3019   %}
 3020 
 3021   // This encoding class is generated automatically from ad_encode.m4.
 3022   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3023   enc_class aarch64_enc_str0(memory8 mem) %{
 3024     loadStore(masm, &MacroAssembler::str, zr, $mem->opcode(),
 3025                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 3026   %}
 3027 
 3028   // This encoding class is generated automatically from ad_encode.m4.
 3029   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3030   enc_class aarch64_enc_strs(vRegF src, memory4 mem) %{
 3031     FloatRegister src_reg = as_FloatRegister($src$$reg);
 3032     loadStore(masm, &MacroAssembler::strs, src_reg, $mem->opcode(),
 3033                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 3034   %}
 3035 
 3036   // This encoding class is generated automatically from ad_encode.m4.
 3037   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3038   enc_class aarch64_enc_strd(vRegD src, memory8 mem) %{
 3039     FloatRegister src_reg = as_FloatRegister($src$$reg);
 3040     loadStore(masm, &MacroAssembler::strd, src_reg, $mem->opcode(),
 3041                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 3042   %}
 3043 
 3044   // This encoding class is generated automatically from ad_encode.m4.
 3045   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3046   enc_class aarch64_enc_strb0_ordered(memory4 mem) %{
 3047       __ membar(Assembler::StoreStore);
 3048       loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(),
 3049                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 3050   %}
 3051 
 3052   // END Non-volatile memory access
 3053 
  // Vector loads and stores
  // The H/S/D/Q suffix selects the SIMD_RegVariant passed to the vector
  // loadStore helper, and hence the access width of the emitted ldr/str.
  enc_class aarch64_enc_ldrvH(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvS(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvH(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvS(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
 3102 
  // volatile loads and stores
  // Volatile stores use the store-release (stlr*) instructions; the *0
  // variants store the zero register.

  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrb0(memory mem) %{
    MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrh0(memory mem) %{
    MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  enc_class aarch64_enc_stlrw0(memory mem) %{
    MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}
 3134 
  // Volatile loads use the load-acquire (ldar*) instructions.  The sub-word
  // forms zero-extend, so the signed variants sign-extend the loaded value
  // afterwards (sxtbw/sxtb/sxthw/sxth).
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}
 3197 
 3198   enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
 3199     MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3200              rscratch1, ldarw);
 3201     __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
 3202   %}
 3203 
 3204   enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
 3205     MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3206              rscratch1, ldar);
 3207     __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
 3208   %}
 3209 
  // Volatile (store-release) store encodings.

  // store-release doubleword (long/pointer)
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    // (encoding register 31 as the source of stlr would mean zr, not sp),
    // so copy sp into rscratch2 and store that instead
    if (src_reg == r31_sp) {
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // store-release of constant zero (uses the zero register directly)
  enc_class aarch64_enc_stlr0(memory mem) %{
    MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // store-release float: move the 32-bit pattern into rscratch2, then stlrw
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // store-release double: move the 64-bit pattern into rscratch2, then stlr
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
 3245 
  // synchronized read/update encodings

  // load-acquire-exclusive doubleword.  The exclusive instructions accept
  // only a bare base register, so any displacement and/or scaled index in
  // the memory operand is first folded into rscratch1 with lea.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory8 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {     // no index register
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}

  // store-release-exclusive doubleword.  The status word (0 on success)
  // lands in rscratch1; the trailing cmpw against zr sets the condition
  // flags so a following branch/cset can test EQ (success) / NE (failure).
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory8 mem) %{
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {     // no index register
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    __ cmpw(rscratch1, zr);
  %}
 3304 
  // Compare-and-exchange encodings.  Each variant delegates to
  // MacroAssembler::cmpxchg with the appropriate operand size; these plain
  // versions request release-only ordering, the _acq versions below add
  // acquire as well.  All require a bare base register (no index/disp).

  // 64-bit (xword) CAS, release only
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit (word) CAS, release only
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 16-bit (halfword) CAS, release only
  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 8-bit (byte) CAS, release only
  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}


  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit (word) CAS, acquire and release
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 16-bit (halfword) CAS, acquire and release
  enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 8-bit (byte) CAS, acquire and release
  enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3365 
  // auxiliary used for CompareAndSwapX to set result register:
  // res := 1 if the preceding compare left EQ set, else 0
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    Register res_reg = as_Register($res$$reg);
    __ cset(res_reg, Assembler::EQ);
  %}
 3371 
 3372   // prefetch encodings
 3373 
 3374   enc_class aarch64_enc_prefetchw(memory mem) %{
 3375     Register base = as_Register($mem$$base);
 3376     int index = $mem$$index;
 3377     int scale = $mem$$scale;
 3378     int disp = $mem$$disp;
 3379     if (index == -1) {
 3380       // Fix up any out-of-range offsets.
 3381       assert_different_registers(rscratch1, base);
 3382       Address addr = Address(base, disp);
 3383       addr = __ legitimize_address(addr, 8, rscratch1);
 3384       __ prfm(addr, PSTL1KEEP);
 3385     } else {
 3386       Register index_reg = as_Register(index);
 3387       if (disp == 0) {
 3388         __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
 3389       } else {
 3390         __ lea(rscratch1, Address(base, disp));
 3391 	__ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
 3392       }
 3393     }
 3394   %}
 3395 
  // mov encodings

  // materialize a 32-bit integer constant; zero uses the zero register
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    uint32_t con = (uint32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // materialize a 64-bit integer constant; zero uses the zero register
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    Register dst_reg = as_Register($dst$$reg);
    uint64_t con = (uint64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}

  // materialize a pointer constant, dispatching on its relocation type.
  // null and the special value 1 are matched by the dedicated mov_p0 /
  // mov_p1 encodings below and must not reach this one.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == nullptr || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        // Addresses outside the valid range (or below the first page)
        // are materialized as a full immediate; otherwise use the
        // shorter adrp+add page-relative sequence.
        if (! __ is_valid_AArch64_address(con) ||
            con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          uint64_t offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}

  // materialize the null pointer
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // materialize the pointer constant 1
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (uint64_t)1);
  %}

  // materialize the card-table byte map base address
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    __ load_byte_map_base($dst$$Register);
  %}

  // materialize a narrow (compressed) oop constant
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == nullptr) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // materialize a narrow null
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // materialize a narrow (compressed) klass constant
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == nullptr) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
 3485 
 3486   // arithmetic encodings
 3487 
 3488   enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
 3489     Register dst_reg = as_Register($dst$$reg);
 3490     Register src_reg = as_Register($src1$$reg);
 3491     int32_t con = (int32_t)$src2$$constant;
 3492     // add has primary == 0, subtract has primary == 1
 3493     if ($primary) { con = -con; }
 3494     if (con < 0) {
 3495       __ subw(dst_reg, src_reg, -con);
 3496     } else {
 3497       __ addw(dst_reg, src_reg, con);
 3498     }
 3499   %}
 3500 
 3501   enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
 3502     Register dst_reg = as_Register($dst$$reg);
 3503     Register src_reg = as_Register($src1$$reg);
 3504     int32_t con = (int32_t)$src2$$constant;
 3505     // add has primary == 0, subtract has primary == 1
 3506     if ($primary) { con = -con; }
 3507     if (con < 0) {
 3508       __ sub(dst_reg, src_reg, -con);
 3509     } else {
 3510       __ add(dst_reg, src_reg, con);
 3511     }
 3512   %}
 3513 
 3514   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
 3515    Register dst_reg = as_Register($dst$$reg);
 3516    Register src1_reg = as_Register($src1$$reg);
 3517    Register src2_reg = as_Register($src2$$reg);
 3518     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
 3519   %}
 3520 
 3521   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
 3522    Register dst_reg = as_Register($dst$$reg);
 3523    Register src1_reg = as_Register($src1$$reg);
 3524    Register src2_reg = as_Register($src2$$reg);
 3525     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
 3526   %}
 3527 
 3528   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
 3529    Register dst_reg = as_Register($dst$$reg);
 3530    Register src1_reg = as_Register($src1$$reg);
 3531    Register src2_reg = as_Register($src2$$reg);
 3532     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
 3533   %}
 3534 
 3535   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
 3536    Register dst_reg = as_Register($dst$$reg);
 3537    Register src1_reg = as_Register($src1$$reg);
 3538    Register src2_reg = as_Register($src2$$reg);
 3539     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
 3540   %}
 3541 
  // compare instruction encodings

  // 32-bit register-register compare
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32-bit compare against an add/sub-encodable immediate: subtract (or,
  // for a negative constant, add) into the flags with zr as destination
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // 32-bit compare against an arbitrary immediate, materialized in rscratch1
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    Register reg1 = as_Register($src1$$reg);
    uint32_t val = (uint32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}

  // 64-bit register-register compare
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare against a 12-bit immediate.  The (val != -val) test
  // filters out Long.MIN_VALUE, the one negative value whose negation is
  // itself and so cannot be handled by the adds path.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      __ orr(rscratch1, zr, (uint64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // 64-bit compare against an arbitrary immediate, materialized in rscratch1
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    Register reg1 = as_Register($src1$$reg);
    uint64_t val = (uint64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}

  // pointer register-register compare
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // narrow-oop register-register compare (32-bit)
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // test a pointer against null
  enc_class aarch64_enc_testp(iRegP src) %{
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // test a narrow oop against null
  enc_class aarch64_enc_testn(iRegN src) %{
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
 3615 
  // unconditional branch to a label
  enc_class aarch64_enc_b(label lbl) %{
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // conditional branch; the condition code comes from the cmpOp operand
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // conditional branch on an unsigned comparison
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // slow-path subtype check; result/flags are produced by
  // check_klass_subtype_slow_path, with the $primary flavor additionally
  // zeroing the result register on success
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     nullptr, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
 3647 
  // Static (or optimized-virtual) Java call.  Three cases:
  //  - no resolved _method: a call to a runtime wrapper, emitted as a plain
  //    trampolined runtime call;
  //  - the ensureMaterializedForStackWalk intrinsic: the call is elided and
  //    replaced by a nop so code size is unchanged;
  //  - a real Java target: a trampolined call with the appropriate static /
  //    opt-virtual relocation plus a to-interpreter stub (shared between
  //    call sites when the method is statically bound and sharing is
  //    supported).
  // Any trampoline/stub emission can fail when the code cache is full, in
  // which case the compilation is bailed out.
  enc_class aarch64_enc_java_static_call(method meth) %{
    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type));
      if (call == nullptr) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else if (_method->intrinsic_id() == vmIntrinsicID::_ensureMaterializedForStackWalk) {
      // The NOP here is purely to ensure that eliding a call to
      // JVM_EnsureMaterializedForStackWalk doesn't change the code size.
      __ nop();
      __ block_comment("call JVM_EnsureMaterializedForStackWalk (elided)");
    } else {
      int method_index = resolved_method_index(masm);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec));
      if (call == nullptr) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
      if (CodeBuffer::supports_shared_stubs() && _method->can_be_statically_bound()) {
        // Calls of the same statically bound method can share
        // a stub to the interpreter.
        __ code()->shared_stub_to_interp_for(_method, call - __ begin());
      } else {
        // Emit stub for static call
        address stub = CompiledDirectCall::emit_to_interp_stub(masm, call);
        if (stub == nullptr) {
          ciEnv::current()->record_failure("CodeCache is full");
          return;
        }
      }
    }

    __ post_call_nop();

    // Only non uncommon_trap calls need to reinitialize ptrue.
    if (Compile::current()->max_vector_size() > 0 && uncommon_trap_request() == 0) {
      __ reinitialize_ptrue();
    }
  %}
 3693 
  // Virtual/interface Java call through an inline cache; bails out the
  // compilation if the code cache is full.  When vectors are in use the
  // predicate register is reinitialized after the call (see
  // reinitialize_ptrue).
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    int method_index = resolved_method_index(masm);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
    __ post_call_nop();
    if (Compile::current()->max_vector_size() > 0) {
      __ reinitialize_ptrue();
    }
  %}

  // Post-call epilogue; under VerifyStackAtCalls the stack-depth check is
  // not implemented on this platform and traps instead.
  enc_class aarch64_enc_call_epilog() %{
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
 3713 
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blr
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // target lives in the code cache: reachable via trampoline call
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == nullptr) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
      __ post_call_nop();
    } else {
      // target outside the code cache: record the return pc in the thread
      // anchor, then load the absolute address and blr through it
      Label retaddr;
      // Make the anchor frame walkable
      __ adr(rscratch2, retaddr);
      __ str(rscratch2, Address(rthread, JavaThread::last_Java_pc_offset()));
      __ lea(rscratch1, RuntimeAddress(entry));
      __ blr(rscratch1);
      __ bind(retaddr);
      __ post_call_nop();
    }
    if (Compile::current()->max_vector_size() > 0) {
      __ reinitialize_ptrue();
    }
  %}

  // jump to the shared rethrow stub (exception oop already positioned)
  enc_class aarch64_enc_rethrow() %{
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}

  // method return; with vectors in use, assert the predicate register is
  // still all-true before returning
  enc_class aarch64_enc_ret() %{
#ifdef ASSERT
    if (Compile::current()->max_vector_size() > 0) {
      __ verify_ptrue();
    }
#endif
    __ ret(lr);
  %}

  // indirect jump for a tail call
  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}

  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
 3769 
 3770 %}
 3771 
 3772 //----------FRAME--------------------------------------------------------------
 3773 // Definition of frame structure and management information.
 3774 //
 3775 //  S T A C K   L A Y O U T    Allocators stack-slot number
 3776 //                             |   (to get allocators register number
 3777 //  G  Owned by    |        |  v    add OptoReg::stack0())
 3778 //  r   CALLER     |        |
 3779 //  o     |        +--------+      pad to even-align allocators stack-slot
 3780 //  w     V        |  pad0  |        numbers; owned by CALLER
 3781 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
 3782 //  h     ^        |   in   |  5
 3783 //        |        |  args  |  4   Holes in incoming args owned by SELF
 3784 //  |     |        |        |  3
 3785 //  |     |        +--------+
 3786 //  V     |        | old out|      Empty on Intel, window on Sparc
 3787 //        |    old |preserve|      Must be even aligned.
 3788 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
 3789 //        |        |   in   |  3   area for Intel ret address
 3790 //     Owned by    |preserve|      Empty on Sparc.
 3791 //       SELF      +--------+
 3792 //        |        |  pad2  |  2   pad to align old SP
 3793 //        |        +--------+  1
 3794 //        |        | locks  |  0
 3795 //        |        +--------+----> OptoReg::stack0(), even aligned
 3796 //        |        |  pad1  | 11   pad to align new SP
 3797 //        |        +--------+
 3798 //        |        |        | 10
 3799 //        |        | spills |  9   spills
 3800 //        V        |        |  8   (pad0 slot for callee)
 3801 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
 3802 //        ^        |  out   |  7
 3803 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
 3804 //     Owned by    +--------+
 3805 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
 3806 //        |    new |preserve|      Must be even-aligned.
 3807 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
 3808 //        |        |        |
 3809 //
 3810 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
 3811 //         known from SELF's arguments and the Java calling convention.
 3812 //         Region 6-7 is determined per call site.
 3813 // Note 2: If the calling convention leaves holes in the incoming argument
 3814 //         area, those holes are owned by SELF.  Holes in the outgoing area
 3815 //         are owned by the CALLEE.  Holes should not be necessary in the
 3816 //         incoming area, as the Java calling convention is completely under
 3817 //         the control of the AD file.  Doubles can be sorted and packed to
 3818 //         avoid holes.  Holes in the outgoing arguments may be necessary for
 3819 //         varargs C calling conventions.
 3820 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
 3821 //         even aligned with pad0 as needed.
 3822 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
 3823 //           (the latter is true on Intel but is it false on AArch64?)
 3824 //         region 6-11 is even aligned; it may be padded out more so that
 3825 //         the region from SP to FP meets the minimum stack alignment.
 3826 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
 3827 //         alignment.  Region 11, pad1, may be dynamically extended so that
 3828 //         SP meets the minimum alignment.
 3829 
frame %{
  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or Method for I2C.
  inline_cache_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  // (R31 is sp on AArch64; compiled frames are addressed off sp)
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Location of compiled Java return values.  Same as C for now.
  // Maps an ideal register class to the (hi, lo) register-number pair in
  // which a value of that class is returned: R0/R0_H for integral and
  // pointer values, V0/V0_H for floating-point values.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                      // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
 3901 
//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
// Default values declared here; individual instruct definitions may
// override them.
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction

// Whether this node is expanded during code emission into a sequence of
// instructions and the first instruction can perform an implicit null check.
ins_attrib ins_is_late_expanded_null_check_candidate(false);
 3923 
 3924 //----------OPERANDS-----------------------------------------------------------
 3925 // Operand definitions must precede instruction definitions for correct parsing
 3926 // in the ADLC because operands constitute user defined types which are used in
 3927 // instruction definitions.
 3928 
 3929 //----------Simple Operands----------------------------------------------------
 3930 
 3931 // Integer operands 32 bit
 3932 // 32 bit immediate
 3933 operand immI()
 3934 %{
 3935   match(ConI);
 3936 
 3937   op_cost(0);
 3938   format %{ %}
 3939   interface(CONST_INTER);
 3940 %}
 3941 
 3942 // 32 bit zero
 3943 operand immI0()
 3944 %{
 3945   predicate(n->get_int() == 0);
 3946   match(ConI);
 3947 
 3948   op_cost(0);
 3949   format %{ %}
 3950   interface(CONST_INTER);
 3951 %}
 3952 
 3953 // 32 bit unit increment
 3954 operand immI_1()
 3955 %{
 3956   predicate(n->get_int() == 1);
 3957   match(ConI);
 3958 
 3959   op_cost(0);
 3960   format %{ %}
 3961   interface(CONST_INTER);
 3962 %}
 3963 
 3964 // 32 bit unit decrement
 3965 operand immI_M1()
 3966 %{
 3967   predicate(n->get_int() == -1);
 3968   match(ConI);
 3969 
 3970   op_cost(0);
 3971   format %{ %}
 3972   interface(CONST_INTER);
 3973 %}
 3974 
 3975 // Shift values for add/sub extension shift
 3976 operand immIExt()
 3977 %{
 3978   predicate(0 <= n->get_int() && (n->get_int() <= 4));
 3979   match(ConI);
 3980 
 3981   op_cost(0);
 3982   format %{ %}
 3983   interface(CONST_INTER);
 3984 %}
 3985 
// 32 bit integer strictly greater than one
operand immI_gt_1()
%{
  predicate(n->get_int() > 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer less than or equal to 4
// (note: the predicate places no lower bound, so negative values match too)
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4005 
// Specific 32 bit constants — presumably used as shift/rotate amounts or
// lane offsets in matching rules elsewhere in this file; confirm at use sites.

// 32 bit constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4055 
// 32 bit constant 255 (low byte mask)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 65535 (low halfword mask)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer strictly greater than zero
operand immI_positive()
%{
  predicate(n->get_int() > 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4085 
 4086 // BoolTest condition for signed compare
 4087 operand immI_cmp_cond()
 4088 %{
 4089   predicate(!Matcher::is_unsigned_booltest_pred(n->get_int()));
 4090   match(ConI);
 4091 
 4092   op_cost(0);
 4093   format %{ %}
 4094   interface(CONST_INTER);
 4095 %}
 4096 
 4097 // BoolTest condition for unsigned compare
 4098 operand immI_cmpU_cond()
 4099 %{
 4100   predicate(Matcher::is_unsigned_booltest_pred(n->get_int()));
 4101   match(ConI);
 4102 
 4103   op_cost(0);
 4104   format %{ %}
 4105   interface(CONST_INTER);
 4106 %}
 4107 
// 64 bit constant 255 (low byte mask)
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 65535 (low halfword mask)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 4294967295 (low word mask, 0xFFFFFFFF)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4137 
// 64 bit mask of contiguous low-order one bits: non-zero, value+1 is a
// power of two, and the top two bits are clear (so at most 62 ones).
operand immL_bitmask()
%{
  predicate((n->get_long() != 0)
            && ((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit mask of contiguous low-order one bits: non-zero, value+1 is a
// power of two, and the top two bits are clear (so at most 30 ones).
operand immI_bitmask()
%{
  predicate((n->get_int() != 0)
            && ((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit mask of contiguous low-order one bits that also fits in 31 bits
// (unsigned value < 2^31), i.e. a positive int-sized bitmask held in a long.
operand immL_positive_bitmaskI()
%{
  predicate((n->get_long() != 0)
            && ((julong)n->get_long() < 0x80000000ULL)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4173 
 4174 // Scale values for scaled offset addressing modes (up to long but not quad)
 4175 operand immIScale()
 4176 %{
 4177   predicate(0 <= n->get_int() && (n->get_int() <= 3));
 4178   match(ConI);
 4179 
 4180   op_cost(0);
 4181   format %{ %}
 4182   interface(CONST_INTER);
 4183 %}
 4184 
 4185 // 5 bit signed integer
 4186 operand immI5()
 4187 %{
 4188   predicate(Assembler::is_simm(n->get_int(), 5));
 4189   match(ConI);
 4190 
 4191   op_cost(0);
 4192   format %{ %}
 4193   interface(CONST_INTER);
 4194 %}
 4195 
 4196 // 7 bit unsigned integer
 4197 operand immIU7()
 4198 %{
 4199   predicate(Assembler::is_uimm(n->get_int(), 7));
 4200   match(ConI);
 4201 
 4202   op_cost(0);
 4203   format %{ %}
 4204   interface(CONST_INTER);
 4205 %}
 4206 
 4207 // Offset for scaled or unscaled immediate loads and stores
 4208 operand immIOffset()
 4209 %{
 4210   predicate(Address::offset_ok_for_immed(n->get_int(), 0));
 4211   match(ConI);
 4212 
 4213   op_cost(0);
 4214   format %{ %}
 4215   interface(CONST_INTER);
 4216 %}
 4217 
// Offsets valid for a memory access of a specific size; the second argument
// of offset_ok_for_immed is log2 of the access size in bytes.
// NOTE(review): immIOffset1 passes 0, the same as immIOffset above —
// presumably intentional (1-byte access == unscaled case); confirm.

// Offset valid for a 1 byte access (shift 0)
operand immIOffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 2 byte access (shift 1)
operand immIOffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 4 byte access (shift 2)
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an 8 byte access (shift 3)
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 16 byte access (shift 4)
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4267 
// Long offset in the range [-256, 65520] — presumably the union of the
// unscaled simm9 range and the largest scaled uimm12 range (4095 * 16);
// confirm against the load/store immediate encodings.
operand immLOffset()
%{
  predicate(n->get_long() >= -256 && n->get_long() <= 65520);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long variants of the sized-offset operands above; the second argument of
// offset_ok_for_immed is log2 of the access size in bytes.

// Long offset valid for a 1 byte access (shift 0)
operand immLoffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 2 byte access (shift 1)
operand immLoffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 4 byte access (shift 2)
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for an 8 byte access (shift 3)
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 16 byte access (shift 4)
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4327 
 4328 // 5 bit signed long integer
 4329 operand immL5()
 4330 %{
 4331   predicate(Assembler::is_simm(n->get_long(), 5));
 4332   match(ConL);
 4333 
 4334   op_cost(0);
 4335   format %{ %}
 4336   interface(CONST_INTER);
 4337 %}
 4338 
 4339 // 7 bit unsigned long integer
 4340 operand immLU7()
 4341 %{
 4342   predicate(Assembler::is_uimm(n->get_long(), 7));
 4343   match(ConL);
 4344 
 4345   op_cost(0);
 4346   format %{ %}
 4347   interface(CONST_INTER);
 4348 %}
 4349 
 4350 // 8 bit signed value.
 4351 operand immI8()
 4352 %{
 4353   predicate(n->get_int() <= 127 && n->get_int() >= -128);
 4354   match(ConI);
 4355 
 4356   op_cost(0);
 4357   format %{ %}
 4358   interface(CONST_INTER);
 4359 %}
 4360 
 4361 // 8 bit signed value (simm8), or #simm8 LSL 8.
 4362 operand immI8_shift8()
 4363 %{
 4364   predicate((n->get_int() <= 127 && n->get_int() >= -128) ||
 4365             (n->get_int() <= 32512 && n->get_int() >= -32768 && (n->get_int() & 0xff) == 0));
 4366   match(ConI);
 4367 
 4368   op_cost(0);
 4369   format %{ %}
 4370   interface(CONST_INTER);
 4371 %}
 4372 
 4373 // 8 bit signed value (simm8), or #simm8 LSL 8.
 4374 operand immL8_shift8()
 4375 %{
 4376   predicate((n->get_long() <= 127 && n->get_long() >= -128) ||
 4377             (n->get_long() <= 32512 && n->get_long() >= -32768 && (n->get_long() & 0xff) == 0));
 4378   match(ConL);
 4379 
 4380   op_cost(0);
 4381   format %{ %}
 4382   interface(CONST_INTER);
 4383 %}
 4384 
 4385 // 8 bit integer valid for vector add sub immediate
 4386 operand immBAddSubV()
 4387 %{
 4388   predicate(n->get_int() <= 255 && n->get_int() >= -255);
 4389   match(ConI);
 4390 
 4391   op_cost(0);
 4392   format %{ %}
 4393   interface(CONST_INTER);
 4394 %}
 4395 
 4396 // 32 bit integer valid for add sub immediate
 4397 operand immIAddSub()
 4398 %{
 4399   predicate(Assembler::operand_valid_for_add_sub_immediate((int64_t)n->get_int()));
 4400   match(ConI);
 4401   op_cost(0);
 4402   format %{ %}
 4403   interface(CONST_INTER);
 4404 %}
 4405 
 4406 // 32 bit integer valid for vector add sub immediate
 4407 operand immIAddSubV()
 4408 %{
 4409   predicate(Assembler::operand_valid_for_sve_add_sub_immediate((int64_t)n->get_int()));
 4410   match(ConI);
 4411 
 4412   op_cost(0);
 4413   format %{ %}
 4414   interface(CONST_INTER);
 4415 %}
 4416 
 4417 // 32 bit unsigned integer valid for logical immediate
 4418 
 4419 operand immBLog()
 4420 %{
 4421   predicate(Assembler::operand_valid_for_sve_logical_immediate(BitsPerByte, (uint64_t)n->get_int()));
 4422   match(ConI);
 4423 
 4424   op_cost(0);
 4425   format %{ %}
 4426   interface(CONST_INTER);
 4427 %}
 4428 
 4429 operand immSLog()
 4430 %{
 4431   predicate(Assembler::operand_valid_for_sve_logical_immediate(BitsPerShort, (uint64_t)n->get_int()));
 4432   match(ConI);
 4433 
 4434   op_cost(0);
 4435   format %{ %}
 4436   interface(CONST_INTER);
 4437 %}
 4438 
 4439 operand immILog()
 4440 %{
 4441   predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (uint64_t)n->get_int()));
 4442   match(ConI);
 4443 
 4444   op_cost(0);
 4445   format %{ %}
 4446   interface(CONST_INTER);
 4447 %}
 4448 
 4449 // Integer operands 64 bit
 4450 // 64 bit immediate
 4451 operand immL()
 4452 %{
 4453   match(ConL);
 4454 
 4455   op_cost(0);
 4456   format %{ %}
 4457   interface(CONST_INTER);
 4458 %}
 4459 
 4460 // 64 bit zero
 4461 operand immL0()
 4462 %{
 4463   predicate(n->get_long() == 0);
 4464   match(ConL);
 4465 
 4466   op_cost(0);
 4467   format %{ %}
 4468   interface(CONST_INTER);
 4469 %}
 4470 
 4471 // 64 bit unit decrement
 4472 operand immL_M1()
 4473 %{
 4474   predicate(n->get_long() == -1);
 4475   match(ConL);
 4476 
 4477   op_cost(0);
 4478   format %{ %}
 4479   interface(CONST_INTER);
 4480 %}
 4481 
 4482 // 64 bit integer valid for add sub immediate
 4483 operand immLAddSub()
 4484 %{
 4485   predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
 4486   match(ConL);
 4487   op_cost(0);
 4488   format %{ %}
 4489   interface(CONST_INTER);
 4490 %}
 4491 
 4492 // 64 bit integer valid for addv subv immediate
 4493 operand immLAddSubV()
 4494 %{
 4495   predicate(Assembler::operand_valid_for_sve_add_sub_immediate(n->get_long()));
 4496   match(ConL);
 4497 
 4498   op_cost(0);
 4499   format %{ %}
 4500   interface(CONST_INTER);
 4501 %}
 4502 
 4503 // 64 bit integer valid for logical immediate
 4504 operand immLLog()
 4505 %{
 4506   predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (uint64_t)n->get_long()));
 4507   match(ConL);
 4508   op_cost(0);
 4509   format %{ %}
 4510   interface(CONST_INTER);
 4511 %}
 4512 
 4513 // Long Immediate: low 32-bit mask
 4514 operand immL_32bits()
 4515 %{
 4516   predicate(n->get_long() == 0xFFFFFFFFL);
 4517   match(ConL);
 4518   op_cost(0);
 4519   format %{ %}
 4520   interface(CONST_INTER);
 4521 %}
 4522 
 4523 // Pointer operands
 4524 // Pointer Immediate
 4525 operand immP()
 4526 %{
 4527   match(ConP);
 4528 
 4529   op_cost(0);
 4530   format %{ %}
 4531   interface(CONST_INTER);
 4532 %}
 4533 
 4534 // nullptr Pointer Immediate
 4535 operand immP0()
 4536 %{
 4537   predicate(n->get_ptr() == 0);
 4538   match(ConP);
 4539 
 4540   op_cost(0);
 4541   format %{ %}
 4542   interface(CONST_INTER);
 4543 %}
 4544 
 4545 // Pointer Immediate One
 4546 // this is used in object initialization (initial object header)
 4547 operand immP_1()
 4548 %{
 4549   predicate(n->get_ptr() == 1);
 4550   match(ConP);
 4551 
 4552   op_cost(0);
 4553   format %{ %}
 4554   interface(CONST_INTER);
 4555 %}
 4556 
 4557 // Card Table Byte Map Base
 4558 operand immByteMapBase()
 4559 %{
 4560   // Get base of card map
 4561   predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
 4562             SHENANDOAHGC_ONLY(!BarrierSet::barrier_set()->is_a(BarrierSet::ShenandoahBarrierSet) &&)
 4563             (CardTable::CardValue*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
 4564   match(ConP);
 4565 
 4566   op_cost(0);
 4567   format %{ %}
 4568   interface(CONST_INTER);
 4569 %}
 4570 
 4571 // Float and Double operands
 4572 // Double Immediate
 4573 operand immD()
 4574 %{
 4575   match(ConD);
 4576   op_cost(0);
 4577   format %{ %}
 4578   interface(CONST_INTER);
 4579 %}
 4580 
 4581 // Double Immediate: +0.0d
 4582 operand immD0()
 4583 %{
 4584   predicate(jlong_cast(n->getd()) == 0);
 4585   match(ConD);
 4586 
 4587   op_cost(0);
 4588   format %{ %}
 4589   interface(CONST_INTER);
 4590 %}
 4591 
// Double Immediate: value encodable as an FP immediate
// (see Assembler::operand_valid_for_float_immediate).
 4593 operand immDPacked()
 4594 %{
 4595   predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
 4596   match(ConD);
 4597   op_cost(0);
 4598   format %{ %}
 4599   interface(CONST_INTER);
 4600 %}
 4601 
 4602 // Float Immediate
 4603 operand immF()
 4604 %{
 4605   match(ConF);
 4606   op_cost(0);
 4607   format %{ %}
 4608   interface(CONST_INTER);
 4609 %}
 4610 
 4611 // Float Immediate: +0.0f.
 4612 operand immF0()
 4613 %{
 4614   predicate(jint_cast(n->getf()) == 0);
 4615   match(ConF);
 4616 
 4617   op_cost(0);
 4618   format %{ %}
 4619   interface(CONST_INTER);
 4620 %}
 4621 
 4622 // Half Float (FP16) Immediate
 4623 operand immH()
 4624 %{
 4625   match(ConH);
 4626   op_cost(0);
 4627   format %{ %}
 4628   interface(CONST_INTER);
 4629 %}
 4630 
 4631 //
 4632 operand immFPacked()
 4633 %{
 4634   predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
 4635   match(ConF);
 4636   op_cost(0);
 4637   format %{ %}
 4638   interface(CONST_INTER);
 4639 %}
 4640 
 4641 // Narrow pointer operands
 4642 // Narrow Pointer Immediate
 4643 operand immN()
 4644 %{
 4645   match(ConN);
 4646 
 4647   op_cost(0);
 4648   format %{ %}
 4649   interface(CONST_INTER);
 4650 %}
 4651 
 4652 // Narrow nullptr Pointer Immediate
 4653 operand immN0()
 4654 %{
 4655   predicate(n->get_narrowcon() == 0);
 4656   match(ConN);
 4657 
 4658   op_cost(0);
 4659   format %{ %}
 4660   interface(CONST_INTER);
 4661 %}
 4662 
 4663 operand immNKlass()
 4664 %{
 4665   match(ConNKlass);
 4666 
 4667   op_cost(0);
 4668   format %{ %}
 4669   interface(CONST_INTER);
 4670 %}
 4671 
 4672 // Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
 4674 operand iRegI()
 4675 %{
 4676   constraint(ALLOC_IN_RC(any_reg32));
 4677   match(RegI);
 4678   match(iRegINoSp);
 4679   op_cost(0);
 4680   format %{ %}
 4681   interface(REG_INTER);
 4682 %}
 4683 
 4684 // Integer 32 bit Register not Special
 4685 operand iRegINoSp()
 4686 %{
 4687   constraint(ALLOC_IN_RC(no_special_reg32));
 4688   match(RegI);
 4689   op_cost(0);
 4690   format %{ %}
 4691   interface(REG_INTER);
 4692 %}
 4693 
 4694 // Integer 64 bit Register Operands
 4695 // Integer 64 bit Register (includes SP)
 4696 operand iRegL()
 4697 %{
 4698   constraint(ALLOC_IN_RC(any_reg));
 4699   match(RegL);
 4700   match(iRegLNoSp);
 4701   op_cost(0);
 4702   format %{ %}
 4703   interface(REG_INTER);
 4704 %}
 4705 
 4706 // Integer 64 bit Register not Special
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  // NOTE(review): unlike iRegINoSp/iRegPNoSp this operand declares no
  // op_cost, so the default op_attrib op_cost(1) applies — confirm that
  // this asymmetry is intentional.
  format %{ %}
  interface(REG_INTER);
%}
 4715 
 4716 // Pointer Register Operands
 4717 // Pointer Register
 4718 operand iRegP()
 4719 %{
 4720   constraint(ALLOC_IN_RC(ptr_reg));
 4721   match(RegP);
 4722   match(iRegPNoSp);
 4723   match(iRegP_R0);
 4724   //match(iRegP_R2);
 4725   //match(iRegP_R4);
 4726   match(iRegP_R5);
 4727   match(thread_RegP);
 4728   op_cost(0);
 4729   format %{ %}
 4730   interface(REG_INTER);
 4731 %}
 4732 
 4733 // Pointer 64 bit Register not Special
 4734 operand iRegPNoSp()
 4735 %{
 4736   constraint(ALLOC_IN_RC(no_special_ptr_reg));
 4737   match(RegP);
 4738   // match(iRegP);
 4739   // match(iRegP_R0);
 4740   // match(iRegP_R2);
 4741   // match(iRegP_R4);
 4742   // match(iRegP_R5);
 4743   // match(thread_RegP);
 4744   op_cost(0);
 4745   format %{ %}
 4746   interface(REG_INTER);
 4747 %}
 4748 
 4749 // This operand is not allowed to use rfp even if
 4750 // rfp is not used to hold the frame pointer.
 4751 operand iRegPNoSpNoRfp()
 4752 %{
 4753   constraint(ALLOC_IN_RC(no_special_no_rfp_ptr_reg));
 4754   match(RegP);
 4755   match(iRegPNoSp);
 4756   op_cost(0);
 4757   format %{ %}
 4758   interface(REG_INTER);
 4759 %}
 4760 
 4761 // Pointer 64 bit Register R0 only
 4762 operand iRegP_R0()
 4763 %{
 4764   constraint(ALLOC_IN_RC(r0_reg));
 4765   match(RegP);
 4766   // match(iRegP);
 4767   match(iRegPNoSp);
 4768   op_cost(0);
 4769   format %{ %}
 4770   interface(REG_INTER);
 4771 %}
 4772 
 4773 // Pointer 64 bit Register R1 only
 4774 operand iRegP_R1()
 4775 %{
 4776   constraint(ALLOC_IN_RC(r1_reg));
 4777   match(RegP);
 4778   // match(iRegP);
 4779   match(iRegPNoSp);
 4780   op_cost(0);
 4781   format %{ %}
 4782   interface(REG_INTER);
 4783 %}
 4784 
 4785 // Pointer 64 bit Register R2 only
 4786 operand iRegP_R2()
 4787 %{
 4788   constraint(ALLOC_IN_RC(r2_reg));
 4789   match(RegP);
 4790   // match(iRegP);
 4791   match(iRegPNoSp);
 4792   op_cost(0);
 4793   format %{ %}
 4794   interface(REG_INTER);
 4795 %}
 4796 
 4797 // Pointer 64 bit Register R3 only
 4798 operand iRegP_R3()
 4799 %{
 4800   constraint(ALLOC_IN_RC(r3_reg));
 4801   match(RegP);
 4802   // match(iRegP);
 4803   match(iRegPNoSp);
 4804   op_cost(0);
 4805   format %{ %}
 4806   interface(REG_INTER);
 4807 %}
 4808 
 4809 // Pointer 64 bit Register R4 only
 4810 operand iRegP_R4()
 4811 %{
 4812   constraint(ALLOC_IN_RC(r4_reg));
 4813   match(RegP);
 4814   // match(iRegP);
 4815   match(iRegPNoSp);
 4816   op_cost(0);
 4817   format %{ %}
 4818   interface(REG_INTER);
 4819 %}
 4820 
 4821 // Pointer 64 bit Register R5 only
 4822 operand iRegP_R5()
 4823 %{
 4824   constraint(ALLOC_IN_RC(r5_reg));
 4825   match(RegP);
 4826   // match(iRegP);
 4827   match(iRegPNoSp);
 4828   op_cost(0);
 4829   format %{ %}
 4830   interface(REG_INTER);
 4831 %}
 4832 
 4833 // Pointer 64 bit Register R10 only
 4834 operand iRegP_R10()
 4835 %{
 4836   constraint(ALLOC_IN_RC(r10_reg));
 4837   match(RegP);
 4838   // match(iRegP);
 4839   match(iRegPNoSp);
 4840   op_cost(0);
 4841   format %{ %}
 4842   interface(REG_INTER);
 4843 %}
 4844 
 4845 // Long 64 bit Register R0 only
 4846 operand iRegL_R0()
 4847 %{
 4848   constraint(ALLOC_IN_RC(r0_reg));
 4849   match(RegL);
 4850   match(iRegLNoSp);
 4851   op_cost(0);
 4852   format %{ %}
 4853   interface(REG_INTER);
 4854 %}
 4855 
 4856 // Long 64 bit Register R11 only
 4857 operand iRegL_R11()
 4858 %{
 4859   constraint(ALLOC_IN_RC(r11_reg));
 4860   match(RegL);
 4861   match(iRegLNoSp);
 4862   op_cost(0);
 4863   format %{ %}
 4864   interface(REG_INTER);
 4865 %}
 4866 
 4867 // Register R0 only
 4868 operand iRegI_R0()
 4869 %{
 4870   constraint(ALLOC_IN_RC(int_r0_reg));
 4871   match(RegI);
 4872   match(iRegINoSp);
 4873   op_cost(0);
 4874   format %{ %}
 4875   interface(REG_INTER);
 4876 %}
 4877 
 4878 // Register R2 only
 4879 operand iRegI_R2()
 4880 %{
 4881   constraint(ALLOC_IN_RC(int_r2_reg));
 4882   match(RegI);
 4883   match(iRegINoSp);
 4884   op_cost(0);
 4885   format %{ %}
 4886   interface(REG_INTER);
 4887 %}
 4888 
 4889 // Register R3 only
 4890 operand iRegI_R3()
 4891 %{
 4892   constraint(ALLOC_IN_RC(int_r3_reg));
 4893   match(RegI);
 4894   match(iRegINoSp);
 4895   op_cost(0);
 4896   format %{ %}
 4897   interface(REG_INTER);
 4898 %}
 4899 
 4900 
 4901 // Register R4 only
 4902 operand iRegI_R4()
 4903 %{
 4904   constraint(ALLOC_IN_RC(int_r4_reg));
 4905   match(RegI);
 4906   match(iRegINoSp);
 4907   op_cost(0);
 4908   format %{ %}
 4909   interface(REG_INTER);
 4910 %}
 4911 
 4912 
 4913 // Pointer Register Operands
 4914 // Narrow Pointer Register
 4915 operand iRegN()
 4916 %{
 4917   constraint(ALLOC_IN_RC(any_reg32));
 4918   match(RegN);
 4919   match(iRegNNoSp);
 4920   op_cost(0);
 4921   format %{ %}
 4922   interface(REG_INTER);
 4923 %}
 4924 
// Narrow Pointer Register not Special
 4926 operand iRegNNoSp()
 4927 %{
 4928   constraint(ALLOC_IN_RC(no_special_reg32));
 4929   match(RegN);
 4930   op_cost(0);
 4931   format %{ %}
 4932   interface(REG_INTER);
 4933 %}
 4934 
 4935 // Float Register
 4936 // Float register operands
 4937 operand vRegF()
 4938 %{
 4939   constraint(ALLOC_IN_RC(float_reg));
 4940   match(RegF);
 4941 
 4942   op_cost(0);
 4943   format %{ %}
 4944   interface(REG_INTER);
 4945 %}
 4946 
 4947 // Double Register
 4948 // Double register operands
 4949 operand vRegD()
 4950 %{
 4951   constraint(ALLOC_IN_RC(double_reg));
 4952   match(RegD);
 4953 
 4954   op_cost(0);
 4955   format %{ %}
 4956   interface(REG_INTER);
 4957 %}
 4958 
 4959 // Generic vector class. This will be used for
 4960 // all vector operands, including NEON and SVE.
 4961 operand vReg()
 4962 %{
 4963   constraint(ALLOC_IN_RC(dynamic));
 4964   match(VecA);
 4965   match(VecD);
 4966   match(VecX);
 4967 
 4968   op_cost(0);
 4969   format %{ %}
 4970   interface(REG_INTER);
 4971 %}
 4972 
// Vector register operand, vectora_reg class — presumably the
// length-agnostic (scalable/SVE) vector form of VecA; confirm against the
// register-class definitions earlier in this file.
operand vecA()
%{
  constraint(ALLOC_IN_RC(vectora_reg));
  match(VecA);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register operand, vectord_reg class (VecD, 64-bit D form)
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register operand, vectorx_reg class (VecX, 128-bit Q/X form)
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5002 
 5003 operand vRegD_V0()
 5004 %{
 5005   constraint(ALLOC_IN_RC(v0_reg));
 5006   match(RegD);
 5007   op_cost(0);
 5008   format %{ %}
 5009   interface(REG_INTER);
 5010 %}
 5011 
 5012 operand vRegD_V1()
 5013 %{
 5014   constraint(ALLOC_IN_RC(v1_reg));
 5015   match(RegD);
 5016   op_cost(0);
 5017   format %{ %}
 5018   interface(REG_INTER);
 5019 %}
 5020 
 5021 operand vRegD_V2()
 5022 %{
 5023   constraint(ALLOC_IN_RC(v2_reg));
 5024   match(RegD);
 5025   op_cost(0);
 5026   format %{ %}
 5027   interface(REG_INTER);
 5028 %}
 5029 
 5030 operand vRegD_V3()
 5031 %{
 5032   constraint(ALLOC_IN_RC(v3_reg));
 5033   match(RegD);
 5034   op_cost(0);
 5035   format %{ %}
 5036   interface(REG_INTER);
 5037 %}
 5038 
 5039 operand vRegD_V4()
 5040 %{
 5041   constraint(ALLOC_IN_RC(v4_reg));
 5042   match(RegD);
 5043   op_cost(0);
 5044   format %{ %}
 5045   interface(REG_INTER);
 5046 %}
 5047 
 5048 operand vRegD_V5()
 5049 %{
 5050   constraint(ALLOC_IN_RC(v5_reg));
 5051   match(RegD);
 5052   op_cost(0);
 5053   format %{ %}
 5054   interface(REG_INTER);
 5055 %}
 5056 
 5057 operand vRegD_V6()
 5058 %{
 5059   constraint(ALLOC_IN_RC(v6_reg));
 5060   match(RegD);
 5061   op_cost(0);
 5062   format %{ %}
 5063   interface(REG_INTER);
 5064 %}
 5065 
 5066 operand vRegD_V7()
 5067 %{
 5068   constraint(ALLOC_IN_RC(v7_reg));
 5069   match(RegD);
 5070   op_cost(0);
 5071   format %{ %}
 5072   interface(REG_INTER);
 5073 %}
 5074 
 5075 operand vRegD_V12()
 5076 %{
 5077   constraint(ALLOC_IN_RC(v12_reg));
 5078   match(RegD);
 5079   op_cost(0);
 5080   format %{ %}
 5081   interface(REG_INTER);
 5082 %}
 5083 
 5084 operand vRegD_V13()
 5085 %{
 5086   constraint(ALLOC_IN_RC(v13_reg));
 5087   match(RegD);
 5088   op_cost(0);
 5089   format %{ %}
 5090   interface(REG_INTER);
 5091 %}
 5092 
// Vector-mask (predicate) register operand, pr_reg class — presumably the
// SVE predicate registers; confirm against the register definitions.
operand pReg()
%{
  constraint(ALLOC_IN_RC(pr_reg));
  match(RegVectMask);
  match(pRegGov);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector-mask register restricted to the governing-predicate class (gov_pr)
operand pRegGov()
%{
  constraint(ALLOC_IN_RC(gov_pr));
  match(RegVectMask);
  match(pReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector-mask register pinned to p0
operand pRegGov_P0()
%{
  constraint(ALLOC_IN_RC(p0_reg));
  match(RegVectMask);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector-mask register pinned to p1
operand pRegGov_P1()
%{
  constraint(ALLOC_IN_RC(p1_reg));
  match(RegVectMask);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5130 
 5131 // Flags register, used as output of signed compare instructions
 5132 
 5133 // note that on AArch64 we also use this register as the output for
 5134 // for floating point compare instructions (CmpF CmpD). this ensures
 5135 // that ordered inequality tests use GT, GE, LT or LE none of which
 5136 // pass through cases where the result is unordered i.e. one or both
 5137 // inputs to the compare is a NaN. this means that the ideal code can
 5138 // replace e.g. a GT with an LE and not end up capturing the NaN case
 5139 // (where the comparison should always fail). EQ and NE tests are
 5140 // always generated in ideal code so that unordered folds into the NE
 5141 // case, matching the behaviour of AArch64 NE.
 5142 //
 5143 // This differs from x86 where the outputs of FP compares use a
 5144 // special FP flags registers and where compares based on this
 5145 // register are distinguished into ordered inequalities (cmpOpUCF) and
 5146 // EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
 5147 // to explicitly handle the unordered case in branches. x86 also has
 5148 // to include extra CMoveX rules to accept a cmpOpUCF input.
 5149 
 5150 operand rFlagsReg()
 5151 %{
 5152   constraint(ALLOC_IN_RC(int_flags));
 5153   match(RegFlags);
 5154 
 5155   op_cost(0);
 5156   format %{ "RFLAGS" %}
 5157   interface(REG_INTER);
 5158 %}
 5159 
 5160 // Flags register, used as output of unsigned compare instructions
 5161 operand rFlagsRegU()
 5162 %{
 5163   constraint(ALLOC_IN_RC(int_flags));
 5164   match(RegFlags);
 5165 
 5166   op_cost(0);
 5167   format %{ "RFLAGSU" %}
 5168   interface(REG_INTER);
 5169 %}
 5170 
 5171 // Special Registers
 5172 
 5173 // Method Register
 5174 operand inline_cache_RegP(iRegP reg)
 5175 %{
 5176   constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
 5177   match(reg);
 5178   match(iRegPNoSp);
 5179   op_cost(0);
 5180   format %{ %}
 5181   interface(REG_INTER);
 5182 %}
 5183 
 5184 // Thread Register
 5185 operand thread_RegP(iRegP reg)
 5186 %{
 5187   constraint(ALLOC_IN_RC(thread_reg)); // link_reg
 5188   match(reg);
 5189   op_cost(0);
 5190   format %{ %}
 5191   interface(REG_INTER);
 5192 %}
 5193 
 5194 //----------Memory Operands----------------------------------------------------
 5195 
 5196 operand indirect(iRegP reg)
 5197 %{
 5198   constraint(ALLOC_IN_RC(ptr_reg));
 5199   match(reg);
 5200   op_cost(0);
 5201   format %{ "[$reg]" %}
 5202   interface(MEMORY_INTER) %{
 5203     base($reg);
 5204     index(0xffffffff);
 5205     scale(0x0);
 5206     disp(0x0);
 5207   %}
 5208 %}
 5209 
 5210 operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
 5211 %{
 5212   constraint(ALLOC_IN_RC(ptr_reg));
 5213   predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
 5214   match(AddP reg (LShiftL (ConvI2L ireg) scale));
 5215   op_cost(0);
 5216   format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
 5217   interface(MEMORY_INTER) %{
 5218     base($reg);
 5219     index($ireg);
 5220     scale($scale);
 5221     disp(0x0);
 5222   %}
 5223 %}
 5224 
 5225 operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
 5226 %{
 5227   constraint(ALLOC_IN_RC(ptr_reg));
 5228   predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
 5229   match(AddP reg (LShiftL lreg scale));
 5230   op_cost(0);
 5231   format %{ "$reg, $lreg lsl($scale)" %}
 5232   interface(MEMORY_INTER) %{
 5233     base($reg);
 5234     index($lreg);
 5235     scale($scale);
 5236     disp(0x0);
 5237   %}
 5238 %}
 5239 
 5240 operand indIndexI2L(iRegP reg, iRegI ireg)
 5241 %{
 5242   constraint(ALLOC_IN_RC(ptr_reg));
 5243   match(AddP reg (ConvI2L ireg));
 5244   op_cost(0);
 5245   format %{ "$reg, $ireg, 0, I2L" %}
 5246   interface(MEMORY_INTER) %{
 5247     base($reg);
 5248     index($ireg);
 5249     scale(0x0);
 5250     disp(0x0);
 5251   %}
 5252 %}
 5253 
 5254 operand indIndex(iRegP reg, iRegL lreg)
 5255 %{
 5256   constraint(ALLOC_IN_RC(ptr_reg));
 5257   match(AddP reg lreg);
 5258   op_cost(0);
 5259   format %{ "$reg, $lreg" %}
 5260   interface(MEMORY_INTER) %{
 5261     base($reg);
 5262     index($lreg);
 5263     scale(0x0);
 5264     disp(0x0);
 5265   %}
 5266 %}
 5267 
// Memory operands: pointer base plus an int constant offset. One
// operand is defined per access size (1/2/4/8/16 bytes); the
// immIOffsetN immediate types (defined elsewhere in this file)
// presumably limit the offset to the encodable range for that access
// size — confirm against the immIOffsetN definitions. In all of them
// index(0xffffffff) is the ADLC convention for "no index register".

// int offset, 1-byte access
operand indOffI1(iRegP reg, immIOffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// int offset, 2-byte access
operand indOffI2(iRegP reg, immIOffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// int offset, 4-byte access
operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// int offset, 8-byte access
operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// int offset, 16-byte access
operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5337 
// Memory operands: pointer base plus a long constant offset. Long
// counterparts of the indOffI* operands above, again one per access
// size (1/2/4/8/16 bytes) via the immLoffsetN immediate types.

// long offset, 1-byte access
operand indOffL1(iRegP reg, immLoffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// long offset, 2-byte access
operand indOffL2(iRegP reg, immLoffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// long offset, 4-byte access
operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// long offset, 8-byte access
operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// long offset, 16-byte access
operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5407 
// Memory operand: a long value reinterpreted as a pointer (CastX2P)
// and used directly as the base address, no index or displacement.
operand indirectX2P(iRegL reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(CastX2P reg);
  op_cost(0);
  format %{ "[$reg]\t# long -> ptr" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// Memory operand: a long value reinterpreted as a pointer (CastX2P)
// plus a long constant offset.
operand indOffX2P(iRegL reg, immLOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (CastX2P reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# long -> ptr" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5435 
// Memory operand: a decoded narrow oop (DecodeN) used as the base
// address. Only legal when the compressed-oop shift is zero, as the
// predicate requires; the same guard applies to all the narrow-oop
// (…N) memory operands below.
operand indirectN(iRegN reg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}
 5450 
// Narrow-oop base plus a sign-extended 32-bit index shifted left by a
// constant scale. The size_fits_all_mem_uses predicate ensures every
// memory user of this AddP can encode the given scale.
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// Narrow-oop base plus a 64-bit index shifted left by a constant
// scale, subject to the same encodability predicate.
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}
 5480 
// Narrow-oop base plus a sign-extended 32-bit index, unscaled.
operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow-oop base plus a 64-bit index register, unscaled.
operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
 5510 
// Narrow-oop base plus an int constant offset.
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Narrow-oop base plus a long constant offset.
operand indOffLN(iRegN reg, immLOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5540 
 5541 
 5542 //----------Special Memory Operands--------------------------------------------
 5543 // Stack Slot Operand - This operand is used for loading and storing temporary
 5544 //                      values on the stack where a match requires a value to
 5545 //                      flow through memory.
// Pointer stack slot. All five stackSlot operands address memory as
// [SP + stack offset]; they have no match rule because the matcher
// manufactures them itself when a value must flow through memory.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Int stack slot.
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Float stack slot.
operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Double stack slot.
operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Long stack slot.
operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
 5616 
 5617 // Operands for expressing Control Flow
 5618 // NOTE: Label is a predefined operand which should not be redefined in
 5619 //       the AD file. It is generically handled within the ADLC.
 5620 
 5621 //----------Conditional Branch Operands----------------------------------------
 5622 // Comparison Op  - This is the operation of the comparison, and is limited to
 5623 //                  the following set of codes:
 5624 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
 5625 //
 5626 // Other attributes of the comparison, such as unsignedness, are specified
 5627 // by the comparison instruction that sets a condition code flags register.
 5628 // That result is represented by a flags operand whose subtype is appropriate
 5629 // to the unsignedness (etc.) of the comparison.
 5630 //
 5631 // Later, the instruction which matches both the Comparison Op (a Bool) and
 5632 // the flags (produced by the Cmp) specifies the coding of the comparison op
 5633 // by matching a specific subtype of Bool operand below, such as cmpOpU.
 5634 
// used for signed integral comparisons and fp comparisons.
// The hex values are the AArch64 condition-code encodings placed in
// the cond field of conditional instructions (B.cond, CSEL, ...).

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 5653 
// used for unsigned integral comparisons.
// Same as cmpOp but the ordering conditions use the unsigned
// condition codes: lo/hs/ls/hi instead of lt/ge/le/gt.

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 5672 
// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions.
// The predicate restricts this operand to eq/ne tests only.

operand cmpOpEqNe()
%{
  match(Bool);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 5695 
// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions.
// The predicate restricts this operand to lt/ge tests only.

operand cmpOpLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 5719 
// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions.
// The predicate restricts this operand to eq/ne/le/gt tests;
// the encodings are the unsigned condition codes (see cmpOpU).

operand cmpOpUEqNeLeGt()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq ||
            n->as_Bool()->_test._test == BoolTest::ne ||
            n->as_Bool()->_test._test == BoolTest::le ||
            n->as_Bool()->_test._test == BoolTest::gt);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 5745 
// Special operand allowing long args to int ops to be truncated for free.
// Matching ConvL2I here lets 32-bit instructions consume the long
// source register directly (the hardware ignores the upper 32 bits).

operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}

// Analogous operand folding a CastX2P (long -> pointer) into the
// consuming instruction for free.
operand iRegL2P(iRegL reg) %{

  op_cost(0);

  match(CastX2P reg);

  format %{ "l2p($reg)" %}

  interface(REG_INTER)
%}
 5769 
// Memory operand classes for vector loads/stores, one per access size
// (2/4/8/16 bytes). Note these only admit unscaled-index and
// size-matched constant-offset forms — presumably the addressing
// modes vector load/store instructions can encode (confirm against
// the vector load/store instruct definitions).
opclass vmem2(indirect, indIndex, indOffI2, indOffL2);
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
 5774 
 5775 //----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
 5777 // instruction definitions by not requiring the AD writer to specify
 5778 // separate instructions for every form of operand when the
 5779 // instruction accepts multiple operand types with the same basic
 5780 // encoding and format. The classic case of this is memory operands.
 5781 
 5782 // memory is used to define read/write location for load/store
 5783 // instruction defs. we can turn a memory op into an Address
 5784 
// memory1/2/4/8 group the memory operands legal for 1/2/4/8-byte
// scalar accesses; they differ only in which constant-offset operands
// (indOffI*/indOffL*) they admit, and the 1- and 2-byte classes omit
// the narrow-oop constant-offset forms (indOffIN/indOffLN).
opclass memory1(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI1, indOffL1,
                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indirectX2P, indOffX2P);

opclass memory2(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI2, indOffL2,
                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indirectX2P, indOffX2P);

opclass memory4(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI4, indOffL4,
                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P);

opclass memory8(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI8, indOffL8,
                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P);

// All of the memory operands. For the pipeline description.
opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex,
               indOffI1, indOffL1, indOffI2, indOffL2, indOffI4, indOffL4, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P);

// Memory operands with no index register (base, or base + constant
// offset, only).
opclass memory_noindex(indirect,
                       indOffI1, indOffL1,indOffI2, indOffL2, indOffI4, indOffL4, indOffI8, indOffL8,
                       indirectN, indOffIN, indOffLN, indirectX2P, indOffX2P);
 5805 
 5806 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
 5807 // operations. it allows the src to be either an iRegI or a (ConvL2I
 5808 // iRegL). in the latter case the l2i normally planted for a ConvL2I
 5809 // can be elided because the 32-bit instruction will just employ the
 5810 // lower 32 bits anyway.
 5811 //
 5812 // n.b. this does not elide all L2I conversions. if the truncated
 5813 // value is consumed by more than one operation then the ConvL2I
 5814 // cannot be bundled into the consuming nodes so an l2i gets planted
 5815 // (actually a movw $dst $src) and the downstream instructions consume
 5816 // the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but it's not too costly.
 5818 
// Source-operand classes that accept either a plain register or the
// corresponding free-conversion operand (see the comment above).
opclass iRegIorL2I(iRegI, iRegL2I);
opclass iRegPorL2P(iRegP, iRegL2P);
 5821 
 5822 //----------PIPELINE-----------------------------------------------------------
 5823 // Rules which define the behavior of the target architectures pipeline.
 5824 
// For specific pipelines, eg A53, define the stages of that pipeline
//pipe_desc(ISS, EX1, EX2, WR);
// Alias the first four generic stages (S0..S3, declared by pipe_desc
// below) to the A53-style names used throughout the pipe_class
// definitions: issue, execute-1, execute-2 and write-back.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
 5831 
 5832 // Integer ALU reg operation
 5833 pipeline %{
 5834 
attributes %{
  // AArch64 instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 4;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
 5847 
 5848 // We don't use an actual pipeline model so don't care about resources
 5849 // or description. we do use pipeline classes to introduce fixed
 5850 // latencies
 5851 
 5852 //----------RESOURCES----------------------------------------------------------
 5853 // Resources are the functional units available to the machine
 5854 
// INS0/INS1 are the two issue slots (INS01 = either slot), ALU0/ALU1
// the two integer ALUs (ALU = either), plus multiply-accumulate,
// divide, branch, load/store and NEON/FP units.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);
 5862 
 5863 //----------PIPELINE DESCRIPTION-----------------------------------------------
 5864 // Pipeline Description specifies the stages in the machine's pipeline
 5865 
// Define the pipeline as a generic 6 stage pipeline.
// Stages S0..S3 are aliased to ISS/EX1/EX2/WR by the #defines above.
pipe_desc(S0, S1, S2, S3, S4, S5);
 5868 
 5869 //----------PIPELINE CLASSES---------------------------------------------------
 5870 // Pipeline Classes describe the stages in which input and output are
 5871 // referenced by the hardware pipeline.
 5872 
// FP dyadic op, single precision: sources read in S1/S2, result
// written in S5 on the NEON/FP unit; dual-issue in either slot.
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP dyadic op, double precision.
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, single precision.
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, double precision.
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
 5910 
// FP conversion classes. All share the same timing: source read in
// S1, result written in S5 on the NEON/FP unit, dual-issue in either
// slot; they differ only in operand kinds.

// double -> float
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> double
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> int
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> long
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> float
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> float
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> int
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> long
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> double
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> double
// NOTE(review): src is iRegIorL2I, same as fp_i2d, whereas fp_l2f
// takes iRegL — confirm this is intended.
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
 6000 
// FP divide, single precision. Uses INS0 only, i.e. can issue only in
// slot 0 (unlike the INS01 classes above).
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP divide, double precision; slot 0 only.
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP conditional select, single precision: also reads the flags
// register; result available earlier (S3) than the arithmetic classes.
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP conditional select, double precision.
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move-immediate, single precision (no source operands).
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move-immediate, double precision.
pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP constant load, single precision.
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

// FP constant load, double precision.
pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
 6074 
 6075 //------- Integer ALU operations --------------------------
 6076 
// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read); // shifted operand read early, at issue
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}
 6113 
// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
// NOTE(review): dst is written in EX2 per the comment, but the ALU
// resource is claimed at EX1 — confirm whether ALU : EX2 was intended.
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
 6172 
//------- Compare operation -------------------------------

// Compare reg-reg: writes the flags register in EX2.
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-immediate
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
 6199 
//------- Conditional instructions ------------------------
// All read the flags register in EX1 and write the result in EX2.

// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
 6237 
//------- Multiply pipeline operations --------------------
// All multiply classes read sources at issue, write the result at WR
// and occupy the MAC unit; the l* variants add a fixed latency of 3
// cycles for the 64-bit forms.

// Multiply reg-reg
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long (64-bit) multiply
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long (64-bit) multiply accumulate
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
 6290 
//------- Divide pipeline operations --------------------
// Divides issue only in slot 0 and occupy the DIV unit; fixed
// latencies model the worst case (8 cycles 32-bit, 16 cycles 64-bit).

// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
 6316 
//------- Load pipeline operations ------------------------
// Loads read their address operands at issue and occupy the
// load/store unit through WR.

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
 6350 
//------- Store pipeline operations -----------------------
// Stores read the address at issue and the stored value in EX2.

// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read); // address register, read at issue
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
 6384 
//------- Branch pipeline operations ----------------------
 6386 
 6387 // Branch
 6388 pipe_class pipe_branch()
 6389 %{
 6390   single_instruction;
 6391   INS01  : ISS;
 6392   BRANCH : EX1;
 6393 %}
 6394 
 6395 // Conditional branch
 6396 pipe_class pipe_branch_cond(rFlagsReg cr)
 6397 %{
 6398   single_instruction;
 6399   cr     : EX1(read);
 6400   INS01  : ISS;
 6401   BRANCH : EX1;
 6402 %}
 6403 
 6404 // Compare & Branch
 6405 // EG.  CBZ/CBNZ
 6406 pipe_class pipe_cmp_branch(iRegI op1)
 6407 %{
 6408   single_instruction;
 6409   op1    : EX1(read);
 6410   INS01  : ISS;
 6411   BRANCH : EX1;
 6412 %}
 6413 
 6414 //------- Synchronisation operations ----------------------
 6415 
 6416 // Any operation requiring serialization.
 6417 // EG.  DMB/Atomic Ops/Load Acquire/Str Release
 6418 pipe_class pipe_serial()
 6419 %{
 6420   single_instruction;
 6421   force_serialization;
 6422   fixed_latency(16);
 6423   INS01  : ISS(2); // Cannot dual issue with any other instruction
 6424   LDST   : WR;
 6425 %}
 6426 
 6427 // Generic big/slow expanded idiom - also serialized
 6428 pipe_class pipe_slow()
 6429 %{
 6430   instruction_count(10);
 6431   multiple_bundles;
 6432   force_serialization;
 6433   fixed_latency(16);
 6434   INS01  : ISS(2); // Cannot dual issue with any other instruction
 6435   LDST   : WR;
 6436 %}
 6437 
 6438 // Empty pipeline class
 6439 pipe_class pipe_class_empty()
 6440 %{
 6441   single_instruction;
 6442   fixed_latency(0);
 6443 %}
 6444 
 6445 // Default pipeline class.
 6446 pipe_class pipe_class_default()
 6447 %{
 6448   single_instruction;
 6449   fixed_latency(2);
 6450 %}
 6451 
 6452 // Pipeline class for compares.
 6453 pipe_class pipe_class_compare()
 6454 %{
 6455   single_instruction;
 6456   fixed_latency(16);
 6457 %}
 6458 
 6459 // Pipeline class for memory operations.
 6460 pipe_class pipe_class_memory()
 6461 %{
 6462   single_instruction;
 6463   fixed_latency(16);
 6464 %}
 6465 
 6466 // Pipeline class for call.
 6467 pipe_class pipe_class_call()
 6468 %{
 6469   single_instruction;
 6470   fixed_latency(100);
 6471 %}
 6472 
 6473 // Define the class for the Nop node.
 6474 define %{
 6475    MachNop = pipe_class_empty;
 6476 %}
 6477 
 6478 %}
 6479 //----------INSTRUCTIONS-------------------------------------------------------
 6480 //
 6481 // match      -- States which machine-independent subtree may be replaced
 6482 //               by this instruction.
 6483 // ins_cost   -- The estimated cost of this instruction is used by instruction
 6484 //               selection to identify a minimum cost tree of machine
 6485 //               instructions that matches a tree of machine-independent
 6486 //               instructions.
 6487 // format     -- A string providing the disassembly for this instruction.
 6488 //               The value of an instruction's operand may be inserted
 6489 //               by referring to it with a '$' prefix.
 6490 // opcode     -- Three instruction opcodes may be provided.  These are referred
 6491 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
 6493 //               indicate the type of machine instruction, while secondary
 6494 //               and tertiary are often used for prefix options or addressing
 6495 //               modes.
 6496 // ins_encode -- A list of encode classes with parameters. The encode class
 6497 //               name must have been defined in an 'enc_class' specification
 6498 //               in the encode section of the architecture description.
 6499 
 6500 // ============================================================================
 6501 // Memory (Load/Store) Instructions
 6502 
 6503 // Load Instructions
 6504 
 6505 // Load Byte (8 bit signed)
 6506 instruct loadB(iRegINoSp dst, memory1 mem)
 6507 %{
 6508   match(Set dst (LoadB mem));
 6509   predicate(!needs_acquiring_load(n));
 6510 
 6511   ins_cost(4 * INSN_COST);
 6512   format %{ "ldrsbw  $dst, $mem\t# byte" %}
 6513 
 6514   ins_encode(aarch64_enc_ldrsbw(dst, mem));
 6515 
 6516   ins_pipe(iload_reg_mem);
 6517 %}
 6518 
 6519 // Load Byte (8 bit signed) into long
 6520 instruct loadB2L(iRegLNoSp dst, memory1 mem)
 6521 %{
 6522   match(Set dst (ConvI2L (LoadB mem)));
 6523   predicate(!needs_acquiring_load(n->in(1)));
 6524 
 6525   ins_cost(4 * INSN_COST);
 6526   format %{ "ldrsb  $dst, $mem\t# byte" %}
 6527 
 6528   ins_encode(aarch64_enc_ldrsb(dst, mem));
 6529 
 6530   ins_pipe(iload_reg_mem);
 6531 %}
 6532 
 6533 // Load Byte (8 bit unsigned)
 6534 instruct loadUB(iRegINoSp dst, memory1 mem)
 6535 %{
 6536   match(Set dst (LoadUB mem));
 6537   predicate(!needs_acquiring_load(n));
 6538 
 6539   ins_cost(4 * INSN_COST);
 6540   format %{ "ldrbw  $dst, $mem\t# byte" %}
 6541 
 6542   ins_encode(aarch64_enc_ldrb(dst, mem));
 6543 
 6544   ins_pipe(iload_reg_mem);
 6545 %}
 6546 
 6547 // Load Byte (8 bit unsigned) into long
 6548 instruct loadUB2L(iRegLNoSp dst, memory1 mem)
 6549 %{
 6550   match(Set dst (ConvI2L (LoadUB mem)));
 6551   predicate(!needs_acquiring_load(n->in(1)));
 6552 
 6553   ins_cost(4 * INSN_COST);
 6554   format %{ "ldrb  $dst, $mem\t# byte" %}
 6555 
 6556   ins_encode(aarch64_enc_ldrb(dst, mem));
 6557 
 6558   ins_pipe(iload_reg_mem);
 6559 %}
 6560 
 6561 // Load Short (16 bit signed)
 6562 instruct loadS(iRegINoSp dst, memory2 mem)
 6563 %{
 6564   match(Set dst (LoadS mem));
 6565   predicate(!needs_acquiring_load(n));
 6566 
 6567   ins_cost(4 * INSN_COST);
 6568   format %{ "ldrshw  $dst, $mem\t# short" %}
 6569 
 6570   ins_encode(aarch64_enc_ldrshw(dst, mem));
 6571 
 6572   ins_pipe(iload_reg_mem);
 6573 %}
 6574 
 6575 // Load Short (16 bit signed) into long
 6576 instruct loadS2L(iRegLNoSp dst, memory2 mem)
 6577 %{
 6578   match(Set dst (ConvI2L (LoadS mem)));
 6579   predicate(!needs_acquiring_load(n->in(1)));
 6580 
 6581   ins_cost(4 * INSN_COST);
 6582   format %{ "ldrsh  $dst, $mem\t# short" %}
 6583 
 6584   ins_encode(aarch64_enc_ldrsh(dst, mem));
 6585 
 6586   ins_pipe(iload_reg_mem);
 6587 %}
 6588 
 6589 // Load Char (16 bit unsigned)
 6590 instruct loadUS(iRegINoSp dst, memory2 mem)
 6591 %{
 6592   match(Set dst (LoadUS mem));
 6593   predicate(!needs_acquiring_load(n));
 6594 
 6595   ins_cost(4 * INSN_COST);
 6596   format %{ "ldrh  $dst, $mem\t# short" %}
 6597 
 6598   ins_encode(aarch64_enc_ldrh(dst, mem));
 6599 
 6600   ins_pipe(iload_reg_mem);
 6601 %}
 6602 
 6603 // Load Short/Char (16 bit unsigned) into long
 6604 instruct loadUS2L(iRegLNoSp dst, memory2 mem)
 6605 %{
 6606   match(Set dst (ConvI2L (LoadUS mem)));
 6607   predicate(!needs_acquiring_load(n->in(1)));
 6608 
 6609   ins_cost(4 * INSN_COST);
 6610   format %{ "ldrh  $dst, $mem\t# short" %}
 6611 
 6612   ins_encode(aarch64_enc_ldrh(dst, mem));
 6613 
 6614   ins_pipe(iload_reg_mem);
 6615 %}
 6616 
 6617 // Load Integer (32 bit signed)
 6618 instruct loadI(iRegINoSp dst, memory4 mem)
 6619 %{
 6620   match(Set dst (LoadI mem));
 6621   predicate(!needs_acquiring_load(n));
 6622 
 6623   ins_cost(4 * INSN_COST);
 6624   format %{ "ldrw  $dst, $mem\t# int" %}
 6625 
 6626   ins_encode(aarch64_enc_ldrw(dst, mem));
 6627 
 6628   ins_pipe(iload_reg_mem);
 6629 %}
 6630 
 6631 // Load Integer (32 bit signed) into long
 6632 instruct loadI2L(iRegLNoSp dst, memory4 mem)
 6633 %{
 6634   match(Set dst (ConvI2L (LoadI mem)));
 6635   predicate(!needs_acquiring_load(n->in(1)));
 6636 
 6637   ins_cost(4 * INSN_COST);
 6638   format %{ "ldrsw  $dst, $mem\t# int" %}
 6639 
 6640   ins_encode(aarch64_enc_ldrsw(dst, mem));
 6641 
 6642   ins_pipe(iload_reg_mem);
 6643 %}
 6644 
 6645 // Load Integer (32 bit unsigned) into long
 6646 instruct loadUI2L(iRegLNoSp dst, memory4 mem, immL_32bits mask)
 6647 %{
 6648   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
 6649   predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));
 6650 
 6651   ins_cost(4 * INSN_COST);
 6652   format %{ "ldrw  $dst, $mem\t# int" %}
 6653 
 6654   ins_encode(aarch64_enc_ldrw(dst, mem));
 6655 
 6656   ins_pipe(iload_reg_mem);
 6657 %}
 6658 
 6659 // Load Long (64 bit signed)
 6660 instruct loadL(iRegLNoSp dst, memory8 mem)
 6661 %{
 6662   match(Set dst (LoadL mem));
 6663   predicate(!needs_acquiring_load(n));
 6664 
 6665   ins_cost(4 * INSN_COST);
 6666   format %{ "ldr  $dst, $mem\t# int" %}
 6667 
 6668   ins_encode(aarch64_enc_ldr(dst, mem));
 6669 
 6670   ins_pipe(iload_reg_mem);
 6671 %}
 6672 
 6673 // Load Range
 6674 instruct loadRange(iRegINoSp dst, memory4 mem)
 6675 %{
 6676   match(Set dst (LoadRange mem));
 6677 
 6678   ins_cost(4 * INSN_COST);
 6679   format %{ "ldrw  $dst, $mem\t# range" %}
 6680 
 6681   ins_encode(aarch64_enc_ldrw(dst, mem));
 6682 
 6683   ins_pipe(iload_reg_mem);
 6684 %}
 6685 
 6686 // Load Pointer
 6687 instruct loadP(iRegPNoSp dst, memory8 mem)
 6688 %{
 6689   match(Set dst (LoadP mem));
 6690   predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0));
 6691 
 6692   ins_cost(4 * INSN_COST);
 6693   format %{ "ldr  $dst, $mem\t# ptr" %}
 6694 
 6695   ins_encode(aarch64_enc_ldr(dst, mem));
 6696 
 6697   ins_pipe(iload_reg_mem);
 6698 %}
 6699 
 6700 // Load Compressed Pointer
 6701 instruct loadN(iRegNNoSp dst, memory4 mem)
 6702 %{
 6703   match(Set dst (LoadN mem));
 6704   predicate(!needs_acquiring_load(n) && n->as_Load()->barrier_data() == 0);
 6705 
 6706   ins_cost(4 * INSN_COST);
 6707   format %{ "ldrw  $dst, $mem\t# compressed ptr" %}
 6708 
 6709   ins_encode(aarch64_enc_ldrw(dst, mem));
 6710 
 6711   ins_pipe(iload_reg_mem);
 6712 %}
 6713 
 6714 // Load Klass Pointer
 6715 instruct loadKlass(iRegPNoSp dst, memory8 mem)
 6716 %{
 6717   match(Set dst (LoadKlass mem));
 6718   predicate(!needs_acquiring_load(n));
 6719 
 6720   ins_cost(4 * INSN_COST);
 6721   format %{ "ldr  $dst, $mem\t# class" %}
 6722 
 6723   ins_encode(aarch64_enc_ldr(dst, mem));
 6724 
 6725   ins_pipe(iload_reg_mem);
 6726 %}
 6727 
 6728 // Load Narrow Klass Pointer
 6729 instruct loadNKlass(iRegNNoSp dst, memory4 mem)
 6730 %{
 6731   match(Set dst (LoadNKlass mem));
 6732   predicate(!needs_acquiring_load(n) && !UseCompactObjectHeaders);
 6733 
 6734   ins_cost(4 * INSN_COST);
 6735   format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}
 6736 
 6737   ins_encode(aarch64_enc_ldrw(dst, mem));
 6738 
 6739   ins_pipe(iload_reg_mem);
 6740 %}
 6741 
instruct loadNKlassCompactHeaders(iRegNNoSp dst, memory_noindex mem)
%{
  match(Set dst (LoadNKlass mem));
  // Compact-headers counterpart of loadNKlass: the narrow klass lives in the
  // upper bits of the object's mark-word, so it needs a load plus a shift.
  predicate(!needs_acquiring_load(n) && UseCompactObjectHeaders);

  ins_cost(4 * INSN_COST);
  format %{
    "ldrw  $dst, $mem\t# compressed class ptr, shifted\n\t"
    "lsrw  $dst, $dst, markWord::klass_shift"
  %}
  ins_encode %{
    // memory_noindex guarantees a base+displacement-only address.
    assert($mem$$index$$Register == noreg, "must not have indexed address");
    // The incoming address is pointing into obj-start + klass_offset_in_bytes. We need to extract
    // obj-start, so that we can load from the object's mark-word instead.
    __ ldrw($dst$$Register, Address($mem$$base$$Register, $mem$$disp - Type::klass_offset()));
    __ lsrw($dst$$Register, $dst$$Register, markWord::klass_shift);
  %}
  ins_pipe(iload_reg_mem);
%}
 6761 
 6762 // Load Float
 6763 instruct loadF(vRegF dst, memory4 mem)
 6764 %{
 6765   match(Set dst (LoadF mem));
 6766   predicate(!needs_acquiring_load(n));
 6767 
 6768   ins_cost(4 * INSN_COST);
 6769   format %{ "ldrs  $dst, $mem\t# float" %}
 6770 
 6771   ins_encode( aarch64_enc_ldrs(dst, mem) );
 6772 
 6773   ins_pipe(pipe_class_memory);
 6774 %}
 6775 
 6776 // Load Double
 6777 instruct loadD(vRegD dst, memory8 mem)
 6778 %{
 6779   match(Set dst (LoadD mem));
 6780   predicate(!needs_acquiring_load(n));
 6781 
 6782   ins_cost(4 * INSN_COST);
 6783   format %{ "ldrd  $dst, $mem\t# double" %}
 6784 
 6785   ins_encode( aarch64_enc_ldrd(dst, mem) );
 6786 
 6787   ins_pipe(pipe_class_memory);
 6788 %}
 6789 
 6790 
 6791 // Load Int Constant
 6792 instruct loadConI(iRegINoSp dst, immI src)
 6793 %{
 6794   match(Set dst src);
 6795 
 6796   ins_cost(INSN_COST);
 6797   format %{ "mov $dst, $src\t# int" %}
 6798 
 6799   ins_encode( aarch64_enc_movw_imm(dst, src) );
 6800 
 6801   ins_pipe(ialu_imm);
 6802 %}
 6803 
 6804 // Load Long Constant
 6805 instruct loadConL(iRegLNoSp dst, immL src)
 6806 %{
 6807   match(Set dst src);
 6808 
 6809   ins_cost(INSN_COST);
 6810   format %{ "mov $dst, $src\t# long" %}
 6811 
 6812   ins_encode( aarch64_enc_mov_imm(dst, src) );
 6813 
 6814   ins_pipe(ialu_imm);
 6815 %}
 6816 
 6817 // Load Pointer Constant
 6818 
 6819 instruct loadConP(iRegPNoSp dst, immP con)
 6820 %{
 6821   match(Set dst con);
 6822 
 6823   ins_cost(INSN_COST * 4);
 6824   format %{
 6825     "mov  $dst, $con\t# ptr\n\t"
 6826   %}
 6827 
 6828   ins_encode(aarch64_enc_mov_p(dst, con));
 6829 
 6830   ins_pipe(ialu_imm);
 6831 %}
 6832 
 6833 // Load Null Pointer Constant
 6834 
 6835 instruct loadConP0(iRegPNoSp dst, immP0 con)
 6836 %{
 6837   match(Set dst con);
 6838 
 6839   ins_cost(INSN_COST);
 6840   format %{ "mov  $dst, $con\t# nullptr ptr" %}
 6841 
 6842   ins_encode(aarch64_enc_mov_p0(dst, con));
 6843 
 6844   ins_pipe(ialu_imm);
 6845 %}
 6846 
 6847 // Load Pointer Constant One
 6848 
 6849 instruct loadConP1(iRegPNoSp dst, immP_1 con)
 6850 %{
 6851   match(Set dst con);
 6852 
 6853   ins_cost(INSN_COST);
 6854   format %{ "mov  $dst, $con\t# nullptr ptr" %}
 6855 
 6856   ins_encode(aarch64_enc_mov_p1(dst, con));
 6857 
 6858   ins_pipe(ialu_imm);
 6859 %}
 6860 
 6861 // Load Byte Map Base Constant
 6862 
 6863 instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
 6864 %{
 6865   match(Set dst con);
 6866 
 6867   ins_cost(INSN_COST);
 6868   format %{ "adr  $dst, $con\t# Byte Map Base" %}
 6869 
 6870   ins_encode(aarch64_enc_mov_byte_map_base(dst, con));
 6871 
 6872   ins_pipe(ialu_imm);
 6873 %}
 6874 
 6875 // Load Narrow Pointer Constant
 6876 
 6877 instruct loadConN(iRegNNoSp dst, immN con)
 6878 %{
 6879   match(Set dst con);
 6880 
 6881   ins_cost(INSN_COST * 4);
 6882   format %{ "mov  $dst, $con\t# compressed ptr" %}
 6883 
 6884   ins_encode(aarch64_enc_mov_n(dst, con));
 6885 
 6886   ins_pipe(ialu_imm);
 6887 %}
 6888 
 6889 // Load Narrow Null Pointer Constant
 6890 
 6891 instruct loadConN0(iRegNNoSp dst, immN0 con)
 6892 %{
 6893   match(Set dst con);
 6894 
 6895   ins_cost(INSN_COST);
 6896   format %{ "mov  $dst, $con\t# compressed nullptr ptr" %}
 6897 
 6898   ins_encode(aarch64_enc_mov_n0(dst, con));
 6899 
 6900   ins_pipe(ialu_imm);
 6901 %}
 6902 
 6903 // Load Narrow Klass Constant
 6904 
 6905 instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
 6906 %{
 6907   match(Set dst con);
 6908 
 6909   ins_cost(INSN_COST);
 6910   format %{ "mov  $dst, $con\t# compressed klass ptr" %}
 6911 
 6912   ins_encode(aarch64_enc_mov_nk(dst, con));
 6913 
 6914   ins_pipe(ialu_imm);
 6915 %}
 6916 
 6917 // Load Packed Float Constant
 6918 
 6919 instruct loadConF_packed(vRegF dst, immFPacked con) %{
 6920   match(Set dst con);
 6921   ins_cost(INSN_COST * 4);
 6922   format %{ "fmovs  $dst, $con"%}
 6923   ins_encode %{
 6924     __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
 6925   %}
 6926 
 6927   ins_pipe(fp_imm_s);
 6928 %}
 6929 
 6930 // Load Float Constant
 6931 
 6932 instruct loadConF(vRegF dst, immF con) %{
 6933   match(Set dst con);
 6934 
 6935   ins_cost(INSN_COST * 4);
 6936 
 6937   format %{
 6938     "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
 6939   %}
 6940 
 6941   ins_encode %{
 6942     __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
 6943   %}
 6944 
 6945   ins_pipe(fp_load_constant_s);
 6946 %}
 6947 
 6948 // Load Packed Double Constant
 6949 
 6950 instruct loadConD_packed(vRegD dst, immDPacked con) %{
 6951   match(Set dst con);
 6952   ins_cost(INSN_COST);
 6953   format %{ "fmovd  $dst, $con"%}
 6954   ins_encode %{
 6955     __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
 6956   %}
 6957 
 6958   ins_pipe(fp_imm_d);
 6959 %}
 6960 
 6961 // Load Double Constant
 6962 
 6963 instruct loadConD(vRegD dst, immD con) %{
 6964   match(Set dst con);
 6965 
 6966   ins_cost(INSN_COST * 5);
 6967   format %{
 6968     "ldrd $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
 6969   %}
 6970 
 6971   ins_encode %{
 6972     __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
 6973   %}
 6974 
 6975   ins_pipe(fp_load_constant_d);
 6976 %}
 6977 
 6978 // Load Half Float Constant
 6979 // The "ldr" instruction loads a 32-bit word from the constant pool into a
 6980 // 32-bit register but only the bottom half will be populated and the top
 6981 // 16 bits are zero.
 6982 instruct loadConH(vRegF dst, immH con) %{
 6983   match(Set dst con);
 6984   format %{
 6985     "ldrs $dst, [$constantaddress]\t# load from constant table: half float=$con\n\t"
 6986   %}
 6987   ins_encode %{
 6988     __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
 6989   %}
 6990   ins_pipe(fp_load_constant_s);
 6991 %}
 6992 
 6993 // Store Instructions
 6994 
 6995 // Store Byte
 6996 instruct storeB(iRegIorL2I src, memory1 mem)
 6997 %{
 6998   match(Set mem (StoreB mem src));
 6999   predicate(!needs_releasing_store(n));
 7000 
 7001   ins_cost(INSN_COST);
 7002   format %{ "strb  $src, $mem\t# byte" %}
 7003 
 7004   ins_encode(aarch64_enc_strb(src, mem));
 7005 
 7006   ins_pipe(istore_reg_mem);
 7007 %}
 7008 
 7009 
 7010 instruct storeimmB0(immI0 zero, memory1 mem)
 7011 %{
 7012   match(Set mem (StoreB mem zero));
 7013   predicate(!needs_releasing_store(n));
 7014 
 7015   ins_cost(INSN_COST);
 7016   format %{ "strb rscractch2, $mem\t# byte" %}
 7017 
 7018   ins_encode(aarch64_enc_strb0(mem));
 7019 
 7020   ins_pipe(istore_mem);
 7021 %}
 7022 
 7023 // Store Char/Short
 7024 instruct storeC(iRegIorL2I src, memory2 mem)
 7025 %{
 7026   match(Set mem (StoreC mem src));
 7027   predicate(!needs_releasing_store(n));
 7028 
 7029   ins_cost(INSN_COST);
 7030   format %{ "strh  $src, $mem\t# short" %}
 7031 
 7032   ins_encode(aarch64_enc_strh(src, mem));
 7033 
 7034   ins_pipe(istore_reg_mem);
 7035 %}
 7036 
 7037 instruct storeimmC0(immI0 zero, memory2 mem)
 7038 %{
 7039   match(Set mem (StoreC mem zero));
 7040   predicate(!needs_releasing_store(n));
 7041 
 7042   ins_cost(INSN_COST);
 7043   format %{ "strh  zr, $mem\t# short" %}
 7044 
 7045   ins_encode(aarch64_enc_strh0(mem));
 7046 
 7047   ins_pipe(istore_mem);
 7048 %}
 7049 
 7050 // Store Integer
 7051 
 7052 instruct storeI(iRegIorL2I src, memory4 mem)
 7053 %{
 7054   match(Set mem(StoreI mem src));
 7055   predicate(!needs_releasing_store(n));
 7056 
 7057   ins_cost(INSN_COST);
 7058   format %{ "strw  $src, $mem\t# int" %}
 7059 
 7060   ins_encode(aarch64_enc_strw(src, mem));
 7061 
 7062   ins_pipe(istore_reg_mem);
 7063 %}
 7064 
 7065 instruct storeimmI0(immI0 zero, memory4 mem)
 7066 %{
 7067   match(Set mem(StoreI mem zero));
 7068   predicate(!needs_releasing_store(n));
 7069 
 7070   ins_cost(INSN_COST);
 7071   format %{ "strw  zr, $mem\t# int" %}
 7072 
 7073   ins_encode(aarch64_enc_strw0(mem));
 7074 
 7075   ins_pipe(istore_mem);
 7076 %}
 7077 
 7078 // Store Long (64 bit signed)
 7079 instruct storeL(iRegL src, memory8 mem)
 7080 %{
 7081   match(Set mem (StoreL mem src));
 7082   predicate(!needs_releasing_store(n));
 7083 
 7084   ins_cost(INSN_COST);
 7085   format %{ "str  $src, $mem\t# int" %}
 7086 
 7087   ins_encode(aarch64_enc_str(src, mem));
 7088 
 7089   ins_pipe(istore_reg_mem);
 7090 %}
 7091 
// Store Long zero (64 bit signed)
 7093 instruct storeimmL0(immL0 zero, memory8 mem)
 7094 %{
 7095   match(Set mem (StoreL mem zero));
 7096   predicate(!needs_releasing_store(n));
 7097 
 7098   ins_cost(INSN_COST);
 7099   format %{ "str  zr, $mem\t# int" %}
 7100 
 7101   ins_encode(aarch64_enc_str0(mem));
 7102 
 7103   ins_pipe(istore_mem);
 7104 %}
 7105 
 7106 // Store Pointer
 7107 instruct storeP(iRegP src, memory8 mem)
 7108 %{
 7109   match(Set mem (StoreP mem src));
 7110   predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);
 7111 
 7112   ins_cost(INSN_COST);
 7113   format %{ "str  $src, $mem\t# ptr" %}
 7114 
 7115   ins_encode(aarch64_enc_str(src, mem));
 7116 
 7117   ins_pipe(istore_reg_mem);
 7118 %}
 7119 
 7120 // Store Pointer
 7121 instruct storeimmP0(immP0 zero, memory8 mem)
 7122 %{
 7123   match(Set mem (StoreP mem zero));
 7124   predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);
 7125 
 7126   ins_cost(INSN_COST);
 7127   format %{ "str zr, $mem\t# ptr" %}
 7128 
 7129   ins_encode(aarch64_enc_str0(mem));
 7130 
 7131   ins_pipe(istore_mem);
 7132 %}
 7133 
 7134 // Store Compressed Pointer
 7135 instruct storeN(iRegN src, memory4 mem)
 7136 %{
 7137   match(Set mem (StoreN mem src));
 7138   predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);
 7139 
 7140   ins_cost(INSN_COST);
 7141   format %{ "strw  $src, $mem\t# compressed ptr" %}
 7142 
 7143   ins_encode(aarch64_enc_strw(src, mem));
 7144 
 7145   ins_pipe(istore_reg_mem);
 7146 %}
 7147 
 7148 instruct storeImmN0(immN0 zero, memory4 mem)
 7149 %{
 7150   match(Set mem (StoreN mem zero));
 7151   predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);
 7152 
 7153   ins_cost(INSN_COST);
 7154   format %{ "strw  zr, $mem\t# compressed ptr" %}
 7155 
 7156   ins_encode(aarch64_enc_strw0(mem));
 7157 
 7158   ins_pipe(istore_mem);
 7159 %}
 7160 
 7161 // Store Float
 7162 instruct storeF(vRegF src, memory4 mem)
 7163 %{
 7164   match(Set mem (StoreF mem src));
 7165   predicate(!needs_releasing_store(n));
 7166 
 7167   ins_cost(INSN_COST);
 7168   format %{ "strs  $src, $mem\t# float" %}
 7169 
 7170   ins_encode( aarch64_enc_strs(src, mem) );
 7171 
 7172   ins_pipe(pipe_class_memory);
 7173 %}
 7174 
 7175 // TODO
 7176 // implement storeImmF0 and storeFImmPacked
 7177 
 7178 // Store Double
 7179 instruct storeD(vRegD src, memory8 mem)
 7180 %{
 7181   match(Set mem (StoreD mem src));
 7182   predicate(!needs_releasing_store(n));
 7183 
 7184   ins_cost(INSN_COST);
 7185   format %{ "strd  $src, $mem\t# double" %}
 7186 
 7187   ins_encode( aarch64_enc_strd(src, mem) );
 7188 
 7189   ins_pipe(pipe_class_memory);
 7190 %}
 7191 
 7192 // Store Compressed Klass Pointer
 7193 instruct storeNKlass(iRegN src, memory4 mem)
 7194 %{
 7195   predicate(!needs_releasing_store(n));
 7196   match(Set mem (StoreNKlass mem src));
 7197 
 7198   ins_cost(INSN_COST);
 7199   format %{ "strw  $src, $mem\t# compressed klass ptr" %}
 7200 
 7201   ins_encode(aarch64_enc_strw(src, mem));
 7202 
 7203   ins_pipe(istore_reg_mem);
 7204 %}
 7205 
 7206 // TODO
 7207 // implement storeImmD0 and storeDImmPacked
 7208 
 7209 // prefetch instructions
 7210 // Must be safe to execute with invalid address (cannot fault).
 7211 
 7212 instruct prefetchalloc( memory8 mem ) %{
 7213   match(PrefetchAllocation mem);
 7214 
 7215   ins_cost(INSN_COST);
 7216   format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}
 7217 
 7218   ins_encode( aarch64_enc_prefetchw(mem) );
 7219 
 7220   ins_pipe(iload_prefetch);
 7221 %}
 7222 
 7223 //  ---------------- volatile loads and stores ----------------
 7224 
 7225 // Load Byte (8 bit signed)
 7226 instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
 7227 %{
 7228   match(Set dst (LoadB mem));
 7229 
 7230   ins_cost(VOLATILE_REF_COST);
 7231   format %{ "ldarsb  $dst, $mem\t# byte" %}
 7232 
 7233   ins_encode(aarch64_enc_ldarsb(dst, mem));
 7234 
 7235   ins_pipe(pipe_serial);
 7236 %}
 7237 
 7238 // Load Byte (8 bit signed) into long
 7239 instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7240 %{
 7241   match(Set dst (ConvI2L (LoadB mem)));
 7242 
 7243   ins_cost(VOLATILE_REF_COST);
 7244   format %{ "ldarsb  $dst, $mem\t# byte" %}
 7245 
 7246   ins_encode(aarch64_enc_ldarsb(dst, mem));
 7247 
 7248   ins_pipe(pipe_serial);
 7249 %}
 7250 
 7251 // Load Byte (8 bit unsigned)
 7252 instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
 7253 %{
 7254   match(Set dst (LoadUB mem));
 7255 
 7256   ins_cost(VOLATILE_REF_COST);
 7257   format %{ "ldarb  $dst, $mem\t# byte" %}
 7258 
 7259   ins_encode(aarch64_enc_ldarb(dst, mem));
 7260 
 7261   ins_pipe(pipe_serial);
 7262 %}
 7263 
 7264 // Load Byte (8 bit unsigned) into long
 7265 instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7266 %{
 7267   match(Set dst (ConvI2L (LoadUB mem)));
 7268 
 7269   ins_cost(VOLATILE_REF_COST);
 7270   format %{ "ldarb  $dst, $mem\t# byte" %}
 7271 
 7272   ins_encode(aarch64_enc_ldarb(dst, mem));
 7273 
 7274   ins_pipe(pipe_serial);
 7275 %}
 7276 
 7277 // Load Short (16 bit signed)
 7278 instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
 7279 %{
 7280   match(Set dst (LoadS mem));
 7281 
 7282   ins_cost(VOLATILE_REF_COST);
 7283   format %{ "ldarshw  $dst, $mem\t# short" %}
 7284 
 7285   ins_encode(aarch64_enc_ldarshw(dst, mem));
 7286 
 7287   ins_pipe(pipe_serial);
 7288 %}
 7289 
 7290 instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
 7291 %{
 7292   match(Set dst (LoadUS mem));
 7293 
 7294   ins_cost(VOLATILE_REF_COST);
 7295   format %{ "ldarhw  $dst, $mem\t# short" %}
 7296 
 7297   ins_encode(aarch64_enc_ldarhw(dst, mem));
 7298 
 7299   ins_pipe(pipe_serial);
 7300 %}
 7301 
 7302 // Load Short/Char (16 bit unsigned) into long
 7303 instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7304 %{
 7305   match(Set dst (ConvI2L (LoadUS mem)));
 7306 
 7307   ins_cost(VOLATILE_REF_COST);
 7308   format %{ "ldarh  $dst, $mem\t# short" %}
 7309 
 7310   ins_encode(aarch64_enc_ldarh(dst, mem));
 7311 
 7312   ins_pipe(pipe_serial);
 7313 %}
 7314 
 7315 // Load Short/Char (16 bit signed) into long
 7316 instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7317 %{
 7318   match(Set dst (ConvI2L (LoadS mem)));
 7319 
 7320   ins_cost(VOLATILE_REF_COST);
 7321   format %{ "ldarh  $dst, $mem\t# short" %}
 7322 
 7323   ins_encode(aarch64_enc_ldarsh(dst, mem));
 7324 
 7325   ins_pipe(pipe_serial);
 7326 %}
 7327 
 7328 // Load Integer (32 bit signed)
 7329 instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
 7330 %{
 7331   match(Set dst (LoadI mem));
 7332 
 7333   ins_cost(VOLATILE_REF_COST);
 7334   format %{ "ldarw  $dst, $mem\t# int" %}
 7335 
 7336   ins_encode(aarch64_enc_ldarw(dst, mem));
 7337 
 7338   ins_pipe(pipe_serial);
 7339 %}
 7340 
 7341 // Load Integer (32 bit unsigned) into long
 7342 instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
 7343 %{
 7344   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
 7345 
 7346   ins_cost(VOLATILE_REF_COST);
 7347   format %{ "ldarw  $dst, $mem\t# int" %}
 7348 
 7349   ins_encode(aarch64_enc_ldarw(dst, mem));
 7350 
 7351   ins_pipe(pipe_serial);
 7352 %}
 7353 
 7354 // Load Long (64 bit signed)
 7355 instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7356 %{
 7357   match(Set dst (LoadL mem));
 7358 
 7359   ins_cost(VOLATILE_REF_COST);
 7360   format %{ "ldar  $dst, $mem\t# int" %}
 7361 
 7362   ins_encode(aarch64_enc_ldar(dst, mem));
 7363 
 7364   ins_pipe(pipe_serial);
 7365 %}
 7366 
 7367 // Load Pointer
 7368 instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
 7369 %{
 7370   match(Set dst (LoadP mem));
 7371   predicate(n->as_Load()->barrier_data() == 0);
 7372 
 7373   ins_cost(VOLATILE_REF_COST);
 7374   format %{ "ldar  $dst, $mem\t# ptr" %}
 7375 
 7376   ins_encode(aarch64_enc_ldar(dst, mem));
 7377 
 7378   ins_pipe(pipe_serial);
 7379 %}
 7380 
 7381 // Load Compressed Pointer
 7382 instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
 7383 %{
 7384   match(Set dst (LoadN mem));
 7385   predicate(n->as_Load()->barrier_data() == 0);
 7386 
 7387   ins_cost(VOLATILE_REF_COST);
 7388   format %{ "ldarw  $dst, $mem\t# compressed ptr" %}
 7389 
 7390   ins_encode(aarch64_enc_ldarw(dst, mem));
 7391 
 7392   ins_pipe(pipe_serial);
 7393 %}
 7394 
 7395 // Load Float
 7396 instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
 7397 %{
 7398   match(Set dst (LoadF mem));
 7399 
 7400   ins_cost(VOLATILE_REF_COST);
 7401   format %{ "ldars  $dst, $mem\t# float" %}
 7402 
 7403   ins_encode( aarch64_enc_fldars(dst, mem) );
 7404 
 7405   ins_pipe(pipe_serial);
 7406 %}
 7407 
 7408 // Load Double
 7409 instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
 7410 %{
 7411   match(Set dst (LoadD mem));
 7412 
 7413   ins_cost(VOLATILE_REF_COST);
 7414   format %{ "ldard  $dst, $mem\t# double" %}
 7415 
 7416   ins_encode( aarch64_enc_fldard(dst, mem) );
 7417 
 7418   ins_pipe(pipe_serial);
 7419 %}
 7420 
 7421 // Store Byte
 7422 instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
 7423 %{
 7424   match(Set mem (StoreB mem src));
 7425 
 7426   ins_cost(VOLATILE_REF_COST);
 7427   format %{ "stlrb  $src, $mem\t# byte" %}
 7428 
 7429   ins_encode(aarch64_enc_stlrb(src, mem));
 7430 
 7431   ins_pipe(pipe_class_memory);
 7432 %}
 7433 
 7434 instruct storeimmB0_volatile(immI0 zero, /* sync_memory*/indirect mem)
 7435 %{
 7436   match(Set mem (StoreB mem zero));
 7437 
 7438   ins_cost(VOLATILE_REF_COST);
 7439   format %{ "stlrb  zr, $mem\t# byte" %}
 7440 
 7441   ins_encode(aarch64_enc_stlrb0(mem));
 7442 
 7443   ins_pipe(pipe_class_memory);
 7444 %}
 7445 
 7446 // Store Char/Short
 7447 instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
 7448 %{
 7449   match(Set mem (StoreC mem src));
 7450 
 7451   ins_cost(VOLATILE_REF_COST);
 7452   format %{ "stlrh  $src, $mem\t# short" %}
 7453 
 7454   ins_encode(aarch64_enc_stlrh(src, mem));
 7455 
 7456   ins_pipe(pipe_class_memory);
 7457 %}
 7458 
 7459 instruct storeimmC0_volatile(immI0 zero, /* sync_memory*/indirect mem)
 7460 %{
 7461   match(Set mem (StoreC mem zero));
 7462 
 7463   ins_cost(VOLATILE_REF_COST);
 7464   format %{ "stlrh  zr, $mem\t# short" %}
 7465 
 7466   ins_encode(aarch64_enc_stlrh0(mem));
 7467 
 7468   ins_pipe(pipe_class_memory);
 7469 %}
 7470 
 7471 // Store Integer
 7472 
 7473 instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
 7474 %{
 7475   match(Set mem(StoreI mem src));
 7476 
 7477   ins_cost(VOLATILE_REF_COST);
 7478   format %{ "stlrw  $src, $mem\t# int" %}
 7479 
 7480   ins_encode(aarch64_enc_stlrw(src, mem));
 7481 
 7482   ins_pipe(pipe_class_memory);
 7483 %}
 7484 
 7485 instruct storeimmI0_volatile(immI0 zero, /* sync_memory*/indirect mem)
 7486 %{
 7487   match(Set mem(StoreI mem zero));
 7488 
 7489   ins_cost(VOLATILE_REF_COST);
 7490   format %{ "stlrw  zr, $mem\t# int" %}
 7491 
 7492   ins_encode(aarch64_enc_stlrw0(mem));
 7493 
 7494   ins_pipe(pipe_class_memory);
 7495 %}
 7496 
 7497 // Store Long (64 bit signed)
 7498 instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
 7499 %{
 7500   match(Set mem (StoreL mem src));
 7501 
 7502   ins_cost(VOLATILE_REF_COST);
 7503   format %{ "stlr  $src, $mem\t# int" %}
 7504 
 7505   ins_encode(aarch64_enc_stlr(src, mem));
 7506 
 7507   ins_pipe(pipe_class_memory);
 7508 %}
 7509 
 7510 instruct storeimmL0_volatile(immL0 zero, /* sync_memory*/indirect mem)
 7511 %{
 7512   match(Set mem (StoreL mem zero));
 7513 
 7514   ins_cost(VOLATILE_REF_COST);
 7515   format %{ "stlr  zr, $mem\t# int" %}
 7516 
 7517   ins_encode(aarch64_enc_stlr0(mem));
 7518 
 7519   ins_pipe(pipe_class_memory);
 7520 %}
 7521 
// Store Pointer
// Only matches when the store carries no GC barrier data; stores that
// need a GC barrier are handled by GC-specific rules elsewhere.
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Volatile store of a null pointer via the zero register (barrier-free only).
instruct storeimmP0_volatile(immP0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr0(mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
// 32-bit store-release of a narrow oop (barrier-free only).
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Volatile store of a null narrow oop via the zero register (barrier-free only).
instruct storeimmN0_volatile(immN0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  zr, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw0(mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float
// Volatile float store; encoding routes through an FP store-release helper.
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
// Volatile double store; encoding routes through an FP store-release helper.
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 7604 
//  ---------------- end of volatile loads and stores ----------------

// Write back (flush) a data cache line containing addr. Only available
// when the CPU supports data cache line flush (see predicate); the
// address must be a plain base register (no index, zero displacement).
instruct cacheWB(indirect addr)
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWB addr);

  ins_cost(100);
  format %{"cache wb $addr" %}
  ins_encode %{
    assert($addr->index_position() < 0, "should be");
    assert($addr$$disp == 0, "should be");
    __ cache_wb(Address($addr$$base$$Register, 0));
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Ordering barrier emitted before a sequence of cache-line writebacks.
instruct cacheWBPreSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPreSync);

  ins_cost(100);
  format %{"cache wb presync" %}
  ins_encode %{
    __ cache_wbsync(true);
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Ordering barrier emitted after a sequence of cache-line writebacks.
instruct cacheWBPostSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPostSync);

  ins_cost(100);
  format %{"cache wb postsync" %}
  ins_encode %{
    __ cache_wbsync(false);
  %}
  ins_pipe(pipe_slow); // XXX
%}
 7647 
 7648 // ============================================================================
 7649 // BSWAP Instructions
 7650 
 7651 instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
 7652   match(Set dst (ReverseBytesI src));
 7653 
 7654   ins_cost(INSN_COST);
 7655   format %{ "revw  $dst, $src" %}
 7656 
 7657   ins_encode %{
 7658     __ revw(as_Register($dst$$reg), as_Register($src$$reg));
 7659   %}
 7660 
 7661   ins_pipe(ialu_reg);
 7662 %}
 7663 
 7664 instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
 7665   match(Set dst (ReverseBytesL src));
 7666 
 7667   ins_cost(INSN_COST);
 7668   format %{ "rev  $dst, $src" %}
 7669 
 7670   ins_encode %{
 7671     __ rev(as_Register($dst$$reg), as_Register($src$$reg));
 7672   %}
 7673 
 7674   ins_pipe(ialu_reg);
 7675 %}
 7676 
 7677 instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
 7678   match(Set dst (ReverseBytesUS src));
 7679 
 7680   ins_cost(INSN_COST);
 7681   format %{ "rev16w  $dst, $src" %}
 7682 
 7683   ins_encode %{
 7684     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
 7685   %}
 7686 
 7687   ins_pipe(ialu_reg);
 7688 %}
 7689 
 7690 instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
 7691   match(Set dst (ReverseBytesS src));
 7692 
 7693   ins_cost(INSN_COST);
 7694   format %{ "rev16w  $dst, $src\n\t"
 7695             "sbfmw $dst, $dst, #0, #15" %}
 7696 
 7697   ins_encode %{
 7698     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
 7699     __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
 7700   %}
 7701 
 7702   ins_pipe(ialu_reg);
 7703 %}
 7704 
 7705 // ============================================================================
 7706 // Zero Count Instructions
 7707 
 7708 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
 7709   match(Set dst (CountLeadingZerosI src));
 7710 
 7711   ins_cost(INSN_COST);
 7712   format %{ "clzw  $dst, $src" %}
 7713   ins_encode %{
 7714     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
 7715   %}
 7716 
 7717   ins_pipe(ialu_reg);
 7718 %}
 7719 
 7720 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
 7721   match(Set dst (CountLeadingZerosL src));
 7722 
 7723   ins_cost(INSN_COST);
 7724   format %{ "clz   $dst, $src" %}
 7725   ins_encode %{
 7726     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
 7727   %}
 7728 
 7729   ins_pipe(ialu_reg);
 7730 %}
 7731 
 7732 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
 7733   match(Set dst (CountTrailingZerosI src));
 7734 
 7735   ins_cost(INSN_COST * 2);
 7736   format %{ "rbitw  $dst, $src\n\t"
 7737             "clzw   $dst, $dst" %}
 7738   ins_encode %{
 7739     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
 7740     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
 7741   %}
 7742 
 7743   ins_pipe(ialu_reg);
 7744 %}
 7745 
 7746 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
 7747   match(Set dst (CountTrailingZerosL src));
 7748 
 7749   ins_cost(INSN_COST * 2);
 7750   format %{ "rbit   $dst, $src\n\t"
 7751             "clz    $dst, $dst" %}
 7752   ins_encode %{
 7753     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
 7754     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
 7755   %}
 7756 
 7757   ins_pipe(ialu_reg);
 7758 %}
 7759 
//---------- Population Count Instructions -------------------------------------
//
// There is no scalar popcount instruction; the value is moved to a SIMD
// register, per-byte counts are taken with cnt, summed across the vector
// with addv, and the result is moved back to a general register.

instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "fmovs  $tmp, $src\t# vector (1S)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ fmovs($tmp$$FloatRegister, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Memory form: load the int straight into the SIMD register (ldrs),
// avoiding the general-register round trip.
instruct popCountI_mem(iRegINoSp dst, memory4 mem, vRegF tmp) %{
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(masm, &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Memory form of popCountL: load the long directly into the SIMD register.
instruct popCountL_mem(iRegINoSp dst, memory8 mem, vRegD tmp) %{
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(masm, &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
 7843 
 7844 // ============================================================================
 7845 // VerifyVectorAlignment Instruction
 7846 
 7847 instruct verify_vector_alignment(iRegP addr, immL_positive_bitmaskI mask, rFlagsReg cr) %{
 7848   match(Set addr (VerifyVectorAlignment addr mask));
 7849   effect(KILL cr);
 7850   format %{ "verify_vector_alignment $addr $mask \t! verify alignment" %}
 7851   ins_encode %{
 7852     Label Lskip;
 7853     // check if masked bits of addr are zero
 7854     __ tst($addr$$Register, $mask$$constant);
 7855     __ br(Assembler::EQ, Lskip);
 7856     __ stop("verify_vector_alignment found a misaligned vector memory access");
 7857     __ bind(Lskip);
 7858   %}
 7859   ins_pipe(pipe_slow);
 7860 %}
 7861 
 7862 // ============================================================================
 7863 // MemBar Instruction
 7864 
 7865 instruct load_fence() %{
 7866   match(LoadFence);
 7867   ins_cost(VOLATILE_REF_COST);
 7868 
 7869   format %{ "load_fence" %}
 7870 
 7871   ins_encode %{
 7872     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
 7873   %}
 7874   ins_pipe(pipe_serial);
 7875 %}
 7876 
 7877 instruct unnecessary_membar_acquire() %{
 7878   predicate(unnecessary_acquire(n));
 7879   match(MemBarAcquire);
 7880   ins_cost(0);
 7881 
 7882   format %{ "membar_acquire (elided)" %}
 7883 
 7884   ins_encode %{
 7885     __ block_comment("membar_acquire (elided)");
 7886   %}
 7887 
 7888   ins_pipe(pipe_class_empty);
 7889 %}
 7890 
 7891 instruct membar_acquire() %{
 7892   match(MemBarAcquire);
 7893   ins_cost(VOLATILE_REF_COST);
 7894 
 7895   format %{ "membar_acquire\n\t"
 7896             "dmb ishld" %}
 7897 
 7898   ins_encode %{
 7899     __ block_comment("membar_acquire");
 7900     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
 7901   %}
 7902 
 7903   ins_pipe(pipe_serial);
 7904 %}
 7905 
 7906 
 7907 instruct membar_acquire_lock() %{
 7908   match(MemBarAcquireLock);
 7909   ins_cost(VOLATILE_REF_COST);
 7910 
 7911   format %{ "membar_acquire_lock (elided)" %}
 7912 
 7913   ins_encode %{
 7914     __ block_comment("membar_acquire_lock (elided)");
 7915   %}
 7916 
 7917   ins_pipe(pipe_serial);
 7918 %}
 7919 
 7920 instruct store_fence() %{
 7921   match(StoreFence);
 7922   ins_cost(VOLATILE_REF_COST);
 7923 
 7924   format %{ "store_fence" %}
 7925 
 7926   ins_encode %{
 7927     __ membar(Assembler::LoadStore|Assembler::StoreStore);
 7928   %}
 7929   ins_pipe(pipe_serial);
 7930 %}
 7931 
 7932 instruct unnecessary_membar_release() %{
 7933   predicate(unnecessary_release(n));
 7934   match(MemBarRelease);
 7935   ins_cost(0);
 7936 
 7937   format %{ "membar_release (elided)" %}
 7938 
 7939   ins_encode %{
 7940     __ block_comment("membar_release (elided)");
 7941   %}
 7942   ins_pipe(pipe_serial);
 7943 %}
 7944 
 7945 instruct membar_release() %{
 7946   match(MemBarRelease);
 7947   ins_cost(VOLATILE_REF_COST);
 7948 
 7949   format %{ "membar_release\n\t"
 7950             "dmb ishst\n\tdmb ishld" %}
 7951 
 7952   ins_encode %{
 7953     __ block_comment("membar_release");
 7954     // These will be merged if AlwaysMergeDMB is enabled.
 7955     __ membar(Assembler::StoreStore);
 7956     __ membar(Assembler::LoadStore);
 7957   %}
 7958   ins_pipe(pipe_serial);
 7959 %}
 7960 
 7961 instruct membar_storestore() %{
 7962   match(MemBarStoreStore);
 7963   match(StoreStoreFence);
 7964   ins_cost(VOLATILE_REF_COST);
 7965 
 7966   format %{ "MEMBAR-store-store" %}
 7967 
 7968   ins_encode %{
 7969     __ membar(Assembler::StoreStore);
 7970   %}
 7971   ins_pipe(pipe_serial);
 7972 %}
 7973 
 7974 instruct membar_release_lock() %{
 7975   match(MemBarReleaseLock);
 7976   ins_cost(VOLATILE_REF_COST);
 7977 
 7978   format %{ "membar_release_lock (elided)" %}
 7979 
 7980   ins_encode %{
 7981     __ block_comment("membar_release_lock (elided)");
 7982   %}
 7983 
 7984   ins_pipe(pipe_serial);
 7985 %}
 7986 
 7987 instruct unnecessary_membar_volatile() %{
 7988   predicate(unnecessary_volatile(n));
 7989   match(MemBarVolatile);
 7990   ins_cost(0);
 7991 
 7992   format %{ "membar_volatile (elided)" %}
 7993 
 7994   ins_encode %{
 7995     __ block_comment("membar_volatile (elided)");
 7996   %}
 7997 
 7998   ins_pipe(pipe_serial);
 7999 %}
 8000 
 8001 instruct membar_volatile() %{
 8002   match(MemBarVolatile);
 8003   ins_cost(VOLATILE_REF_COST*100);
 8004 
 8005   format %{ "membar_volatile\n\t"
 8006              "dmb ish"%}
 8007 
 8008   ins_encode %{
 8009     __ block_comment("membar_volatile");
 8010     __ membar(Assembler::StoreLoad);
 8011   %}
 8012 
 8013   ins_pipe(pipe_serial);
 8014 %}
 8015 
 8016 // ============================================================================
 8017 // Cast/Convert Instructions
 8018 
 8019 instruct castX2P(iRegPNoSp dst, iRegL src) %{
 8020   match(Set dst (CastX2P src));
 8021 
 8022   ins_cost(INSN_COST);
 8023   format %{ "mov $dst, $src\t# long -> ptr" %}
 8024 
 8025   ins_encode %{
 8026     if ($dst$$reg != $src$$reg) {
 8027       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 8028     }
 8029   %}
 8030 
 8031   ins_pipe(ialu_reg);
 8032 %}
 8033 
 8034 instruct castP2X(iRegLNoSp dst, iRegP src) %{
 8035   match(Set dst (CastP2X src));
 8036 
 8037   ins_cost(INSN_COST);
 8038   format %{ "mov $dst, $src\t# ptr -> long" %}
 8039 
 8040   ins_encode %{
 8041     if ($dst$$reg != $src$$reg) {
 8042       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 8043     }
 8044   %}
 8045 
 8046   ins_pipe(ialu_reg);
 8047 %}
 8048 
 8049 // Convert oop into int for vectors alignment masking
 8050 instruct convP2I(iRegINoSp dst, iRegP src) %{
 8051   match(Set dst (ConvL2I (CastP2X src)));
 8052 
 8053   ins_cost(INSN_COST);
 8054   format %{ "movw $dst, $src\t# ptr -> int" %}
 8055   ins_encode %{
 8056     __ movw($dst$$Register, $src$$Register);
 8057   %}
 8058 
 8059   ins_pipe(ialu_reg);
 8060 %}
 8061 
 8062 // Convert compressed oop into int for vectors alignment masking
 8063 // in case of 32bit oops (heap < 4Gb).
 8064 instruct convN2I(iRegINoSp dst, iRegN src)
 8065 %{
 8066   predicate(CompressedOops::shift() == 0);
 8067   match(Set dst (ConvL2I (CastP2X (DecodeN src))));
 8068 
 8069   ins_cost(INSN_COST);
 8070   format %{ "mov dst, $src\t# compressed ptr -> int" %}
 8071   ins_encode %{
 8072     __ movw($dst$$Register, $src$$Register);
 8073   %}
 8074 
 8075   ins_pipe(ialu_reg);
 8076 %}
 8077 
 8078 
// Convert oop pointer into compressed form
// Matches EncodeP only for possibly-null oops; the not-null variant below
// handles the rest. encode_heap_oop may need a null check, hence KILL cr.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Not-null encode: no effect(KILL cr) is declared here even though the
// rule takes a cr operand — NOTE(review): presumably the not-null path
// does not touch flags; confirm against encode_heap_oop_not_null.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Decode a narrow oop that may be null (and is not a constant).
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Decode a narrow oop known to be non-null (or a constant).
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
 8132 
// n.b. AArch64 implementations of encode_klass_not_null and
// decode_klass_not_null do not modify the flags register so, unlike
// Intel, we don't kill CR as a side effect here

// Compress a klass pointer (always non-null for klass pointers).
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// Decompress a narrow klass pointer; the macro assembler has a cheaper
// in-place form used when dst and src are the same register.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
 8170 
// The cast nodes below carry type/range information for the optimizer
// only; they emit no code (size(0), empty encoding). The *_checked
// variants instead emit a runtime range check when VerifyConstraintCasts
// is enabled.

instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castII(iRegI dst)
%{
  predicate(VerifyConstraintCasts == 0);
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// Checked form: verify at runtime that the int is inside the type's range.
instruct castII_checked(iRegI dst, rFlagsReg cr)
%{
  predicate(VerifyConstraintCasts > 0);
  match(Set dst (CastII dst));
  effect(KILL cr);

  format %{ "# castII_checked of $dst" %}
  ins_encode %{
    __ verify_int_in_range(_idx, bottom_type()->is_int(), $dst$$Register, rscratch1);
  %}
  ins_pipe(pipe_slow);
%}

instruct castLL(iRegL dst)
%{
  predicate(VerifyConstraintCasts == 0);
  match(Set dst (CastLL dst));

  size(0);
  format %{ "# castLL of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// Checked form: verify at runtime that the long is inside the type's range.
instruct castLL_checked(iRegL dst, rFlagsReg cr)
%{
  predicate(VerifyConstraintCasts > 0);
  match(Set dst (CastLL dst));
  effect(KILL cr);

  format %{ "# castLL_checked of $dst" %}
  ins_encode %{
    __ verify_long_in_range(_idx, bottom_type()->is_long(), $dst$$Register, rscratch1);
  %}
  ins_pipe(pipe_slow);
%}

instruct castHH(vRegF dst)
%{
  match(Set dst (CastHH dst));
  size(0);
  format %{ "# castHH of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castFF(vRegF dst)
%{
  match(Set dst (CastFF dst));

  size(0);
  format %{ "# castFF of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castDD(vRegD dst)
%{
  match(Set dst (CastDD dst));

  size(0);
  format %{ "# castDD of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castVV(vReg dst)
%{
  match(Set dst (CastVV dst));

  size(0);
  format %{ "# castVV of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// CastVV for SVE predicate (governing) registers.
instruct castVVMask(pRegGov dst)
%{
  match(Set dst (CastVV dst));

  size(0);
  format %{ "# castVV of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
 8294 
 8295 // ============================================================================
 8296 // Atomic operation instructions
 8297 //
 8298 
 8299 // standard CompareAndSwapX when we are using barriers
 8300 // these have higher priority than the rules selected by a predicate
 8301 
 8302 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
 8303 // can't match them
 8304 
 8305 instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 8306 
 8307   match(Set res (CompareAndSwapB mem (Binary oldval newval)));
 8308   ins_cost(2 * VOLATILE_REF_COST);
 8309 
 8310   effect(KILL cr);
 8311 
 8312   format %{
 8313     "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 8314     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8315   %}
 8316 
 8317   ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
 8318             aarch64_enc_cset_eq(res));
 8319 
 8320   ins_pipe(pipe_slow);
 8321 %}
 8322 
 8323 instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 8324 
 8325   match(Set res (CompareAndSwapS mem (Binary oldval newval)));
 8326   ins_cost(2 * VOLATILE_REF_COST);
 8327 
 8328   effect(KILL cr);
 8329 
 8330   format %{
 8331     "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 8332     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8333   %}
 8334 
 8335   ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
 8336             aarch64_enc_cset_eq(res));
 8337 
 8338   ins_pipe(pipe_slow);
 8339 %}
 8340 
 8341 instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 8342 
 8343   match(Set res (CompareAndSwapI mem (Binary oldval newval)));
 8344   ins_cost(2 * VOLATILE_REF_COST);
 8345 
 8346   effect(KILL cr);
 8347 
 8348  format %{
 8349     "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 8350     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8351  %}
 8352 
 8353  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
 8354             aarch64_enc_cset_eq(res));
 8355 
 8356   ins_pipe(pipe_slow);
 8357 %}
 8358 
 8359 instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
 8360 
 8361   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
 8362   ins_cost(2 * VOLATILE_REF_COST);
 8363 
 8364   effect(KILL cr);
 8365 
 8366  format %{
 8367     "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
 8368     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8369  %}
 8370 
 8371  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
 8372             aarch64_enc_cset_eq(res));
 8373 
 8374   ins_pipe(pipe_slow);
 8375 %}
 8376 
 8377 instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
 8378 
 8379   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
 8380   predicate(n->as_LoadStore()->barrier_data() == 0);
 8381   ins_cost(2 * VOLATILE_REF_COST);
 8382 
 8383   effect(KILL cr);
 8384 
 8385  format %{
 8386     "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
 8387     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8388  %}
 8389 
 8390  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
 8391             aarch64_enc_cset_eq(res));
 8392 
 8393   ins_pipe(pipe_slow);
 8394 %}
 8395 
 8396 instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
 8397 
 8398   match(Set res (CompareAndSwapN mem (Binary oldval newval)));
 8399   predicate(n->as_LoadStore()->barrier_data() == 0);
 8400   ins_cost(2 * VOLATILE_REF_COST);
 8401 
 8402   effect(KILL cr);
 8403 
 8404  format %{
 8405     "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
 8406     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8407  %}
 8408 
 8409  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
 8410             aarch64_enc_cset_eq(res));
 8411 
 8412   ins_pipe(pipe_slow);
 8413 %}
 8414 
// alternative CompareAndSwapX when we are eliding barriers
// These fire when needs_acquiring_load_exclusive(n) holds and use the
// acquiring cmpxchg encodings; they are priced at half the barrier forms
// above so the matcher prefers them when the predicate allows.

instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Pointer/narrow-oop acquiring CAS: additionally requires no GC barrier
// data on the node, as with the barrier forms above.
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8530 
 8531 
 8532 // ---------------------------------------------------------------------
 8533 
 8534 // BEGIN This section of the file is automatically generated. Do not edit --------------
 8535 
// Sundry CAS operations.  Note that release is always true,
// regardless of the memory ordering of the CAS.  This is because we
// need the volatile case to be sequentially consistent but there is
// no trailing StoreLoad barrier emitted by C2.  Unfortunately we
// can't check the type of memory ordering here, so we always emit a
// STLXR.
//
// NOTE(review): everything down to the matching "END This section ...
// automatically generated" marker is produced from cas.m4.  Make any
// change in cas.m4 and regenerate; never hand-edit this section.
//
// NOTE(review): the format strings of the strong compareAndExchange*
// patterns below are annotated "(..., weak)" even though they pass
// /*weak*/ false to MacroAssembler::cmpxchg.  The annotation only
// affects the disassembly comment, but it is misleading; the fix
// belongs in cas.m4.

// This section is generated from cas.m4
 8544 
 8545 
 8546 // This pattern is generated automatically from cas.m4.
 8547 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 8548 instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
 8549   match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
 8550   ins_cost(2 * VOLATILE_REF_COST);
 8551   effect(TEMP_DEF res, KILL cr);
 8552   format %{
 8553     "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
 8554   %}
 8555   ins_encode %{
 8556     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 8557                Assembler::byte, /*acquire*/ false, /*release*/ true,
 8558                /*weak*/ false, $res$$Register);
 8559     __ sxtbw($res$$Register, $res$$Register);
 8560   %}
 8561   ins_pipe(pipe_slow);
 8562 %}
 8563 
 8564 // This pattern is generated automatically from cas.m4.
 8565 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 8566 instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
 8567   match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
 8568   ins_cost(2 * VOLATILE_REF_COST);
 8569   effect(TEMP_DEF res, KILL cr);
 8570   format %{
 8571     "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
 8572   %}
 8573   ins_encode %{
 8574     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 8575                Assembler::halfword, /*acquire*/ false, /*release*/ true,
 8576                /*weak*/ false, $res$$Register);
 8577     __ sxthw($res$$Register, $res$$Register);
 8578   %}
 8579   ins_pipe(pipe_slow);
 8580 %}
 8581 
 8582 // This pattern is generated automatically from cas.m4.
 8583 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 8584 instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
 8585   match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
 8586   ins_cost(2 * VOLATILE_REF_COST);
 8587   effect(TEMP_DEF res, KILL cr);
 8588   format %{
 8589     "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
 8590   %}
 8591   ins_encode %{
 8592     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 8593                Assembler::word, /*acquire*/ false, /*release*/ true,
 8594                /*weak*/ false, $res$$Register);
 8595   %}
 8596   ins_pipe(pipe_slow);
 8597 %}
 8598 
 8599 // This pattern is generated automatically from cas.m4.
 8600 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 8601 instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
 8602   match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
 8603   ins_cost(2 * VOLATILE_REF_COST);
 8604   effect(TEMP_DEF res, KILL cr);
 8605   format %{
 8606     "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
 8607   %}
 8608   ins_encode %{
 8609     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 8610                Assembler::xword, /*acquire*/ false, /*release*/ true,
 8611                /*weak*/ false, $res$$Register);
 8612   %}
 8613   ins_pipe(pipe_slow);
 8614 %}
 8615 
 8616 // This pattern is generated automatically from cas.m4.
 8617 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 8618 instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
 8619   predicate(n->as_LoadStore()->barrier_data() == 0);
 8620   match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
 8621   ins_cost(2 * VOLATILE_REF_COST);
 8622   effect(TEMP_DEF res, KILL cr);
 8623   format %{
 8624     "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
 8625   %}
 8626   ins_encode %{
 8627     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 8628                Assembler::word, /*acquire*/ false, /*release*/ true,
 8629                /*weak*/ false, $res$$Register);
 8630   %}
 8631   ins_pipe(pipe_slow);
 8632 %}
 8633 
 8634 // This pattern is generated automatically from cas.m4.
 8635 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 8636 instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
 8637   predicate(n->as_LoadStore()->barrier_data() == 0);
 8638   match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
 8639   ins_cost(2 * VOLATILE_REF_COST);
 8640   effect(TEMP_DEF res, KILL cr);
 8641   format %{
 8642     "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
 8643   %}
 8644   ins_encode %{
 8645     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 8646                Assembler::xword, /*acquire*/ false, /*release*/ true,
 8647                /*weak*/ false, $res$$Register);
 8648   %}
 8649   ins_pipe(pipe_slow);
 8650 %}
 8651 
 8652 // This pattern is generated automatically from cas.m4.
 8653 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 8654 instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
 8655   predicate(needs_acquiring_load_exclusive(n));
 8656   match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
 8657   ins_cost(VOLATILE_REF_COST);
 8658   effect(TEMP_DEF res, KILL cr);
 8659   format %{
 8660     "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
 8661   %}
 8662   ins_encode %{
 8663     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 8664                Assembler::byte, /*acquire*/ true, /*release*/ true,
 8665                /*weak*/ false, $res$$Register);
 8666     __ sxtbw($res$$Register, $res$$Register);
 8667   %}
 8668   ins_pipe(pipe_slow);
 8669 %}
 8670 
 8671 // This pattern is generated automatically from cas.m4.
 8672 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 8673 instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
 8674   predicate(needs_acquiring_load_exclusive(n));
 8675   match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
 8676   ins_cost(VOLATILE_REF_COST);
 8677   effect(TEMP_DEF res, KILL cr);
 8678   format %{
 8679     "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
 8680   %}
 8681   ins_encode %{
 8682     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 8683                Assembler::halfword, /*acquire*/ true, /*release*/ true,
 8684                /*weak*/ false, $res$$Register);
 8685     __ sxthw($res$$Register, $res$$Register);
 8686   %}
 8687   ins_pipe(pipe_slow);
 8688 %}
 8689 
 8690 // This pattern is generated automatically from cas.m4.
 8691 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 8692 instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
 8693   predicate(needs_acquiring_load_exclusive(n));
 8694   match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
 8695   ins_cost(VOLATILE_REF_COST);
 8696   effect(TEMP_DEF res, KILL cr);
 8697   format %{
 8698     "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
 8699   %}
 8700   ins_encode %{
 8701     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 8702                Assembler::word, /*acquire*/ true, /*release*/ true,
 8703                /*weak*/ false, $res$$Register);
 8704   %}
 8705   ins_pipe(pipe_slow);
 8706 %}
 8707 
 8708 // This pattern is generated automatically from cas.m4.
 8709 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 8710 instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
 8711   predicate(needs_acquiring_load_exclusive(n));
 8712   match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
 8713   ins_cost(VOLATILE_REF_COST);
 8714   effect(TEMP_DEF res, KILL cr);
 8715   format %{
 8716     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
 8717   %}
 8718   ins_encode %{
 8719     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 8720                Assembler::xword, /*acquire*/ true, /*release*/ true,
 8721                /*weak*/ false, $res$$Register);
 8722   %}
 8723   ins_pipe(pipe_slow);
 8724 %}
 8725 
 8726 // This pattern is generated automatically from cas.m4.
 8727 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 8728 instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
 8729   predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
 8730   match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
 8731   ins_cost(VOLATILE_REF_COST);
 8732   effect(TEMP_DEF res, KILL cr);
 8733   format %{
 8734     "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
 8735   %}
 8736   ins_encode %{
 8737     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 8738                Assembler::word, /*acquire*/ true, /*release*/ true,
 8739                /*weak*/ false, $res$$Register);
 8740   %}
 8741   ins_pipe(pipe_slow);
 8742 %}
 8743 
 8744 // This pattern is generated automatically from cas.m4.
 8745 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 8746 instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
 8747   predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
 8748   match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
 8749   ins_cost(VOLATILE_REF_COST);
 8750   effect(TEMP_DEF res, KILL cr);
 8751   format %{
 8752     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
 8753   %}
 8754   ins_encode %{
 8755     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 8756                Assembler::xword, /*acquire*/ true, /*release*/ true,
 8757                /*weak*/ false, $res$$Register);
 8758   %}
 8759   ins_pipe(pipe_slow);
 8760 %}
 8761 
 8762 // This pattern is generated automatically from cas.m4.
 8763 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 8764 instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
 8765   match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
 8766   ins_cost(2 * VOLATILE_REF_COST);
 8767   effect(KILL cr);
 8768   format %{
 8769     "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
 8770     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8771   %}
 8772   ins_encode %{
 8773     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 8774                Assembler::byte, /*acquire*/ false, /*release*/ true,
 8775                /*weak*/ true, noreg);
 8776     __ csetw($res$$Register, Assembler::EQ);
 8777   %}
 8778   ins_pipe(pipe_slow);
 8779 %}
 8780 
 8781 // This pattern is generated automatically from cas.m4.
 8782 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 8783 instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
 8784   match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
 8785   ins_cost(2 * VOLATILE_REF_COST);
 8786   effect(KILL cr);
 8787   format %{
 8788     "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
 8789     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8790   %}
 8791   ins_encode %{
 8792     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 8793                Assembler::halfword, /*acquire*/ false, /*release*/ true,
 8794                /*weak*/ true, noreg);
 8795     __ csetw($res$$Register, Assembler::EQ);
 8796   %}
 8797   ins_pipe(pipe_slow);
 8798 %}
 8799 
 8800 // This pattern is generated automatically from cas.m4.
 8801 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 8802 instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
 8803   match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
 8804   ins_cost(2 * VOLATILE_REF_COST);
 8805   effect(KILL cr);
 8806   format %{
 8807     "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
 8808     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8809   %}
 8810   ins_encode %{
 8811     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 8812                Assembler::word, /*acquire*/ false, /*release*/ true,
 8813                /*weak*/ true, noreg);
 8814     __ csetw($res$$Register, Assembler::EQ);
 8815   %}
 8816   ins_pipe(pipe_slow);
 8817 %}
 8818 
 8819 // This pattern is generated automatically from cas.m4.
 8820 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 8821 instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
 8822   match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
 8823   ins_cost(2 * VOLATILE_REF_COST);
 8824   effect(KILL cr);
 8825   format %{
 8826     "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
 8827     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8828   %}
 8829   ins_encode %{
 8830     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 8831                Assembler::xword, /*acquire*/ false, /*release*/ true,
 8832                /*weak*/ true, noreg);
 8833     __ csetw($res$$Register, Assembler::EQ);
 8834   %}
 8835   ins_pipe(pipe_slow);
 8836 %}
 8837 
 8838 // This pattern is generated automatically from cas.m4.
 8839 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 8840 instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
 8841   predicate(n->as_LoadStore()->barrier_data() == 0);
 8842   match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
 8843   ins_cost(2 * VOLATILE_REF_COST);
 8844   effect(KILL cr);
 8845   format %{
 8846     "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
 8847     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8848   %}
 8849   ins_encode %{
 8850     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 8851                Assembler::word, /*acquire*/ false, /*release*/ true,
 8852                /*weak*/ true, noreg);
 8853     __ csetw($res$$Register, Assembler::EQ);
 8854   %}
 8855   ins_pipe(pipe_slow);
 8856 %}
 8857 
 8858 // This pattern is generated automatically from cas.m4.
 8859 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 8860 instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
 8861   predicate(n->as_LoadStore()->barrier_data() == 0);
 8862   match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
 8863   ins_cost(2 * VOLATILE_REF_COST);
 8864   effect(KILL cr);
 8865   format %{
 8866     "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
 8867     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8868   %}
 8869   ins_encode %{
 8870     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 8871                Assembler::xword, /*acquire*/ false, /*release*/ true,
 8872                /*weak*/ true, noreg);
 8873     __ csetw($res$$Register, Assembler::EQ);
 8874   %}
 8875   ins_pipe(pipe_slow);
 8876 %}
 8877 
 8878 // This pattern is generated automatically from cas.m4.
 8879 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 8880 instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
 8881   predicate(needs_acquiring_load_exclusive(n));
 8882   match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
 8883   ins_cost(VOLATILE_REF_COST);
 8884   effect(KILL cr);
 8885   format %{
 8886     "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
 8887     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8888   %}
 8889   ins_encode %{
 8890     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 8891                Assembler::byte, /*acquire*/ true, /*release*/ true,
 8892                /*weak*/ true, noreg);
 8893     __ csetw($res$$Register, Assembler::EQ);
 8894   %}
 8895   ins_pipe(pipe_slow);
 8896 %}
 8897 
 8898 // This pattern is generated automatically from cas.m4.
 8899 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 8900 instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
 8901   predicate(needs_acquiring_load_exclusive(n));
 8902   match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
 8903   ins_cost(VOLATILE_REF_COST);
 8904   effect(KILL cr);
 8905   format %{
 8906     "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
 8907     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8908   %}
 8909   ins_encode %{
 8910     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 8911                Assembler::halfword, /*acquire*/ true, /*release*/ true,
 8912                /*weak*/ true, noreg);
 8913     __ csetw($res$$Register, Assembler::EQ);
 8914   %}
 8915   ins_pipe(pipe_slow);
 8916 %}
 8917 
 8918 // This pattern is generated automatically from cas.m4.
 8919 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 8920 instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
 8921   predicate(needs_acquiring_load_exclusive(n));
 8922   match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
 8923   ins_cost(VOLATILE_REF_COST);
 8924   effect(KILL cr);
 8925   format %{
 8926     "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
 8927     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8928   %}
 8929   ins_encode %{
 8930     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 8931                Assembler::word, /*acquire*/ true, /*release*/ true,
 8932                /*weak*/ true, noreg);
 8933     __ csetw($res$$Register, Assembler::EQ);
 8934   %}
 8935   ins_pipe(pipe_slow);
 8936 %}
 8937 
 8938 // This pattern is generated automatically from cas.m4.
 8939 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 8940 instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
 8941   predicate(needs_acquiring_load_exclusive(n));
 8942   match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
 8943   ins_cost(VOLATILE_REF_COST);
 8944   effect(KILL cr);
 8945   format %{
 8946     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
 8947     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8948   %}
 8949   ins_encode %{
 8950     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 8951                Assembler::xword, /*acquire*/ true, /*release*/ true,
 8952                /*weak*/ true, noreg);
 8953     __ csetw($res$$Register, Assembler::EQ);
 8954   %}
 8955   ins_pipe(pipe_slow);
 8956 %}
 8957 
 8958 // This pattern is generated automatically from cas.m4.
 8959 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 8960 instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
 8961   predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
 8962   match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
 8963   ins_cost(VOLATILE_REF_COST);
 8964   effect(KILL cr);
 8965   format %{
 8966     "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
 8967     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8968   %}
 8969   ins_encode %{
 8970     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 8971                Assembler::word, /*acquire*/ true, /*release*/ true,
 8972                /*weak*/ true, noreg);
 8973     __ csetw($res$$Register, Assembler::EQ);
 8974   %}
 8975   ins_pipe(pipe_slow);
 8976 %}
 8977 
 8978 // This pattern is generated automatically from cas.m4.
 8979 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 8980 instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
 8981   predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
 8982   match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
 8983   ins_cost(VOLATILE_REF_COST);
 8984   effect(KILL cr);
 8985   format %{
 8986     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
 8987     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8988   %}
 8989   ins_encode %{
 8990     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 8991                Assembler::xword, /*acquire*/ true, /*release*/ true,
 8992                /*weak*/ true, noreg);
 8993     __ csetw($res$$Register, Assembler::EQ);
 8994   %}
 8995   ins_pipe(pipe_slow);
 8996 %}
 8997 
 8998 // END This section of the file is automatically generated. Do not edit --------------
 8999 // ---------------------------------------------------------------------
 9000 
// --- GetAndSet (atomic exchange), relaxed forms ------------------------
// The exchange targets the base register of the indirect address; the
// previous memory value is produced in $prev.  Acquiring variants with
// lower cost follow below and take precedence when acquire is needed.

// Atomically exchange an int field; $prev <- old value.
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomically exchange a long field.
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomically exchange a narrow-oop field.  Only matches barrier-free
// accesses (barrier_data() == 0); barrier'd accesses are presumably
// matched by GC-specific rules elsewhere — not visible in this chunk.
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomically exchange a pointer field (barrier-free accesses only).
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9042 
// --- GetAndSet (atomic exchange), acquiring forms ----------------------
// Matched instead of the relaxed forms when
// needs_acquiring_load_exclusive(n) holds; the lower ins_cost
// (VOLATILE_REF_COST vs 2x) makes these win.  They emit the
// acquire-release exchange helpers (atomic_xchgal / atomic_xchgalw).

// Acquiring atomic exchange of an int field.
instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetI mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring atomic exchange of a long field.
instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetL mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring atomic exchange of a narrow-oop field (barrier-free only).
instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetN mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring atomic exchange of a pointer field (barrier-free only).
instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9086 
 9087 
// --- GetAndAddL, relaxed forms -----------------------------------------
// Four variants: {register, immediate} increment x {result used, result
// unused}.  The no-result forms pass noreg to discard the old value and
// carry an ins_cost one lower than the value-producing forms, so they
// win when result_not_used() holds.

// Fetch-and-add of a long field by a register increment; $newval <- old value.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Same, but the fetched value is unused: old value discarded (noreg).
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Fetch-and-add by an add/sub-encodable immediate (immLAddSub).
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Immediate increment, fetched value unused.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9129 
// --- GetAndAddI, relaxed forms -----------------------------------------
// Word-sized counterparts of the GetAndAddL family above: same four
// variants, emitting atomic_addw instead of atomic_add.

// Fetch-and-add of an int field by a register increment; $newval <- old value.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Same, but the fetched value is unused: old value discarded (noreg).
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Fetch-and-add by an add/sub-encodable immediate (immIAddSub).
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Immediate increment, fetched value unused.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9171 
// --- GetAndAddL, acquiring forms ---------------------------------------
// Matched instead of the relaxed forms when
// needs_acquiring_load_exclusive(n) holds (lower ins_cost); emit the
// acquire-release helper atomic_addal.

// Acquiring fetch-and-add of a long by a register increment.
instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Same, fetched value unused (noreg).
instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring fetch-and-add by an add/sub-encodable immediate.
instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Immediate increment, fetched value unused.
instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9215 
// --- GetAndAddI, acquiring forms ---------------------------------------
// Word-sized counterparts of the acquiring GetAndAddL family above;
// emit atomic_addalw.

// Acquiring fetch-and-add of an int by a register increment.
instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Same, fetched value unused (noreg).
instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring fetch-and-add by an add/sub-encodable immediate.
instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Immediate increment, fetched value unused.
instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9259 
// Manifest a CmpU result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Sequence: compare, set dst to 1 if NE, then negate dst if LO (unsigned
// less-than), yielding -1/0/+1.
instruct cmpU3_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg flags)
%{
  match(Set dst (CmpU3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmpw $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpU3(reg)"
  %}
  ins_encode %{
    __ cmpw($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}

// CmpU3 against an add/sub-encodable immediate: compare via subsw into zr.
instruct cmpU3_reg_imm(iRegINoSp dst, iRegI src1, immIAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpU3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subsw zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpU3(imm)"
  %}
  ins_encode %{
    __ subsw(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}

// Manifest a CmpUL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// 64-bit unsigned variant of the sequence above (cmp instead of cmpw).
instruct cmpUL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpUL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmp $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpUL3(reg)"
  %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}

// CmpUL3 against an add/sub-encodable long immediate.
instruct cmpUL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpUL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subs zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpUL3(imm)"
  %}
  ins_encode %{
    __ subs(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}

// Manifest a CmpL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Signed variant: negate on LT rather than LO.
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmp $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lt\t# CmpL3(reg)"
  %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}

// CmpL3 against an add/sub-encodable long immediate.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subs zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lt\t# CmpL3(imm)"
  %}
  ins_encode %{
    __ subs(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
 9385 
 9386 // ============================================================================
 9387 // Conditional Move Instructions
 9388 
// n.b. we have identical rules for both a signed compare op (cmpOp)
// and an unsigned compare op (cmpOpU). it would be nice if we could
// define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
// of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
 9398 
// Conditional move, int, signed compare: csel picks $src2 when the condition
// holds, otherwise $src1 (note the operand order in the emitted cselw).
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Same as above but for an unsigned compare (cmpOpU/rFlagsRegU).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// n.b. this is selected in preference to the rule above because it
// avoids loading constant 0 into a source register

// TODO
// we ought only to be able to cull one of these variants as the ideal
// transforms ought always to order the zero consistently (to left/right?)

// Conditional move, int, signed compare, first operand is the constant 0:
// use zr as the "else" source.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the zero/reg rule.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Conditional move, int, signed compare, second operand is the constant 0.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the reg/zero rule.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9503 
// special case for creating a boolean 0 or 1

// n.b. this is selected in preference to the rule above because it
// avoids loading constants 0 and 1 into a source register

// Materialize a 0/1 boolean from the flags: csincw dst, zr, zr, cond
// produces 0 when cond holds and zr+1 == 1 otherwise.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// Unsigned-compare flavour of the boolean-materializing rule.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
 9546 
// Conditional move, long, signed compare: csel picks $src2 when the
// condition holds, otherwise $src1.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare flavour of the long conditional move.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// Long conditional move where the second operand is constant 0 (use zr).
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the reg/zero rule.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Long conditional move where the first operand is constant 0 (use zr).
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the zero/reg rule.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9644 
// Conditional move, pointer, signed compare: csel picks $src2 when the
// condition holds, otherwise $src1.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare flavour of the pointer conditional move.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// Pointer conditional move where the second operand is null (immP0, use zr).
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the reg/zero rule.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Pointer conditional move where the first operand is null (immP0, use zr).
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the zero/reg rule.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9742 
// Conditional move, compressed (narrow) pointer, signed compare: 32-bit
// cselw picks $src2 when the condition holds, otherwise $src1.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9758 
 9759 instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
 9760   match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));
 9761 
 9762   ins_cost(INSN_COST * 2);
 9763   format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}
 9764 
 9765   ins_encode %{
 9766     __ cselw(as_Register($dst$$reg),
 9767              as_Register($src2$$reg),
 9768              as_Register($src1$$reg),
 9769              (Assembler::Condition)$cmp$$cmpcode);
 9770   %}
 9771 
 9772   ins_pipe(icond_reg_reg);
 9773 %}
 9774 
// special cases where one arg is zero

// Compressed-pointer conditional move, second operand null (immN0, use zr).
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the reg/zero rule.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Compressed-pointer conditional move, first operand null (immN0, use zr).
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the zero/reg rule.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9840 
// Conditional move, float, signed compare: fcsels picks $src2 when the
// condition holds, otherwise $src1 (note operand order in the emitted insn).
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}

// Unsigned-compare flavour of the float conditional move.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
 9876 
 9877 instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
 9878 %{
 9879   match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));
 9880 
 9881   ins_cost(INSN_COST * 3);
 9882 
 9883   format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
 9884   ins_encode %{
 9885     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
 9886     __ fcseld(as_FloatRegister($dst$$reg),
 9887               as_FloatRegister($src2$$reg),
 9888               as_FloatRegister($src1$$reg),
 9889               cond);
 9890   %}
 9891 
 9892   ins_pipe(fp_cond_reg_reg_d);
 9893 %}
 9894 
 9895 instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
 9896 %{
 9897   match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));
 9898 
 9899   ins_cost(INSN_COST * 3);
 9900 
 9901   format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
 9902   ins_encode %{
 9903     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
 9904     __ fcseld(as_FloatRegister($dst$$reg),
 9905               as_FloatRegister($src2$$reg),
 9906               as_FloatRegister($src1$$reg),
 9907               cond);
 9908   %}
 9909 
 9910   ins_pipe(fp_cond_reg_reg_d);
 9911 %}
 9912 
 9913 // ============================================================================
 9914 // Arithmetic Instructions
 9915 //
 9916 
 9917 // Integer Addition
 9918 
 9919 // TODO
 9920 // these currently employ operations which do not set CR and hence are
 9921 // not flagged as killing CR but we would like to isolate the cases
 9922 // where we want to set flags from those where we don't. need to work
 9923 // out how to do that.
 9924 
 9925 instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 9926   match(Set dst (AddI src1 src2));
 9927 
 9928   ins_cost(INSN_COST);
 9929   format %{ "addw  $dst, $src1, $src2" %}
 9930 
 9931   ins_encode %{
 9932     __ addw(as_Register($dst$$reg),
 9933             as_Register($src1$$reg),
 9934             as_Register($src2$$reg));
 9935   %}
 9936 
 9937   ins_pipe(ialu_reg_reg);
 9938 %}
 9939 
 9940 instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
 9941   match(Set dst (AddI src1 src2));
 9942 
 9943   ins_cost(INSN_COST);
 9944   format %{ "addw $dst, $src1, $src2" %}
 9945 
 9946   // use opcode to indicate that this is an add not a sub
 9947   opcode(0x0);
 9948 
 9949   ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));
 9950 
 9951   ins_pipe(ialu_reg_imm);
 9952 %}
 9953 
 9954 instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
 9955   match(Set dst (AddI (ConvL2I src1) src2));
 9956 
 9957   ins_cost(INSN_COST);
 9958   format %{ "addw $dst, $src1, $src2" %}
 9959 
 9960   // use opcode to indicate that this is an add not a sub
 9961   opcode(0x0);
 9962 
 9963   ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));
 9964 
 9965   ins_pipe(ialu_reg_imm);
 9966 %}
 9967 
// Pointer Addition
// 64-bit register-register add producing a pointer.
instruct addP_reg_reg(iRegPNoSp dst, iRegPorL2P src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer add folding in a sign extension of an int offset (ConvI2L is
// absorbed into the add's sxtw extend operand).
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegPorL2P src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer add folding in a left shift of the long index: base + (index << scale)
// computed with a single lea using an lsl address mode.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegPorL2P src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Pointer add folding in both a sign extension and a scale of an int index:
// base + ((long)index << scale) via a single lea with an sxtw address mode.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegPorL2P src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// (long)src << scale implemented as a single sbfiz (sign-extending bitfield
// insert). The insert width is clamped to 32 — presumably because the
// sign-extended source only carries 32 significant bits; confirm against
// the sbfiz operand constraints if modifying.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN2(32, (int)((-$scale$$constant) & 63)));
  %}

  ins_pipe(ialu_reg_shift);
%}

// Pointer Immediate Addition
// n.b. this needs to be more expensive than using an indirect memory
// operand
instruct addP_reg_imm(iRegPNoSp dst, iRegPorL2P src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10060 
// Long Addition
// 64-bit register-register add.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Long Immediate Addition. No constant pool entries required.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10092 
// Integer Subtraction
// 32-bit register-register subtract.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
// 32-bit subtract of an add/sub-encodable immediate; opcode bit 0x1
// selects sub in the shared add/sub encoder.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// Long Subtraction
// 64-bit register-register subtract.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10140 
// Long Immediate Subtraction. No constant pool entries required.
// Fix: the format string was missing the space after the mnemonic
// ("sub$dst" -> "sub $dst"), unlike every sibling add/sub rule.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10155 
// Integer Negation (special case for sub)

// 0 - src matched as a 32-bit negw.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Long Negation

// 0 - src matched as a 64-bit neg.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Integer Multiply

// 32-bit register-register multiply.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// (long)a * (long)b of two ints matched as a single smull (signed 32x32->64
// multiply).
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10219 
// Long Multiply

// 64-bit multiply: dst = src1 * src2 (low 64 bits), via mul.
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10236 
// High 64 bits of the signed 128-bit product of two longs, via smulh.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10252 
// High 64 bits of the unsigned 128-bit product of two longs, via umulh.
instruct umulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (UMulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "umulh   $dst, $src1, $src2\t# umulhi" %}

  ins_encode %{
    __ umulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10268 
// Combined Integer Multiply & Add/Sub

// dst = src3 + src1 * src2, fused into a single 32-bit maddw.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}

// dst = src3 - src1 * src2, fused into a single 32-bit msubw.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10302 
// Combined Integer Multiply & Neg

// dst = (0 - src1) * src2, i.e. -(src1 * src2), via a single mnegw.
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
  match(Set dst (MulI (SubI zero src1) src2));

  ins_cost(INSN_COST * 3);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mnegw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10319 
// Combined Long Multiply & Add/Sub

// dst = src3 + src1 * src2, fused into a single 64-bit madd.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// dst = src3 - src1 * src2, fused into a single 64-bit msub.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10353 
// Combined Long Multiply & Neg

// dst = (0 - src1) * src2, i.e. -(src1 * src2), via a single 64-bit mneg.
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mneg(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10370 
// Combine Integer Signed Multiply & Add/Sub/Neg Long

// dst = src3 + (long)src1 * (long)src2 — widening signed
// multiply-accumulate fused into a single smaddl.
instruct smaddL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (AddL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smaddl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smaddl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}

// dst = src3 - (long)src1 * (long)src2 — widening signed
// multiply-subtract fused into a single smsubl.
instruct smsubL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (SubL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smsubl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smsubl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}

// dst = -((long)src1 * (long)src2) — widening signed
// multiply-negate via a single smnegl.
instruct smnegL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero (ConvI2L src1)) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smnegl  $dst, $src1, $src2" %}

  ins_encode %{
    __ smnegl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10419 
// Combined Multiply-Add Shorts into Integer (dst = src1 * src2 + src3 * src4)

// Two-instruction sequence: the first product goes through rscratch1
// (clobbered here), then maddw folds in the second product.
instruct muladdS2I(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3, iRegIorL2I src4) %{
  match(Set dst (MulAddS2I (Binary src1 src2) (Binary src3 src4)));

  ins_cost(INSN_COST * 5);
  format %{ "mulw  rscratch1, $src1, $src2\n\t"
            "maddw $dst, $src3, $src4, rscratch1" %}

  ins_encode %{
    __ mulw(rscratch1, as_Register($src1$$reg), as_Register($src2$$reg));
    __ maddw(as_Register($dst$$reg), as_Register($src3$$reg), as_Register($src4$$reg), rscratch1); %}

  ins_pipe(imac_reg_reg);
%}
10435 
// Integer Divide

// Signed 32-bit divide; emitted via the shared aarch64_enc_divw
// encoding class (defined elsewhere in this file).
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10447 
// Long Divide

// Signed 64-bit divide; emitted via the shared aarch64_enc_div
// encoding class (defined elsewhere in this file).
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10459 
// Integer Remainder

// Signed 32-bit remainder: divide, then multiply-subtract to
// recover dst = src1 - (src1 / src2) * src2 (see format string).
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10472 
// Long Remainder

// Signed 64-bit remainder: divide, then multiply-subtract to
// recover dst = src1 - (src1 / src2) * src2 (see format string).
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  format %{ "sdiv   rscratch1, $src1, $src2\n"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10485 
// Unsigned Integer Divide

// Unsigned 32-bit divide via udivw.
instruct UdivI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (UDivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "udivw  $dst, $src1, $src2" %}

  ins_encode %{
    __ udivw($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(idiv_reg_reg);
%}
10500 
//  Unsigned Long Divide

// Unsigned 64-bit divide via udiv.
instruct UdivL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (UDivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "udiv   $dst, $src1, $src2" %}

  ins_encode %{
    __ udiv($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(ldiv_reg_reg);
%}
10515 
// Unsigned Integer Remainder

// Unsigned 32-bit remainder: udivw into rscratch1 (clobbered),
// then msubw recovers dst = src1 - (src1 / src2) * src2.
instruct UmodI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (UModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "udivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode %{
    __ udivw(rscratch1, $src1$$Register, $src2$$Register);
    __ msubw($dst$$Register, rscratch1, $src2$$Register, $src1$$Register);
  %}

  ins_pipe(idiv_reg_reg);
%}
10532 
// Unsigned Long Remainder

// Unsigned 64-bit remainder: udiv into rscratch1 (clobbered),
// then msub recovers dst = src1 - (src1 / src2) * src2.
instruct UModL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (UModL src1 src2));

  ins_cost(INSN_COST * 38);
  format %{ "udiv   rscratch1, $src1, $src2\n"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode %{
    __ udiv(rscratch1, $src1$$Register, $src2$$Register);
    __ msub($dst$$Register, rscratch1, $src2$$Register, $src1$$Register);
  %}

  ins_pipe(ldiv_reg_reg);
%}
10549 
// Integer Shifts

// Shift Left Register
// Variable 32-bit left shift via lslvw.
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
// Only the low 5 bits of the shift count are used (mask 0x1f).
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10583 
// Shift Right Logical Register
// Variable 32-bit unsigned right shift via lsrvw.
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
// Only the low 5 bits of the shift count are used (mask 0x1f).
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10615 
// Shift Right Arithmetic Register
// Variable 32-bit signed right shift via asrvw.
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
// Only the low 5 bits of the shift count are used (mask 0x1f).
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10647 
// Combined Int Mask and Right Shift (using UBFM)
// TODO

// Long Shifts

// Shift Left Register
// Variable 64-bit left shift via lslv.
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
// Only the low 6 bits of the shift count are used (mask 0x3f).
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10684 
// Shift Right Logical Register
// Variable 64-bit unsigned right shift via lsrv.
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
// Only the low 6 bits of the shift count are used (mask 0x3f).
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10716 
// A special-case pattern for card table stores.
// Matches an unsigned right shift applied to a pointer reinterpreted
// as a long (CastP2X); only the low 6 bits of the count are used.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10732 
// Shift Right Arithmetic Register
// Variable 64-bit signed right shift via asrv.
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
// Only the low 6 bits of the shift count are used (mask 0x3f).
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10764 
// BEGIN This section of the file is automatically generated. Do not edit --------------
// This section is generated from aarch64_ad.m4

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Bitwise NOT of a long: src1 ^ -1 is emitted as eon with zr.
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Bitwise NOT of an int: src1 ^ -1 is emitted as eonw with zr.
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
10805 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// -(src1 >>> src2) folded into negw with an LSR shifted operand.
instruct NegI_reg_URShift_reg(iRegINoSp dst,
                              immI0 zero, iRegIorL2I src1, immI src2) %{
  match(Set dst (SubI zero (URShiftI src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "negw  $dst, $src1, LSR $src2" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSR, $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// -(src1 >> src2) folded into negw with an ASR shifted operand.
instruct NegI_reg_RShift_reg(iRegINoSp dst,
                              immI0 zero, iRegIorL2I src1, immI src2) %{
  match(Set dst (SubI zero (RShiftI src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "negw  $dst, $src1, ASR $src2" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::ASR, $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// -(src1 << src2) folded into negw with an LSL shifted operand.
instruct NegI_reg_LShift_reg(iRegINoSp dst,
                              immI0 zero, iRegIorL2I src1, immI src2) %{
  match(Set dst (SubI zero (LShiftI src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "negw  $dst, $src1, LSL $src2" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSL, $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10856 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// -(src1 >>> src2) folded into neg with an LSR shifted operand.
instruct NegL_reg_URShift_reg(iRegLNoSp dst,
                              immL0 zero, iRegL src1, immI src2) %{
  match(Set dst (SubL zero (URShiftL src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "neg  $dst, $src1, LSR $src2" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSR, $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// -(src1 >> src2) folded into neg with an ASR shifted operand.
instruct NegL_reg_RShift_reg(iRegLNoSp dst,
                              immL0 zero, iRegL src1, immI src2) %{
  match(Set dst (SubL zero (RShiftL src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "neg  $dst, $src1, ASR $src2" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::ASR, $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// -(src1 << src2) folded into neg with an LSL shifted operand.
instruct NegL_reg_LShift_reg(iRegLNoSp dst,
                              immL0 zero, iRegL src1, immI src2) %{
  match(Set dst (SubL zero (LShiftL src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "neg  $dst, $src1, LSL $src2" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSL, $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10907 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & ~src2 (int) emitted as a single bicw.
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & ~src2 (long) emitted as a single bic.
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10943 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | ~src2 (int) emitted as a single ornw.
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | ~src2 (long) emitted as a single orn.
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10979 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// -1 ^ (src2 ^ src1) == src1 ^ ~src2 (int), emitted as a single eonw.
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// -1 ^ (src2 ^ src1) == src1 ^ ~src2 (long), emitted as a single eon.
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11015 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val >>> shift)) ==> bicw
// bicw with an LSR shifted operand; shift masked to 5 bits.
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val >>> shift)) ==> bic
// bic with an LSR shifted operand; shift masked to 6 bits.
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11057 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val >> shift)) ==> bicw
// bicw with an ASR shifted operand; shift masked to 5 bits.
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val >> shift)) ==> bic
// bic with an ASR shifted operand; shift masked to 6 bits.
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11099 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val ror shift)) ==> bicw
// bicw with a ROR shifted operand; shift masked to 5 bits.
instruct AndI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val ror shift)) ==> bic
// bic with a ROR shifted operand; shift masked to 6 bits.
instruct AndL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11141 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val << shift)) ==> bicw
// bicw with an LSL shifted operand; shift masked to 5 bits.
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val << shift)) ==> bic
// bic with an LSL shifted operand; shift masked to 6 bits.
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11183 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val >>> shift)) ==> eonw
// src4 is immI_M1 (-1): -1 ^ ((src2 >>> src3) ^ src1) == src1 ^ ~(src2 >>> src3).
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11204 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val >>> shift)) ==> eon
// src4 is immL_M1 (-1): -1 ^ ((src2 >>> src3) ^ src1) == src1 ^ ~(src2 >>> src3).
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11225 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val >> shift)) ==> eonw
// src4 is immI_M1 (-1): -1 ^ ((src2 >> src3) ^ src1) == src1 ^ ~(src2 >> src3).
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11246 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val >> shift)) ==> eon
// src4 is immL_M1 (-1): -1 ^ ((src2 >> src3) ^ src1) == src1 ^ ~(src2 >> src3).
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11267 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val ror shift)) ==> eonw
// src4 is immI_M1 (-1): -1 ^ ((src2 ror src3) ^ src1) == src1 ^ ~(src2 ror src3).
instruct XorI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (XorI src4 (XorI(RotateRight src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11288 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val ror shift)) ==> eon
// src4 is immL_M1 (-1): -1 ^ ((src2 ror src3) ^ src1) == src1 ^ ~(src2 ror src3).
instruct XorL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (XorL src4 (XorL(RotateRight src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11309 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val << shift)) ==> eonw
// src4 is immI_M1 (-1): -1 ^ ((src2 << src3) ^ src1) == src1 ^ ~(src2 << src3).
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11330 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val << shift)) ==> eon
// src4 is immL_M1 (-1): -1 ^ ((src2 << src3) ^ src1) == src1 ^ ~(src2 << src3).
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11351 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val >>> shift)) ==> ornw
// src4 is immI_M1 (-1), so the XorI is a bitwise NOT of the shifted value.
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11372 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val >>> shift)) ==> orn
// src4 is immL_M1 (-1), so the XorL is a bitwise NOT of the shifted value.
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11393 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val >> shift)) ==> ornw
// src4 is immI_M1 (-1), so the XorI is a bitwise NOT of the shifted value.
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11414 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val >> shift)) ==> orn
// src4 is immL_M1 (-1), so the XorL is a bitwise NOT of the shifted value.
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11435 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val ror shift)) ==> ornw
// src4 is immI_M1 (-1), so the XorI is a bitwise NOT of the rotated value.
instruct OrI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11456 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val ror shift)) ==> orn
// src4 is immL_M1 (-1), so the XorL is a bitwise NOT of the rotated value.
instruct OrL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11477 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val << shift)) ==> ornw
// src4 is immI_M1 (-1), so the XorI is a bitwise NOT of the shifted value.
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11498 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val << shift)) ==> orn
// src4 is immL_M1 (-1), so the XorL is a bitwise NOT of the shifted value.
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11519 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 >>> src3) ==> andw
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11540 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 >>> src3) ==> andr
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11561 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 >> src3) ==> andw
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11582 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 >> src3) ==> andr
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11603 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 << src3) ==> andw
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11624 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 << src3) ==> andr
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11645 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 ror src3) ==> andw
instruct AndI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11666 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 ror src3) ==> andr
instruct AndL_reg_RotateRight_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11687 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 >>> src3) ==> eorw
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11708 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 >>> src3) ==> eor
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11729 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 >> src3) ==> eorw
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11750 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 >> src3) ==> eor
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11771 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 << src3) ==> eorw
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11792 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 << src3) ==> eor
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11813 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 ror src3) ==> eorw
instruct XorI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11834 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 ror src3) ==> eor
instruct XorL_reg_RotateRight_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11855 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | (src2 >>> src3) ==> orrw
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11876 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | (src2 >>> src3) ==> orr
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11897 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | (src2 >> src3) ==> orrw
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11918 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | (src2 >> src3) ==> orr
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11939 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | (src2 << src3) ==> orrw
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11960 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | (src2 << src3) ==> orr
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11981 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | (src2 ror src3) ==> orrw
instruct OrI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12002 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | (src2 ror src3) ==> orr
instruct OrL_reg_RotateRight_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12023 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 + (src2 >>> src3) ==> addw
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12044 
12045 // This pattern is automatically generated from aarch64_ad.m4
12046 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12047 instruct AddL_reg_URShift_reg(iRegLNoSp dst,
12048                          iRegL src1, iRegL src2,
12049                          immI src3) %{
12050   match(Set dst (AddL src1 (URShiftL src2 src3)));
12051 
12052   ins_cost(1.9 * INSN_COST);
12053   format %{ "add  $dst, $src1, $src2, LSR $src3" %}
12054 
12055   ins_encode %{
12056     __ add(as_Register($dst$$reg),
12057               as_Register($src1$$reg),
12058               as_Register($src2$$reg),
12059               Assembler::LSR,
12060               $src3$$constant & 0x3f);
12061   %}
12062 
12063   ins_pipe(ialu_reg_reg_shift);
12064 %}
12065 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fold "AddI src1 (RShiftI src2 src3)" into a single ADDW with an
// ASR-shifted (arithmetic/signed) second operand; shift masked with 0x1f.
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit variant: fold "AddL src1 (RShiftL src2 src3)" into ADD with an
// ASR-shifted operand; shift masked with 0x3f.
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12107 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fold "AddI src1 (LShiftI src2 src3)" into a single ADDW with an
// LSL-shifted second operand; shift amount masked with 0x1f.
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit variant: fold "AddL src1 (LShiftL src2 src3)" into ADD with an
// LSL-shifted operand; shift amount masked with 0x3f.
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12149 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fold "SubI src1 (URShiftI src2 src3)" into a single SUBW with an
// LSR-shifted second operand; shift amount masked with 0x1f.
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit variant: fold "SubL src1 (URShiftL src2 src3)" into SUB with an
// LSR-shifted operand; shift amount masked with 0x3f.
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12191 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fold "SubI src1 (RShiftI src2 src3)" into a single SUBW with an
// ASR-shifted (arithmetic/signed) second operand; shift masked with 0x1f.
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit variant: fold "SubL src1 (RShiftL src2 src3)" into SUB with an
// ASR-shifted operand; shift masked with 0x3f.
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12233 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fold "SubI src1 (LShiftI src2 src3)" into a single SUBW with an
// LSL-shifted second operand; shift amount masked with 0x1f.
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit variant: fold "SubL src1 (LShiftL src2 src3)" into SUB with an
// LSL-shifted operand; shift amount masked with 0x3f.
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12275 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// The pair (src << lshift) >> rshift is a single signed bitfield move:
// r = (rshift - lshift) & 63 positions the field and s = 63 - lshift
// marks its most-significant source bit (SBFM encoding, 64-bit).
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of sbfmL: shift counts masked with 31, SBFMW encoding.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// Unsigned counterpart of sbfmL: left shift followed by a logical
// (zero-extending) right shift becomes a single UBFM, same r/s encoding.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of ubfmL: shift counts masked with 31, UBFMW encoding.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12367 
// Bitfield extract with shift & mask

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src >>> rshift) & mask becomes a single UBFXW.  The field width is
// exact_log2(mask + 1); the predicate rejects shapes where
// rshift + width would exceed the 32-bit register, which UBFXW cannot
// encode.  (mask is presumably a contiguous low-bit mask, 2^k - 1,
// per the immI_bitmask operand — see its definition.)
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit variant of ubfxwI: (src >>> rshift) & mask as a single UBFX,
// with the corresponding 64-bit range check in the predicate.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));
  // Make sure we are not going to exceed what ubfx can do.
  predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The 64-bit UBFX zero-extends its result, so it also subsumes the
// ConvI2L wrapped around the 32-bit shift-and-mask.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12433 
12434 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// (src & mask) << lshift becomes a single UBFIZW inserting a field of
// width exact_log2(mask + 1) at bit position lshift; the predicate
// rejects shapes that would not fit in a 32-bit register.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// 64-bit variant of ubfizwI, with the corresponding 64-bit range check.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  predicate((exact_log2_long(n->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// Same as ubfizwI but with the result widened by ConvI2L; note the
// tighter predicate bound (<= 31) so the inserted field stays within
// the low 32 bits of the long result.
instruct ubfizwIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (LShiftI (AndI src mask) lshift)));
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_positive_bitmaskI guarantees it.
// Long mask-and-shift narrowed by ConvL2I; the predicate bound (<= 31)
// ensures the inserted field fits in the 32-bit result.
instruct ubfizLConvL2I(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
%{
  match(Set dst (ConvL2I (LShiftL (AndL src mask) lshift)));
  predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12522 
12523 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// If there is a convert I to L block between an AndI and a LShiftL, we can also match ubfiz
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// If there is a convert L to I block between an AndL and a LShiftI, we can also match ubfiz
instruct ubfizLConvL2Ix(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
%{
  match(Set dst (LShiftI (ConvL2I (AndL src mask)) lshift));
  predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 31)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Can skip int2long conversions after AND with small bitmask
// (UBFIZ with lshift 0 zero-extends the masked field into the long.)
instruct ubfizIConvI2LAndI(iRegLNoSp dst, iRegI src, immI_bitmask msk)
%{
  match(Set dst (ConvI2L (AndI src msk)));
  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, 0, exact_log2($msk + 1) " %}
  ins_encode %{
    __ ubfiz(as_Register($dst$$reg), as_Register($src$$reg), 0, exact_log2($msk$$constant + 1));
  %}
  ins_pipe(ialu_reg_shift);
%}
12580 
12581 
// Rotations
//
// These patterns recognize (a << lshift) op (b >>> rshift) where
// lshift + rshift equals the register width (checked by the predicate:
// the masked sum must be 0 mod width), i.e. a funnel shift / rotate of
// the register pair, and emit a single EXTR/EXTRW.

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit variant of extrOrL (EXTRW).
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Same as extrOrL but with AddL combining the two shifted halves
// (equivalent when the shifted bit ranges are disjoint, which the
// predicate's width check establishes).
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit variant of extrAddL (EXTRW).
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12654 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate-right by immediate: EXTRW with both sources equal to src is a
// 32-bit rotate right by the (masked) shift amount.
instruct rorI_imm(iRegINoSp dst, iRegI src, immI shift)
%{
  match(Set dst (RotateRight src shift))
;

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ extrw(as_Register($dst$$reg), as_Register($src$$reg), as_Register($src$$reg),
               $shift$$constant & 0x1f);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit rotate-right by immediate via EXTR with both sources = src.
instruct rorL_imm(iRegLNoSp dst, iRegL src, immI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ extr(as_Register($dst$$reg), as_Register($src$$reg), as_Register($src$$reg),
               $shift$$constant & 0x3f);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate-right by a register amount: RORVW (32-bit variable rotate).
instruct rorI_reg(iRegINoSp dst, iRegI src, iRegI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg), as_Register($shift$$reg));
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit rotate-right by a register amount: RORV.
instruct rorL_reg(iRegLNoSp dst, iRegL src, iRegI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ rorv(as_Register($dst$$reg), as_Register($src$$reg), as_Register($shift$$reg));
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate-left by a register amount: no ROLV instruction exists, so
// negate the shift (rol(x, s) == ror(x, -s mod 32)) and use RORVW.
// Clobbers rscratch1 for the negated amount.
instruct rolI_reg(iRegINoSp dst, iRegI src, iRegI shift)
%{
  match(Set dst (RotateLeft src shift));

  ins_cost(INSN_COST);
  format %{ "rol    $dst, $src, $shift" %}

  ins_encode %{
     __ subw(rscratch1, zr, as_Register($shift$$reg));
     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg), rscratch1);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit rotate-left: negate the shift and use RORV (rol(x, s) ==
// ror(x, -s mod 64)).  Clobbers rscratch1.
instruct rolL_reg(iRegLNoSp dst, iRegL src, iRegI shift)
%{
  match(Set dst (RotateLeft src shift));

  ins_cost(INSN_COST);
  format %{ "rol    $dst, $src, $shift" %}

  ins_encode %{
     __ subw(rscratch1, zr, as_Register($shift$$reg));
     __ rorv(as_Register($dst$$reg), as_Register($src$$reg), rscratch1);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12748 
12749 
// Add/subtract (extended)
//
// These patterns use the AArch64 add/sub (extended register) forms to
// fold a sign- or zero-extension of the second operand into the
// arithmetic instruction.  The matched (x << k) >> k shift pairs are
// the Ideal-graph idiom for sign/zero extension of a sub-word value.

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// AddL of an int widened to long: ADD with sxtw-extended operand.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// SubL of an int widened to long: SUB with sxtw-extended operand.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 << 16) >> 16 is a sign-extended halfword: ADD with sxth operand.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 << 24) >> 24 is a sign-extended byte: ADD with sxtb operand.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 << 24) >>> 24 is a zero-extended byte: ADD with uxtb operand.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long form: (src2 << 48) >> 48 is a sign-extended halfword (sxth).
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long form: (src2 << 32) >> 32 is a sign-extended word (sxtw).
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long form: (src2 << 56) >> 56 is a sign-extended byte (sxtb).
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long form: (src2 << 56) >>> 56 is a zero-extended byte (uxtb).
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12886 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): machine-generated; permanent changes belong in aarch64_ad.m4.
//
// The rules below fuse an and-mask zero-extension into add/sub with an
// extended-register operand:
//   (x & 0xFF) == uxtb(x), (x & 0xFFFF) == uxth(x),
//   (x & 0xFFFFFFFFL) == uxtw(x)
// The non-flag-setting add/sub forms are emitted, so cr is not written.

// int: src1 + (src2 & 0xFF)  ==>  addw $dst, $src1, $src2, uxtb
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// int: src1 + (src2 & 0xFFFF)  ==>  addw $dst, $src1, $src2, uxth
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long: src1 + (src2 & 0xFFL)  ==>  add $dst, $src1, $src2, uxtb
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long: src1 + (src2 & 0xFFFFL)  ==>  add $dst, $src1, $src2, uxth
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long: src1 + (src2 & 0xFFFFFFFFL)  ==>  add $dst, $src1, $src2, uxtw
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// int: src1 - (src2 & 0xFF)  ==>  subw $dst, $src1, $src2, uxtb
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// int: src1 - (src2 & 0xFFFF)  ==>  subw $dst, $src1, $src2, uxth
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long: src1 - (src2 & 0xFFL)  ==>  sub $dst, $src1, $src2, uxtb
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long: src1 - (src2 & 0xFFFFL)  ==>  sub $dst, $src1, $src2, uxth
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long: src1 - (src2 & 0xFFFFFFFFL)  ==>  sub $dst, $src1, $src2, uxtw
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
13036 
13037 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): machine-generated; permanent changes belong in aarch64_ad.m4.
//
// The rules below fuse a shift-pair extension followed by a small left
// shift (lshift2, constrained by immIExt -- presumably the 0..4 range an
// extended-register operand accepts; confirm against the immIExt operand
// definition) into add/sub with a shifted extended-register operand:
//   add $dst, $src1, $src2, <ext> #lshift2

// long: src1 + (sxtb(src2) << lshift2)
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long: src1 + (sxth(src2) << lshift2)
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long: src1 + (sxtw(src2) << lshift2)
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long: src1 - (sxtb(src2) << lshift2)
instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long: src1 - (sxth(src2) << lshift2)
instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long: src1 - (sxtw(src2) << lshift2)
instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// int: src1 + (sxtb(src2) << lshift2)
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// int: src1 + (sxth(src2) << lshift2)
instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// int: src1 - (sxtb(src2) << lshift2)
instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// int: src1 - (sxth(src2) << lshift2)
instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13187 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): machine-generated; permanent changes belong in aarch64_ad.m4.
// long: src1 + (((long)src2) << lshift)  ==>  add $dst, $src1, $src2, sxtw #lshift
// (ConvI2L is a sign extension, hence ext::sxtw.)
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long: src1 - (((long)src2) << lshift)  ==>  sub $dst, $src1, $src2, sxtw #lshift
instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13217 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): machine-generated; permanent changes belong in aarch64_ad.m4.
//
// The rules below fuse an and-mask zero-extension plus a small left shift
// into add/sub with a shifted extended-register operand:
//   src1 op ((src2 & mask) << lshift)  ==>  add/sub ..., uxt{b,h,w} #lshift

// long: src1 + ((src2 & 0xFFL) << lshift)
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long: src1 + ((src2 & 0xFFFFL) << lshift)
instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long: src1 + ((src2 & 0xFFFFFFFFL) << lshift)
instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long: src1 - ((src2 & 0xFFL) << lshift)
instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long: src1 - ((src2 & 0xFFFFL) << lshift)
instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long: src1 - ((src2 & 0xFFFFFFFFL) << lshift)
instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// int: src1 + ((src2 & 0xFF) << lshift)
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// int: src1 + ((src2 & 0xFFFF) << lshift)
instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// int: src1 - ((src2 & 0xFF) << lshift)
instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// int: src1 - ((src2 & 0xFFFF) << lshift)
instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13367 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): machine-generated; permanent changes belong in aarch64_ad.m4.
//
// Conditional-select helpers. These have no match rule (only an effect
// clause); they exist to be instantiated by the expand %{ %} blocks of the
// min/max rules. Per the A64 definitions:
//   cselw  d,a,b,cond : d = cond ? a : b
//   csincw d,a,zr,cond: d = cond ? a : zr+1 == 1
//   csinvw d,a,zr,cond: d = cond ? a : ~zr  == -1

// dst = (flags: lt) ? src1 : src2
instruct cmovI_reg_reg_lt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE src2, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 lt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             $src2$$Register,
             Assembler::LT);
  %}
  ins_pipe(icond_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (flags: gt) ? src1 : src2
instruct cmovI_reg_reg_gt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE src2, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 gt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             $src2$$Register,
             Assembler::GT);
  %}
  ins_pipe(icond_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (flags: lt) ? src1 : 0
instruct cmovI_reg_imm0_lt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, zr lt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (flags: gt) ? src1 : 0
instruct cmovI_reg_imm0_gt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, zr gt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (flags: le) ? src1 : 1   (csinc of zr yields 1)
instruct cmovI_reg_imm1_le(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, $src1, zr le\t"  %}

  ins_encode %{
    __ csincw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LE);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (flags: gt) ? src1 : 1
instruct cmovI_reg_imm1_gt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, $src1, zr gt\t"  %}

  ins_encode %{
    __ csincw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (flags: lt) ? src1 : -1   (csinv of zr yields ~0 == -1)
instruct cmovI_reg_immM1_lt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csinvw $dst, $src1, zr lt\t"  %}

  ins_encode %{
    __ csinvw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (flags: ge) ? src1 : -1
instruct cmovI_reg_immM1_ge(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csinvw $dst, $src1, zr ge\t"  %}

  ins_encode %{
    __ csinvw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GE);
  %}
  ins_pipe(icond_reg);
%}
13503 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): machine-generated; permanent changes belong in aarch64_ad.m4.
//
// MinI/MaxI against the constants 0, 1 and -1, in both operand orders.
// Each expands to a compare of src against zero (compI_reg_imm0) followed
// by one of the conditional-select helper instructs defined earlier in
// this file.

// min(src, 0): src < 0 ? src : 0
instruct minI_reg_imm0(iRegINoSp dst, iRegIorL2I src, immI0 imm)
%{
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(0, src): same expansion, mirrored operand order
instruct minI_imm0_reg(iRegINoSp dst, immI0 imm, iRegIorL2I src)
%{
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(src, 1): src <= 0 ? src : 1
instruct minI_reg_imm1(iRegINoSp dst, iRegIorL2I src, immI_1 imm)
%{
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_le(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(1, src): same expansion, mirrored operand order
instruct minI_imm1_reg(iRegINoSp dst, immI_1 imm, iRegIorL2I src)
%{
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_le(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(src, -1): src < 0 ? src : -1
instruct minI_reg_immM1(iRegINoSp dst, iRegIorL2I src, immI_M1 imm)
%{
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(-1, src): same expansion, mirrored operand order
instruct minI_immM1_reg(iRegINoSp dst, immI_M1 imm, iRegIorL2I src)
%{
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(src, 0): src > 0 ? src : 0
instruct maxI_reg_imm0(iRegINoSp dst, iRegIorL2I src, immI0 imm)
%{
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(0, src): same expansion, mirrored operand order
instruct maxI_imm0_reg(iRegINoSp dst, immI0 imm, iRegIorL2I src)
%{
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(src, 1): src > 0 ? src : 1
instruct maxI_reg_imm1(iRegINoSp dst, iRegIorL2I src, immI_1 imm)
%{
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(1, src): same expansion, mirrored operand order
instruct maxI_imm1_reg(iRegINoSp dst, immI_1 imm, iRegIorL2I src)
%{
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(src, -1): src >= 0 ? src : -1
instruct maxI_reg_immM1(iRegINoSp dst, iRegIorL2I src, immI_M1 imm)
%{
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_ge(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(-1, src): same expansion, mirrored operand order
instruct maxI_immM1_reg(iRegINoSp dst, immI_M1 imm, iRegIorL2I src)
%{
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_ge(dst, src, cr);
  %}
%}
13659 
13660 // This pattern is automatically generated from aarch64_ad.m4
13661 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Reverse the bit order of a 32-bit value with a single RBIT (W form). (m4-generated.)
instruct bits_reverse_I(iRegINoSp dst, iRegIorL2I src)
%{
  match(Set dst (ReverseI src));
  ins_cost(INSN_COST);
  format %{ "rbitw  $dst, $src" %}
  ins_encode %{
    __ rbitw($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
13672 
13673 // This pattern is automatically generated from aarch64_ad.m4
13674 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Reverse the bit order of a 64-bit value with a single RBIT (X form). (m4-generated.)
instruct bits_reverse_L(iRegLNoSp dst, iRegL src)
%{
  match(Set dst (ReverseL src));
  ins_cost(INSN_COST);
  format %{ "rbit  $dst, $src" %}
  ins_encode %{
    __ rbit($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
13685 
13686 
13687 // END This section of the file is automatically generated. Do not edit --------------
13688 
13689 
13690 // ============================================================================
13691 // Floating Point Arithmetic Instructions
13692 
// Half-precision FP add: dst = src1 + src2 (FADDH, scalar H form).
instruct addHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddHF src1 src2));
  format %{ "faddh $dst, $src1, $src2" %}
  ins_encode %{
    __ faddh($dst$$FloatRegister,
             $src1$$FloatRegister,
             $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}
13703 
// Single-precision FP add: dst = src1 + src2 (FADDS).
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
13718 
// Double-precision FP add: dst = src1 + src2 (FADDD).
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13733 
// Half-precision FP subtract: dst = src1 - src2 (FSUBH).
instruct subHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubHF src1 src2));
  format %{ "fsubh $dst, $src1, $src2" %}
  ins_encode %{
    __ fsubh($dst$$FloatRegister,
             $src1$$FloatRegister,
             $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}
13744 
// Single-precision FP subtract: dst = src1 - src2 (FSUBS).
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
13759 
// Double-precision FP subtract: dst = src1 - src2 (FSUBD).
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13774 
// Half-precision FP multiply: dst = src1 * src2 (FMULH).
instruct mulHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulHF src1 src2));
  format %{ "fmulh $dst, $src1, $src2" %}
  ins_encode %{
    __ fmulh($dst$$FloatRegister,
             $src1$$FloatRegister,
             $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}
13785 
// Single-precision FP multiply: dst = src1 * src2 (FMULS).
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
13800 
// Double-precision FP multiply: dst = src1 * src2 (FMULD).
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13815 
13816 // src1 * src2 + src3 (half-precision float)
// Fused multiply-add, half precision: dst = src1 * src2 + src3 (FMADDH).
// Note the FmaHF ideal node carries the addend first.
instruct maddHF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  match(Set dst (FmaHF src3 (Binary src1 src2)));
  format %{ "fmaddh $dst, $src1, $src2, $src3" %}
  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmaddh($dst$$FloatRegister,
              $src1$$FloatRegister,
              $src2$$FloatRegister,
              $src3$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}
13829 
13830 // src1 * src2 + src3
// Fused multiply-add, single precision: dst = src1 * src2 + src3 (FMADDS).
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13846 
13847 // src1 * src2 + src3
// Fused multiply-add, double precision: dst = src1 * src2 + src3 (FMADDD).
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13863 
13864 // src1 * (-src2) + src3
13865 // "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3"
// Fused multiply-subtract, single precision: dst = src3 - src1 * src2,
// i.e. src1 * (-src2) + src3 (FMSUBS).
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13881 
13882 // src1 * (-src2) + src3
13883 // "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3"
// Fused multiply-subtract, double precision: dst = src3 - src1 * src2 (FMSUBD).
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13899 
13900 // src1 * (-src2) - src3
13901 // "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
// Negated fused multiply-add, single precision:
// dst = -(src1 * src2) - src3 (FNMADDS).
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13917 
13918 // src1 * (-src2) - src3
13919 // "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
// Negated fused multiply-add, double precision:
// dst = -(src1 * src2) - src3 (FNMADDD).
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13935 
13936 // src1 * src2 - src3
// Fused multiply then subtract addend, single precision:
// dst = src1 * src2 - src3 (FNMSUBS).
// NOTE(review): the `zero` operand is not referenced in the match rule or
// encoding; kept to preserve the instruct's declared interface.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13952 
13953 // src1 * src2 - src3
// Fused multiply then subtract addend, double precision:
// dst = src1 * src2 - src3 (FNMSUB, double form).
// NOTE(review): the `zero` operand is unused, kept for interface stability.
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    // n.b. insn name should be fnmsubd; the Assembler declares the
    // double-precision form as fnmsub, so that name must be used here.
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13970 
13971 // Math.max(HH)H (half-precision float)
// Half-precision max with IEEE NaN/signed-zero semantics (FMAXH).
instruct maxHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxHF src1 src2));
  format %{ "fmaxh $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxh($dst$$FloatRegister,
             $src1$$FloatRegister,
             $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}
13982 
13983 // Math.min(HH)H (half-precision float)
// Half-precision min with IEEE NaN/signed-zero semantics (FMINH).
instruct minHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinHF src1 src2));
  format %{ "fminh $dst, $src1, $src2" %}
  ins_encode %{
    __ fminh($dst$$FloatRegister,
             $src1$$FloatRegister,
             $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}
13994 
13995 // Math.max(FF)F
// Single-precision max (FMAXS).
instruct maxF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxF src1 src2));

  format %{ "fmaxs   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
14008 
14009 // Math.min(FF)F
// Single-precision min (FMINS).
instruct minF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinF src1 src2));

  format %{ "fmins   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
14022 
14023 // Math.max(DD)D
// Double-precision max (FMAXD).
instruct maxD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MaxD src1 src2));

  format %{ "fmaxd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
14036 
14037 // Math.min(DD)D
// Double-precision min (FMIND).
instruct minD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MinD src1 src2));

  format %{ "fmind   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
14050 
// Half-precision FP divide: dst = src1 / src2 (FDIVH).
instruct divHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivHF src1  src2));
  format %{ "fdivh $dst, $src1, $src2" %}
  ins_encode %{
    __ fdivh($dst$$FloatRegister,
             $src1$$FloatRegister,
             $src2$$FloatRegister);
  %}
  ins_pipe(fp_div_s);
%}
14061 
// Single-precision FP divide: dst = src1 / src2 (FDIVS).
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}
14076 
// Double-precision FP divide: dst = src1 / src2 (FDIVD).
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
14091 
// Single-precision FP negate: dst = -src (FNEGS).
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // Format fixed from "fneg" to "fnegs": the encoding emits the
  // single-precision form, matching negD_reg_reg's "fnegd" below.
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
14105 
// Double-precision FP negate: dst = -src (FNEGD).
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
14119 
// Integer absolute value: compare src with zero, then CNEGW negates
// src when LT (src < 0) and copies it otherwise. Clobbers the flags.
instruct absI_reg(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (AbsI src));

  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $src, zr\n\t"
            "cnegw $dst, $src, Assembler::LT\t# int abs"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cnegw(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
14136 
// Long absolute value: 64-bit variant of absI_reg (CMP + CNEG on LT).
// Clobbers the flags.
instruct absL_reg(iRegLNoSp dst, iRegL src, rFlagsReg cr)
%{
  match(Set dst (AbsL src));

  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmp  $src, zr\n\t"
            "cneg $dst, $src, Assembler::LT\t# long abs"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cneg(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
14153 
// Single-precision FP absolute value (FABSS).
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
14166 
// Double-precision FP absolute value (FABSD).
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
14179 
// Fused |src1 - src2| for single precision: matches AbsF-of-SubF and
// emits a single FABD (absolute difference) instead of FSUB + FABS.
instruct absdF_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AbsF (SubF src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "fabds   $dst, $src1, $src2" %}
  ins_encode %{
    __ fabds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
14193 
// Fused |src1 - src2| for double precision (FABD, see absdF_reg).
instruct absdD_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AbsD (SubD src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "fabdd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fabdd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
14207 
// Double-precision FP square root (FSQRTD).
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  // Was fp_div_s: the pipe classes of sqrtD/sqrtF were swapped. A
  // double-precision sqrt belongs in the double divide/sqrt pipe, as
  // divD_reg_reg uses fp_div_d. Scheduling cost model only; no
  // functional change.
  ins_pipe(fp_div_d);
%}
14220 
// Single-precision FP square root (FSQRTS).
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (SqrtF src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  // Was fp_div_d: the pipe classes of sqrtF/sqrtD were swapped. A
  // single-precision sqrt belongs in the single divide/sqrt pipe, as
  // divF_reg_reg uses fp_div_s. Scheduling cost model only; no
  // functional change.
  ins_pipe(fp_div_s);
%}
14233 
// Half-precision FP square root (FSQRTH).
instruct sqrtHF_reg(vRegF dst, vRegF src) %{
  match(Set dst (SqrtHF src));
  format %{ "fsqrth $dst, $src" %}
  ins_encode %{
    __ fsqrth($dst$$FloatRegister,
              $src$$FloatRegister);
  %}
  ins_pipe(fp_div_s);
%}
14243 
14244 // Math.rint, floor, ceil
// Math.rint / floor / ceil on double: dispatch on the compile-time
// rounding-mode constant to FRINTN (to nearest, ties to even),
// FRINTM (toward -inf) or FRINTP (toward +inf).
instruct roundD_reg(vRegD dst, vRegD src, immI rmode) %{
  match(Set dst (RoundDoubleMode src rmode));
  format %{ "frint  $dst, $src, $rmode" %}
  ins_encode %{
    switch ($rmode$$constant) {
      case RoundDoubleModeNode::rmode_rint:
        __ frintnd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_floor:
        __ frintmd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_ceil:
        __ frintpd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      default:
        // Previously an unknown mode fell through and emitted nothing;
        // fail fast instead of silently producing no instruction.
        ShouldNotReachHere();
    }
  %}
  ins_pipe(fp_uop_d);
%}
14266 
// CopySignD: dst gets src2's sign bit and src1's magnitude bits.
// fnegd(dst, zero) turns +0.0 into 0x8000000000000000, a mask that
// selects only the sign bit; BSL then takes the sign from src2 and
// everything else from src1. dst is TEMP_DEF because it is written
// before all inputs are consumed.
instruct copySignD_reg(vRegD dst, vRegD src1, vRegD src2, vRegD zero) %{
  match(Set dst (CopySignD src1 (Binary src2 zero)));
  effect(TEMP_DEF dst, USE src1, USE src2, USE zero);
  format %{ "CopySignD  $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg),
                  zero = as_FloatRegister($zero$$reg);
    __ fnegd(dst, zero);
    __ bsl(dst, __ T8B, src2, src1);
  %}
  ins_pipe(fp_uop_d);
%}
14281 
// CopySignF: dst gets src2's sign bit and src1's magnitude bits.
// movi builds the 0x80000000 sign mask (0x80 shifted left 24) in dst;
// BSL then selects the sign from src2 and all other bits from src1.
instruct copySignF_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (CopySignF src1 src2));
  effect(TEMP_DEF dst, USE src1, USE src2);
  format %{ "CopySignF  $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg);
    __ movi(dst, __ T2S, 0x80, 24);
    __ bsl(dst, __ T8B, src2, src1);
  %}
  ins_pipe(fp_uop_d);
%}
14295 
// Math.signum(double): returns +-0.0/NaN unchanged, otherwise +-1.0
// with src's sign. The zero/one operands carry the constants 0.0/1.0
// (bound by the matched Binary subtree).
instruct signumD_reg(vRegD dst, vRegD src, vRegD zero, vRegD one) %{
  match(Set dst (SignumD src (Binary zero one)));
  effect(TEMP_DEF dst, USE src, USE zero, USE one);
  format %{ "signumD  $dst, $src" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg),
                  dst = as_FloatRegister($dst$$reg),
                  zero = as_FloatRegister($zero$$reg),
                  one = as_FloatRegister($one$$reg);
    __ facgtd(dst, src, zero); // dst=0 for +-0.0 and NaN. 0xFFF..F otherwise
    __ ushrd(dst, dst, 1);     // dst=0 for +-0.0 and NaN. 0x7FF..F otherwise
    // Bit selection instruction gets bit from "one" for each enabled bit in
    // "dst", otherwise gets a bit from "src". For "src" that contains +-0.0 or
    // NaN the whole "src" will be copied because "dst" is zero. For all other
    // "src" values dst is 0x7FF..F, which means only the sign bit is copied
    // from "src", and all other bits are copied from 1.0.
    __ bsl(dst, __ T8B, one, src);
  %}
  ins_pipe(fp_uop_d);
%}
14316 
// Math.signum(float): single-precision variant of signumD_reg above.
instruct signumF_reg(vRegF dst, vRegF src, vRegF zero, vRegF one) %{
  match(Set dst (SignumF src (Binary zero one)));
  effect(TEMP_DEF dst, USE src, USE zero, USE one);
  format %{ "signumF  $dst, $src" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg),
                  dst = as_FloatRegister($dst$$reg),
                  zero = as_FloatRegister($zero$$reg),
                  one = as_FloatRegister($one$$reg);
    __ facgts(dst, src, zero);    // dst=0 for +-0.0 and NaN. 0xFFF..F otherwise
    __ ushr(dst, __ T2S, dst, 1); // dst=0 for +-0.0 and NaN. 0x7FF..F otherwise
    // Bit selection instruction gets bit from "one" for each enabled bit in
    // "dst", otherwise gets a bit from "src". For "src" that contains +-0.0 or
    // NaN the whole "src" will be copied because "dst" is zero. For all other
    // "src" values dst is 0x7FF..F, which means only the sign bit is copied
    // from "src", and all other bits are copied from 1.0.
    __ bsl(dst, __ T8B, one, src);
  %}
  ins_pipe(fp_uop_d);
%}
14337 
// Thread.onSpinWait() hint; spin_wait() emits the platform-selected
// spin-wait sequence.
instruct onspinwait() %{
  match(OnSpinWait);
  ins_cost(INSN_COST);

  format %{ "onspinwait" %}

  ins_encode %{
    __ spin_wait();
  %}
  ins_pipe(pipe_class_empty);
%}
14349 
14350 // ============================================================================
14351 // Logical Instructions
14352 
14353 // Integer Logical Instructions
14354 
14355 // And Instructions
14356 
14357 
// Integer bitwise AND, register-register (ANDW).
// NOTE(review): the `cr` operand is not referenced in the encoding;
// kept to preserve the instruct's declared interface.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14372 
// Integer bitwise AND with a logical immediate (ANDW, immediate form).
// NOTE(review): the `cr` operand is unused, kept for interface stability.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Format fixed from "andsw" to "andw": the encoding emits the
  // non-flag-setting AND, and the flags are not modelled as set here.
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14387 
14388 // Or Instructions
14389 
// Integer bitwise OR, register-register (ORRW).
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14404 
// Integer bitwise OR with a logical immediate (ORRW, immediate form).
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14419 
14420 // Xor Instructions
14421 
// Integer bitwise XOR, register-register (EORW).
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14436 
// Integer bitwise XOR with a logical immediate (EORW, immediate form).
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14451 
14452 // Long Logical Instructions
14453 // TODO
14454 
// Long bitwise AND, register-register (AND, X form).
// NOTE(review): the `cr` operand is unused, kept for interface stability.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Format comment fixed from "# int" to "# long": this is the 64-bit op.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14469 
// Long bitwise AND with a logical immediate (AND, X immediate form).
// NOTE(review): the `cr` operand is unused, kept for interface stability.
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Format comment fixed from "# int" to "# long": this is the 64-bit op.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14484 
14485 // Or Instructions
14486 
// Long bitwise OR, register-register (ORR, X form).
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  // Format comment fixed from "# int" to "# long": this is the 64-bit op.
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14501 
// Long bitwise OR with a logical immediate (ORR, X immediate form).
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  // Format comment fixed from "# int" to "# long": this is the 64-bit op.
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14516 
14517 // Xor Instructions
14518 
// Long bitwise XOR, register-register (EOR, X form).
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  // Format comment fixed from "# int" to "# long": this is the 64-bit op.
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14533 
// Long bitwise XOR with a logical immediate (EOR, X immediate form).
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  // Format comment fixed from "# int" to "# long": this is the 64-bit op.
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14548 
// int -> long sign extension: SBFM with imms=31 is the SXTW alias.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
14560 
14561 // this pattern occurs in bigmath arithmetic
// Unsigned int -> long: ConvI2L masked with 0xFFFFFFFF collapses to a
// single zero-extension (UBFM 0..31, the UXTW alias).
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14574 
// long -> int truncation: a 32-bit register move keeps the low word
// and zeroes the upper half.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
14587 
// double -> float narrowing conversion (FCVT, D to S).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}
14600 
// float -> double widening conversion (FCVT, S to D).
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}
14613 
// float -> int: FCVTZS (round toward zero), W destination.
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}
14626 
// float -> long: FCVTZS (round toward zero), X destination.
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}
14639 
// float -> half-float bits: converts via the FP temp, result lands in
// a general register (flt_to_flt16 helper).
instruct convF2HF_reg_reg(iRegINoSp dst, vRegF src, vRegF tmp) %{
  match(Set dst (ConvF2HF src));
  format %{ "fcvt $tmp, $src\t# convert single to half precision\n\t"
            "smov $dst, $tmp\t# move result from $tmp to $dst"
  %}
  effect(TEMP tmp);
  ins_encode %{
      __ flt_to_flt16($dst$$Register, $src$$FloatRegister, $tmp$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
14651 
// half-float bits -> float: source comes from a general register and
// is widened via the FP temp (flt16_to_flt helper).
instruct convHF2F_reg_reg(vRegF dst, iRegINoSp src, vRegF tmp) %{
  match(Set dst (ConvHF2F src));
  format %{ "mov $tmp, $src\t# move source from $src to $tmp\n\t"
            "fcvt $dst, $tmp\t# convert half to single precision"
  %}
  effect(TEMP tmp);
  ins_encode %{
      __ flt16_to_flt($dst$$FloatRegister, $src$$Register, $tmp$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
14663 
// Integer -> float/double (signed convert, scvtf) and
// double -> integer (truncating convert, fcvtzs) forms.

// ConvI2F: signed 32-bit int to float (scvtf Sd, Wn).
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// ConvL2F: signed 64-bit long to float (scvtf Sd, Xn).
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

// ConvD2I: double to 32-bit int, round toward zero (fcvtzs Wd, Dn).
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// ConvD2L: double to 64-bit long, round toward zero (fcvtzs Xd, Dn).
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// ConvI2D: signed 32-bit int to double (scvtf Dd, Wn). Always exact.
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// ConvL2D: signed 64-bit long to double (scvtf Dd, Xn).
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
14741 
// RoundD: Java Math.round(double) -> long. Delegates to the macro-assembler
// helper; needs an FP temp and clobbers flags. dst is TEMP_DEF because the
// helper writes it before the inputs are dead.
instruct round_double_reg(iRegLNoSp dst, vRegD src, vRegD ftmp, rFlagsReg cr)
%{
  match(Set dst (RoundD src));
  effect(TEMP_DEF dst, TEMP ftmp, KILL cr);
  format %{ "java_round_double $dst,$src"%}
  ins_encode %{
    __ java_round_double($dst$$Register, as_FloatRegister($src$$reg),
                         as_FloatRegister($ftmp$$reg));
  %}
  ins_pipe(pipe_slow);
%}

// RoundF: Java Math.round(float) -> int. Same structure as round_double_reg.
instruct round_float_reg(iRegINoSp dst, vRegF src, vRegF ftmp, rFlagsReg cr)
%{
  match(Set dst (RoundF src));
  effect(TEMP_DEF dst, TEMP ftmp, KILL cr);
  format %{ "java_round_float $dst,$src"%}
  ins_encode %{
    __ java_round_float($dst$$Register, as_FloatRegister($src$$reg),
                        as_FloatRegister($ftmp$$reg));
  %}
  ins_pipe(pipe_slow);
%}
14765 
// stack <-> reg and reg <-> reg shuffles with no conversion
//
// These implement Float.floatToRawIntBits-style raw bit moves: the ideal
// MoveF2I/MoveI2F/MoveD2L/MoveL2D nodes reinterpret bits, so the stack
// variants are plain loads/stores of the spill slot with no FP conversion.

// MoveF2I, stack slot -> int register: raw 32-bit load from the spill slot.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// MoveI2F, stack slot -> float register: raw 32-bit FP load.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// MoveD2L, stack slot -> long register: raw 64-bit load.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// MoveL2D, stack slot -> double register: raw 64-bit FP load.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14839 
// MoveF2I, float register -> int stack slot: raw 32-bit FP store.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// MoveI2F, int register -> float stack slot: raw 32-bit store.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
14875 
// MoveD2L, double register -> long stack slot: raw 64-bit FP store.
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Fixed: the format previously printed "strd $dst, $src", reversing the
  // operands relative to the actual store (strd src, [sp, #dst_disp]) and
  // to the sibling MoveF2I_reg_stack / MoveL2D_reg_stack formats, which
  // both print "$src, $dst". Source operand now comes first.
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14893 
// MoveL2D, long register -> double stack slot: raw 64-bit store.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
14911 
// Register-to-register raw bit moves between the FP/SIMD and general
// register files, via fmov (no memory round trip, no conversion).

// MoveF2I: copy the 32 raw bits of an S register into a W register.
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// MoveI2F: copy the 32 raw bits of a W register into an S register.
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// MoveD2L: copy the 64 raw bits of a D register into an X register.
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// MoveL2D: copy the 64 raw bits of an X register into a D register.
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
14983 
14984 // ============================================================================
14985 // clearing of an array
14986 
14987 instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
14988 %{
14989   match(Set dummy (ClearArray cnt base));
14990   effect(USE_KILL cnt, USE_KILL base, KILL cr);
14991 
14992   ins_cost(4 * INSN_COST);
14993   format %{ "ClearArray $cnt, $base" %}
14994 
14995   ins_encode %{
14996     address tpc = __ zero_words($base$$Register, $cnt$$Register);
14997     if (tpc == nullptr) {
14998       ciEnv::current()->record_failure("CodeCache is full");
14999       return;
15000     }
15001   %}
15002 
15003   ins_pipe(pipe_class_memory);
15004 %}
15005 
15006 instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, iRegL_R11 temp, Universe dummy, rFlagsReg cr)
15007 %{
15008   predicate((uint64_t)n->in(2)->get_long()
15009             < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
15010   match(Set dummy (ClearArray cnt base));
15011   effect(TEMP temp, USE_KILL base, KILL cr);
15012 
15013   ins_cost(4 * INSN_COST);
15014   format %{ "ClearArray $cnt, $base" %}
15015 
15016   ins_encode %{
15017     address tpc = __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
15018     if (tpc == nullptr) {
15019       ciEnv::current()->record_failure("CodeCache is full");
15020       return;
15021     }
15022   %}
15023 
15024   ins_pipe(pipe_class_memory);
15025 %}
15026 
15027 // ============================================================================
15028 // Overflow Math Instructions
15029 
15030 instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
15031 %{
15032   match(Set cr (OverflowAddI op1 op2));
15033 
15034   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
15035   ins_cost(INSN_COST);
15036   ins_encode %{
15037     __ cmnw($op1$$Register, $op2$$Register);
15038   %}
15039 
15040   ins_pipe(icmp_reg_reg);
15041 %}
15042 
15043 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
15044 %{
15045   match(Set cr (OverflowAddI op1 op2));
15046 
15047   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
15048   ins_cost(INSN_COST);
15049   ins_encode %{
15050     __ cmnw($op1$$Register, $op2$$constant);
15051   %}
15052 
15053   ins_pipe(icmp_reg_imm);
15054 %}
15055 
15056 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
15057 %{
15058   match(Set cr (OverflowAddL op1 op2));
15059 
15060   format %{ "cmn   $op1, $op2\t# overflow check long" %}
15061   ins_cost(INSN_COST);
15062   ins_encode %{
15063     __ cmn($op1$$Register, $op2$$Register);
15064   %}
15065 
15066   ins_pipe(icmp_reg_reg);
15067 %}
15068 
15069 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
15070 %{
15071   match(Set cr (OverflowAddL op1 op2));
15072 
15073   format %{ "adds  zr, $op1, $op2\t# overflow check long" %}
15074   ins_cost(INSN_COST);
15075   ins_encode %{
15076     __ adds(zr, $op1$$Register, $op2$$constant);
15077   %}
15078 
15079   ins_pipe(icmp_reg_imm);
15080 %}
15081 
// OverflowSubI, reg - reg: cmpw sets V on signed 32-bit subtract overflow.
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// OverflowSubI, reg - add/sub-encodable immediate.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// OverflowSubL, reg - reg: 64-bit variant.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// OverflowSubL, reg - immediate. subs with zr destination == cmp.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ subs(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
15133 
// Negation overflow: matches OverflowSub with a zero first operand
// (0 - op1). Overflow occurs only for MIN_VALUE inputs; cmp against zr
// sets V in exactly that case.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// 64-bit variant of overflowNegI_reg.
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
15159 
// Multiply overflow checks. AArch64 multiplies do not set flags, so these
// compute the full-width product and compare it against the sign-extended
// narrow product; a mismatch means the result did not fit. The non-branch
// forms then translate that NE condition into the V flag (by producing
// 0x80000000 and subtracting 1, which sets VS) so a generic cmovOp/branch
// on overflow works downstream.

// OverflowMulI producing a flags result consumable via VS/VC.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused OverflowMulI + branch: when the If tests only overflow/no_overflow
// (see predicate), branch directly on the EQ/NE of the widening compare and
// skip materializing the V flag.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // Map VS (overflow) -> NE, VC (no overflow) -> EQ of the compare above.
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}

// OverflowMulL producing a flags result. Uses mul + smulh to get the
// 128-bit product; the high half must equal the sign extension of the low
// half for the result to fit in 64 bits.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused OverflowMulL + branch; long counterpart of overflowMulI_reg_branch.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
15249 
15250 // ============================================================================
15251 // Compare Instructions
15252 
15253 instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
15254 %{
15255   match(Set cr (CmpI op1 op2));
15256 
15257   effect(DEF cr, USE op1, USE op2);
15258 
15259   ins_cost(INSN_COST);
15260   format %{ "cmpw  $op1, $op2" %}
15261 
15262   ins_encode(aarch64_enc_cmpw(op1, op2));
15263 
15264   ins_pipe(icmp_reg_reg);
15265 %}
15266 
15267 instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
15268 %{
15269   match(Set cr (CmpI op1 zero));
15270 
15271   effect(DEF cr, USE op1);
15272 
15273   ins_cost(INSN_COST);
15274   format %{ "cmpw $op1, 0" %}
15275 
15276   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
15277 
15278   ins_pipe(icmp_reg_imm);
15279 %}
15280 
15281 instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
15282 %{
15283   match(Set cr (CmpI op1 op2));
15284 
15285   effect(DEF cr, USE op1);
15286 
15287   ins_cost(INSN_COST);
15288   format %{ "cmpw  $op1, $op2" %}
15289 
15290   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
15291 
15292   ins_pipe(icmp_reg_imm);
15293 %}
15294 
15295 instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
15296 %{
15297   match(Set cr (CmpI op1 op2));
15298 
15299   effect(DEF cr, USE op1);
15300 
15301   ins_cost(INSN_COST * 2);
15302   format %{ "cmpw  $op1, $op2" %}
15303 
15304   ins_encode(aarch64_enc_cmpw_imm(op1, op2));
15305 
15306   ins_pipe(icmp_reg_imm);
15307 %}
15308 
// Unsigned compare Instructions; really, same as signed compare
// except it should only be used to feed an If or a CMovI which takes a
// cmpOpU.

// CmpU, reg vs reg. Identical encoding to compI_reg_reg; only the flags
// register class (rFlagsRegU) differs, steering consumers to unsigned
// condition codes.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// CmpU against zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// CmpU against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// CmpU against an arbitrary immediate (may need materialization).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15368 
// CmpL, reg vs reg (64-bit signed compare).
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// CmpL against zero.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// CmpL against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// CmpL against an arbitrary immediate (may need materialization).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15424 
// CmpUL: unsigned 64-bit compares. Same encodings as the CmpL rules; the
// rFlagsRegU result class routes consumers to unsigned condition codes.

instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// CmpUL against zero.
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// CmpUL against an add/sub-encodable immediate.
instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// CmpUL against an arbitrary immediate (may need materialization).
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15480 
// Pointer and compressed-pointer compares. Pointer comparison is unsigned
// (rFlagsRegU result).

// CmpP, ptr vs ptr.
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// CmpN, narrow (compressed) oop vs narrow oop.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// CmpP against null.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// CmpN against null (compressed form).
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
15536 
// FP comparisons
//
// n.b. CmpF/CmpD set a normal flags reg which then gets compared
// using normal cmpOp. See declaration of rFlagsReg for details.

// CmpF, reg vs reg (fcmp Sn, Sm).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// CmpF against the constant 0.0 (fcmp's dedicated zero form).
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
// FROM HERE

// CmpD, reg vs reg (fcmp Dn, Dm).
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// CmpD against the constant 0.0.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
15598 
// Three-way FP compares (CmpF3/CmpD3): produce -1 / 0 / +1 in an int
// register. fcmp leaves unordered looking like "less than" to LT, so NaN
// operands yield -1, matching Java's Float.compare semantics for fcmpl.

instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}

// Double-precision counterpart of compF3_reg_reg.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}

// CmpF3 against the constant 0.0 (uses fcmp's zero form).
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}

// CmpD3 against the constant 0.0.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}
15706 
// CmpLTMask: produce -1 (all ones) if p < q, else 0.
// cset gives 0/1 for LT, then negation turns 1 into -1.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// CmpLTMask against zero: the sign bit already is the answer, so a single
// arithmetic right shift by 31 replicates it across the word.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
15743 
15744 // ============================================================================
15745 // Max and Min
15746 
15747 // Like compI_reg_reg or compI_reg_immI0 but without match rule and second zero parameter.
15748 
15749 instruct compI_reg_imm0(rFlagsReg cr, iRegI src)
15750 %{
15751   effect(DEF cr, USE src);
15752   ins_cost(INSN_COST);
15753   format %{ "cmpw $src, 0" %}
15754 
15755   ins_encode %{
15756     __ cmpw($src$$Register, 0);
15757   %}
15758   ins_pipe(icmp_reg_imm);
15759 %}
15760 
// Signed int minimum: expands to a compare followed by a conditional
// move that selects src1 when src1 < src2.
instruct minI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2)
%{
  match(Set dst (MinI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_lt(dst, src1, src2, cr);
  %}
%}
15772 
// Signed int maximum: expands to a compare followed by a conditional
// move that selects src1 when src1 > src2.
instruct maxI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2)
%{
  match(Set dst (MaxI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_gt(dst, src1, src2, cr);
  %}
%}
15784 
15785 
15786 // ============================================================================
15787 // Branch Instructions
15788 
15789 // Direct Branch.
// Direct Branch.
// Unconditional branch to a label (Goto node).
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
15803 
15804 // Conditional Near Branch
// Conditional branch on signed condition codes in cr.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
15824 
15825 // Conditional Near Branch Unsigned
// Conditional branch on unsigned condition codes in cr.
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
15845 
15846 // Make use of CBZ and CBNZ.  These instructions, as well as being
15847 // shorter than (cmp; branch), have the additional benefit of not
15848 // killing the flags.
15849 
// Compare-and-branch of an int against zero: EQ -> cbzw, NE -> cbnzw.
// cr is listed as an operand to satisfy the If match rule but is not
// actually written (see the section comment above: flags are preserved).
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15866 
// Compare-and-branch of a long against zero: EQ -> cbz, NE -> cbnz
// (64-bit forms).
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15883 
// Compare-and-branch of a pointer against null: EQ -> cbz, NE -> cbnz
// (64-bit forms).
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15900 
// Compare-and-branch of a narrow (compressed) oop against null; narrow
// oops are 32 bits, so the word forms cbzw/cbnzw are used.
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15917 
// Null check of a decoded narrow oop: a DecodeN of zero is zero, so we
// can test the 32-bit compressed form directly and skip the decode.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15934 
// Unsigned int compare against zero.  For unsigned x: x <= 0 iff x == 0
// (LS folds to EQ) and x > 0 iff x != 0 (HI folds to NE), so all four
// conditions reduce to cbzw/cbnzw.
instruct cmpUI_imm0_branch(cmpOpUEqNeLeGt cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS) {
      __ cbzw($op1$$Register, *L);
    } else {
      assert(cond == Assembler::NE || cond == Assembler::HI, "unexpected condition");
      __ cbnzw($op1$$Register, *L);
    }
  %}
  ins_pipe(pipe_cmp_branch);
%}
15953 
// Unsigned long compare against zero; same folding as the int variant
// above (LS -> EQ, HI -> NE) using the 64-bit cbz/cbnz forms.
instruct cmpUL_imm0_branch(cmpOpUEqNeLeGt cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS) {
      __ cbz($op1$$Register, *L);
    } else {
      assert(cond == Assembler::NE || cond == Assembler::HI, "unexpected condition");
      __ cbnz($op1$$Register, *L);
    }
  %}
  ins_pipe(pipe_cmp_branch);
%}
15972 
15973 // Test bit and Branch
15974 
15975 // Patterns for short (< 32KiB) variants
// Sign test of a long via its sign bit (bit 63): LT means the bit is
// set (tbnz, i.e. NE on the bit), GE means clear (tbz/EQ).
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
15991 
// Sign test of an int via its sign bit (bit 31); condition mapping as
// in the long variant above.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16007 
// Branch on a single bit of a long: (op1 & (1 << k)) == 0 / != 0
// becomes tbz/tbnz.  The predicate requires the mask to be a power of
// two so the bit index can be recovered with exact_log2.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16024 
// Branch on a single bit of an int; same power-of-two-mask trick as the
// long variant above.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16041 
16042 // And far variants
// Far variant of cmpL_branch_sign: same sign-bit test, but tbr is told
// to emit a sequence that can reach targets beyond the 32KiB tb* range.
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16057 
// Far variant of cmpI_branch_sign (no ins_short_branch, far-capable).
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16072 
// Far variant of cmpL_branch_bit.
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16088 
// Far variant of cmpI_branch_bit.
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16104 
16105 // Test bits
16106 
// Set flags from (op1 & op2) for a long, where op2 can be encoded as a
// 64-bit logical immediate: a single tst (ANDS xzr, ...) avoids
// materializing the mask in a register.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
16119 
// Set flags from (op1 & op2) for an int, where op2 can be encoded as a
// 32-bit logical immediate: a single tstw (ANDS wzr, ...) avoids
// materializing the mask in a register.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  // The encoding emits the 32-bit form (tstw), so the format says
  // "tstw" too — consistent with cmpI_and_reg below.
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
16132 
// Register-register form of the long bit test: flags from op1 & op2.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
16143 
// Register-register form of the int bit test: flags from op1 & op2.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
16154 
16155 
16156 // Conditional Far Branch
16157 // Conditional Far Branch Unsigned
16158 // TODO: fixme
16159 
16160 // counted loop end branch near
// Back-branch closing a counted loop; uses the same conditional-branch
// encoding as branchCon.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
16176 
16177 // counted loop end branch far
16178 // TODO: fixme
16179 
16180 // ============================================================================
16181 // inlined locking and unlocking
16182 
// Inlined monitor enter for the legacy locking modes; the heavy lifting
// is delegated to MacroAssembler::fast_lock, which leaves the outcome
// in the flags (cr is the Set target of the match rule).
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
  predicate(LockingMode != LM_LIGHTWEIGHT);
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2, TEMP tmp3);

  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2,$tmp3" %}

  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_serial);
%}
16198 
// Inlined monitor exit for the legacy locking modes; delegates to
// MacroAssembler::fast_unlock.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  predicate(LockingMode != LM_LIGHTWEIGHT);
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register);
  %}

  ins_pipe(pipe_serial);
%}
16214 
// Inlined monitor enter for LM_LIGHTWEIGHT; delegates to
// MacroAssembler::fast_lock_lightweight.
instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
  predicate(LockingMode == LM_LIGHTWEIGHT);
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2, TEMP tmp3);

  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2,$tmp3" %}

  ins_encode %{
    __ fast_lock_lightweight($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_serial);
%}
16230 
// Inlined monitor exit for LM_LIGHTWEIGHT; delegates to
// MacroAssembler::fast_unlock_lightweight.
instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
  predicate(LockingMode == LM_LIGHTWEIGHT);
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2, TEMP tmp3);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2, $tmp3" %}

  ins_encode %{
    __ fast_unlock_lightweight($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_serial);
%}
16246 
16247 // ============================================================================
16248 // Safepoint Instructions
16249 
16250 // TODO
16251 // provide a near and far version of this code
16252 
// Safepoint poll: a load from the polling page; the VM arms the page so
// the load traps when a safepoint is pending.
instruct safePoint(rFlagsReg cr, iRegP poll)
%{
  match(SafePoint poll);
  effect(KILL cr);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
16266 
16267 
16268 // ============================================================================
16269 // Procedure Call/Return Instructions
16270 
16271 // Call Java Static Instruction
16272 
// Direct call to a statically-bound Java method.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode(aarch64_enc_java_static_call(meth),
             aarch64_enc_call_epilog);

  ins_pipe(pipe_class_call);
%}
16288 
16289 // TO HERE
16290 
16291 // Call Java Dynamic Instruction
// Dynamically-dispatched Java call (inline-cache based).
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode(aarch64_enc_java_dynamic_call(meth),
             aarch64_enc_call_epilog);

  ins_pipe(pipe_class_call);
%}
16307 
16308 // Call Runtime Instruction
16309 
// Call into the VM runtime (may safepoint).
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16324 
16325 // Call Runtime Instruction
16326 
// Leaf runtime call (no safepoint on the callee side).
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16341 
16342 // Call Runtime Instruction without safepoint and with vector arguments
// Leaf runtime call that passes/returns vector arguments; shares the
// ordinary java-to-runtime encoding.
instruct CallLeafDirectVector(method meth)
%{
  match(CallLeafVector);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf vector $meth" %}

  ins_encode(aarch64_enc_java_to_runtime(meth));

  ins_pipe(pipe_class_call);
%}
16357 
16358 // Call Runtime Instruction
16359 
// Leaf runtime call that is known not to use floating-point arguments.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16374 
16375 // Tail Call; Jump from runtime stub to Java code.
16376 // Also known as an 'interprocedural jump'.
16377 // Target of jump will eventually return to caller.
16378 // TailJump below removes the return address.
16379 // Don't use rfp for 'jump_target' because a MachEpilogNode has already been
16380 // emitted just above the TailCall which has reset rfp to the caller state.
// Indirect tail call: jump through jump_target with method_ptr carrying
// the Method*.  jump_target must avoid rfp (see comment above).
instruct TailCalljmpInd(iRegPNoSpNoRfp jump_target, inline_cache_RegP method_ptr)
%{
  match(TailCall jump_target method_ptr);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_ptr holds method" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
16393 
// Indirect tail jump carrying the exception oop in r0 (ex_oop).
instruct TailjmpInd(iRegPNoSpNoRfp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
16406 
16407 // Forward exception.
// Forward exception.
// Far jump to the shared forward-exception stub.
instruct ForwardExceptionjmp()
%{
  match(ForwardException);
  ins_cost(CALL_COST);

  format %{ "b forward_exception_stub" %}
  ins_encode %{
    __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
  %}
  ins_pipe(pipe_class_call);
%}
16419 
16420 // Create exception oop: created by stack-crawling runtime code.
16421 // Created exception is now available to this handler, and is setup
16422 // just prior to jumping to this handler. No code emitted.
16423 // TODO check
16424 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Zero-size placeholder: tells the register allocator the exception oop
// already lives in r0 when control arrives here; emits no code.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
16437 
16438 // Rethrow exception: The exception oop will come in the first
16439 // argument position. Then JUMP (not call) to the rethrow stub code.
// Jump (not call) to the rethrow stub; the exception oop arrives in the
// first argument register per the comment above.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
16450 
16451 
16452 // Return Instruction
16453 // epilog node loads ret address into lr as part of frame pop
// Method return; lr was restored by the epilog (see comment above).
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
16464 
16465 // Die now.
// Halt node: emits a stop (with the halt reason) only when the node is
// reachable; unreachable Halts emit nothing.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    if (is_reachable()) {
      const char* str = __ code_string(_halt_reason);
      __ stop(str);
    }
  %}

  ins_pipe(pipe_class_default);
%}
16481 
16482 // ============================================================================
16483 // Partial Subtype Check
16484 //
// Search the subklass's secondary supers (superklass) array for an
// instance of the superklass.  Set a hidden
16486 // internal cache on a hit (cache is checked with exposed code in
16487 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
16488 // encoding ALSO sets flags.
16489 
// Linear-scan subtype check, used only when the secondary-supers hash
// table is disabled (see predicate).  Fixed registers match the shared
// stub's calling convention.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  predicate(!UseSecondarySupersTable);
  effect(KILL cr, KILL temp);

  ins_cost(20 * INSN_COST);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
16505 
16506 // Two versions of partialSubtypeCheck, both used when we need to
16507 // search for a super class in the secondary supers array. The first
16508 // is used when we don't know _a priori_ the class being searched
16509 // for. The second, far more common, is used when we do know: this is
16510 // used for instanceof, checkcast, and any case where C2 can determine
16511 // it by constant propagation.
16512 
// Secondary-supers-table check when the superclass is not a compile-time
// constant (see the explanatory comment above).
instruct partialSubtypeCheckVarSuper(iRegP_R4 sub, iRegP_R0 super, vRegD_V0 vtemp, iRegP_R5 result,
                                     iRegP_R1 tempR1, iRegP_R2 tempR2, iRegP_R3 tempR3,
                                     rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  predicate(UseSecondarySupersTable);
  effect(KILL cr, TEMP tempR1, TEMP tempR2, TEMP tempR3, TEMP vtemp);

  ins_cost(10 * INSN_COST);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode %{
    // nullptr L_success: fall through with the outcome in $result.
    __ lookup_secondary_supers_table_var($sub$$Register, $super$$Register,
                                         $tempR1$$Register, $tempR2$$Register, $tempR3$$Register,
                                         $vtemp$$FloatRegister,
                                         $result$$Register, /*L_success*/nullptr);
  %}

  ins_pipe(pipe_class_memory);
%}
16533 
// Secondary-supers-table check when the superclass is a compile-time
// constant (the common case: instanceof/checkcast).  Either inlines the
// table lookup or calls a per-hash-slot stub; both paths may fail if
// the code cache is full, which is recorded as a compile bailout.
instruct partialSubtypeCheckConstSuper(iRegP_R4 sub, iRegP_R0 super_reg, immP super_con, vRegD_V0 vtemp, iRegP_R5 result,
                                       iRegP_R1 tempR1, iRegP_R2 tempR2, iRegP_R3 tempR3,
                                       rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub (Binary super_reg super_con)));
  predicate(UseSecondarySupersTable);
  effect(KILL cr, TEMP tempR1, TEMP tempR2, TEMP tempR3, TEMP vtemp);

  ins_cost(5 * INSN_COST);  // smaller than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super_reg, $super_con" %}

  ins_encode %{
    bool success = false;
    // Hash slot of the constant superclass selects the stub / probe start.
    u1 super_klass_slot = ((Klass*)$super_con$$constant)->hash_slot();
    if (InlineSecondarySupersTest) {
      success =
        __ lookup_secondary_supers_table_const($sub$$Register, $super_reg$$Register,
                                               $tempR1$$Register, $tempR2$$Register, $tempR3$$Register,
                                               $vtemp$$FloatRegister,
                                               $result$$Register,
                                               super_klass_slot);
    } else {
      // Out-of-line: trampoline call returns nullptr if no stub space.
      address call = __ trampoline_call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_stub(super_klass_slot)));
      success = (call != nullptr);
    }
    if (!success) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
16567 
16568 // Intrisics for String.compareTo()
16569 
// String.compareTo intrinsic, both strings UTF-16 (UU), NEON-only path
// (UseSVE == 0); no SVE vector/predicate temps are needed.
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, pnoreg, pnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16587 
// String.compareTo intrinsic, both strings Latin-1 (LL), NEON-only path
// (UseSVE == 0).
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, pnoreg, pnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16604 
// String.compareTo intrinsic, mixed encodings: str1 UTF-16, str2
// Latin-1 (UL); NEON-only path, needs three vector temps for widening.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, pnoreg, pnoreg, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16624 
// String.compareTo intrinsic, mixed encodings: str1 Latin-1, str2
// UTF-16 (LU); mirror of the UL variant above.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, pnoreg, pnoreg, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
16644 
16645 // Note that Z registers alias the corresponding NEON registers, we declare the vector operands of
16646 // these string_compare variants as NEON register type for convenience so that the prototype of
16647 // string_compare can be shared with all variants.
16648 
// SVE variant of the LL compare (UseSVE > 0): uses two governing
// predicate temps; vector operands are declared as NEON registers for
// prototype sharing (see the note above).
instruct string_compareLL_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16671 
// SVE variant of string compare, LU encoding (first operand Latin1, second
// UTF-16 per StrIntrinsicNode naming). Same shape as the LL SVE variant:
// fnoreg for the unused vector slot, two governing predicate temps.
instruct string_compareLU_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
16694 
// SVE variant of string compare, UL encoding (first operand UTF-16, second
// Latin1 per StrIntrinsicNode naming).
instruct string_compareUL_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16717 
// SVE variant of string compare, UU encoding (both operands UTF-16 per
// StrIntrinsicNode naming).
instruct string_compareUU_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16740 
// String.indexOf with a variable-length needle, UU encoding. The stub's
// constant-count argument is -1, i.e. the needle length is known only at
// run time (in cnt2) -- cf. the _con variants below that pass a constant.
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                          iRegINoSp tmp3, iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
         TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16764 
// String.indexOf with a variable-length needle, LL encoding (Latin1/Latin1).
// -1 signals a run-time needle length (in cnt2).
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
         TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16788 
// String.indexOf with a variable-length needle, UL encoding (UTF-16 haystack,
// Latin1 needle per StrIntrinsicNode naming). -1 signals a run-time needle
// length (in cnt2). The format string uses $-operands throughout so the
// disassembly comment shows actual registers, matching the UU/LL siblings.
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5,
         TEMP tmp6, TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16812 
// String.indexOf with a compile-time constant needle length, UU encoding
// (immI_le_4: at most 4 per the operand name). The constant is passed to the
// stub as icnt2; the run-time count and the last two temp slots take zr.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16834 
// String.indexOf with a compile-time constant needle length, LL encoding
// (immI_le_4: at most 4 per the operand name).
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16856 
// String.indexOf with a constant needle length, UL encoding. Note the
// operand is immI_1: only a single-element needle is matched here.
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16878 
// Find a single char in a UTF-16 string (encoding U). NEON variant,
// selected only when SVE is disabled.
instruct string_indexof_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                             iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                             iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  predicate((UseSVE == 0) && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "StringUTF16 IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16897 
// Find a single char in a Latin1 string (encoding L). NEON variant,
// selected only when SVE is disabled.
instruct stringL_indexof_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  predicate((UseSVE == 0) && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "StringLatin1 IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ stringL_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                            $result$$Register, $tmp1$$Register, $tmp2$$Register,
                            $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16916 
// SVE variant of single-char indexOf for Latin1 strings (isL == true).
// NOTE(review): unlike the NEON variants above, str1/cnt1/ch are not
// USE_KILLed here -- presumably the SVE stub preserves its inputs; verify
// against string_indexof_char_sve in the macroassembler.
instruct stringL_indexof_char_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                                  iRegI_R0 result, vecA ztmp1, vecA ztmp2,
                                  pRegGov pgtmp, pReg ptmp, rFlagsReg cr) %{
  predicate(UseSVE > 0 && ((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L);
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(TEMP ztmp1, TEMP ztmp2, TEMP pgtmp, TEMP ptmp, KILL cr);
  format %{ "StringLatin1 IndexOf char[] $str1,$cnt1,$ch -> $result # use sve" %}
  ins_encode %{
    __ string_indexof_char_sve($str1$$Register, $cnt1$$Register, $ch$$Register,
                               $result$$Register, $ztmp1$$FloatRegister,
                               $ztmp2$$FloatRegister, $pgtmp$$PRegister,
                               $ptmp$$PRegister, true /* isL */);
  %}
  ins_pipe(pipe_class_memory);
%}
16932 
// SVE variant of single-char indexOf for UTF-16 strings (isL == false).
instruct stringU_indexof_char_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                                  iRegI_R0 result, vecA ztmp1, vecA ztmp2,
                                  pRegGov pgtmp, pReg ptmp, rFlagsReg cr) %{
  predicate(UseSVE > 0 && ((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U);
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(TEMP ztmp1, TEMP ztmp2, TEMP pgtmp, TEMP ptmp, KILL cr);
  format %{ "StringUTF16 IndexOf char[] $str1,$cnt1,$ch -> $result # use sve" %}
  ins_encode %{
    __ string_indexof_char_sve($str1$$Register, $cnt1$$Register, $ch$$Register,
                               $result$$Register, $ztmp1$$FloatRegister,
                               $ztmp2$$FloatRegister, $pgtmp$$PRegister,
                               $ptmp$$PRegister, false /* isL */);
  %}
  ins_pipe(pipe_class_memory);
%}
16948 
// String equality, LL encoding; cnt is the length in bytes (see the comment
// inside the encode block).
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16964 
// Array equality, LL encoding (byte arrays). The trailing argument to the
// stub is 1 here vs 2 in the char variant below. A null return from the
// stub means the CodeCache is full: record the failure and bail out.
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                       vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3,
         TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         TEMP vtmp6, TEMP vtmp7, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result # KILL $ary1 $ary2 $tmp $tmp1 $tmp2 $tmp3 V0-V7 cr" %}
  ins_encode %{
    address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register,
                                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                                   $result$$Register, $tmp$$Register, 1);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
16989 
// Array equality, UU encoding (char arrays). Identical to array_equalsB
// except the trailing stub argument is 2 instead of 1. A null return from
// the stub means the CodeCache is full: record the failure and bail out.
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                       vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3,
         TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         TEMP vtmp6, TEMP vtmp7, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result # KILL $ary1 $ary2 $tmp $tmp1 $tmp2 $tmp3 V0-V7 cr" %}
  ins_encode %{
    address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register,
                                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                                   $result$$Register, $tmp$$Register, 2);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
17014 
// Vectorized array hash code. basic_type is a compile-time constant selecting
// the element type. Note: the first four vector temps are passed to the stub
// in reverse order (vtmp3..vtmp0). A null return from the stub means the
// CodeCache is full: record the failure and bail out.
instruct arrays_hashcode(iRegP_R1 ary, iRegI_R2 cnt, iRegI_R0 result, immI basic_type,
                         vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                         vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                         vRegD_V12 vtmp8, vRegD_V13 vtmp9, rFlagsReg cr)
%{
  match(Set result (VectorizedHashCode (Binary ary cnt) (Binary result basic_type)));
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5, TEMP vtmp6,
         TEMP vtmp7, TEMP vtmp8, TEMP vtmp9, USE_KILL ary, USE_KILL cnt, USE basic_type, KILL cr);

  format %{ "Array HashCode array[] $ary,$cnt,$result,$basic_type -> $result   // KILL all" %}
  ins_encode %{
    address tpc = __ arrays_hashcode($ary$$Register, $cnt$$Register, $result$$Register,
                                     $vtmp3$$FloatRegister, $vtmp2$$FloatRegister,
                                     $vtmp1$$FloatRegister, $vtmp0$$FloatRegister,
                                     $vtmp4$$FloatRegister, $vtmp5$$FloatRegister,
                                     $vtmp6$$FloatRegister, $vtmp7$$FloatRegister,
                                     $vtmp8$$FloatRegister, $vtmp9$$FloatRegister,
                                     (BasicType)$basic_type$$constant);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
17040 
// CountPositives intrinsic over byte[] (see format). A null return from the
// stub means the CodeCache is full: record the failure and bail out.
instruct count_positives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (CountPositives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "count positives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    address tpc = __ count_positives($ary1$$Register, $len$$Register, $result$$Register);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe( pipe_slow );
%}
17055 
17056 // fast char[] to byte[] compression
// fast char[] to byte[] compression
// (StrCompressedCopy node; result receives the stub's return value.)
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                         vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         USE_KILL src, USE_KILL dst, USE len, KILL cr);

  format %{ "String Compress $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $result$$Register, $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                           $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                           $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
17075 
17076 // fast byte[] to char[] inflation
// fast byte[] to char[] inflation
// Only vtmp0-vtmp2 and tmp are passed to the stub; vtmp3-vtmp6 are listed as
// TEMPs as well (presumably clobbered inside the stub -- the effect list pins
// them so the allocator does not keep live values there). A null return from
// the stub means the CodeCache is full: record the failure and bail out.
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len, iRegP_R3 tmp,
                        vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                        vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3,
         TEMP vtmp4, TEMP vtmp5, TEMP vtmp6, TEMP tmp,
         USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst # KILL $tmp $src $dst $len V0-V6 cr" %}
  ins_encode %{
    address tpc = __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                                        $vtmp2$$FloatRegister, $tmp$$Register);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
17098 
17099 // encode char[] to byte[] in ISO_8859_1
// encode char[] to byte[] in ISO_8859_1
// Non-ASCII variant: selected when is_ascii() is false, hence the 'false'
// ascii flag passed to the stub (cf. encode_ascii_array below).
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                          vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                          iRegI_R0 result, rFlagsReg cr)
%{
  predicate(!((EncodeISOArrayNode*)n)->is_ascii());
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE len, KILL vtmp0, KILL vtmp1,
         KILL vtmp2, KILL vtmp3, KILL vtmp4, KILL vtmp5, KILL cr);

  format %{ "Encode ISO array $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
                        $result$$Register, false,
                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                        $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                        $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_class_memory);
%}
17120 
// ASCII variant of EncodeISOArray: selected when is_ascii() is true, hence
// the 'true' ascii flag passed to the same stub as encode_iso_array.
instruct encode_ascii_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                            vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                            vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                            iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((EncodeISOArrayNode*)n)->is_ascii());
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE len, KILL vtmp0, KILL vtmp1,
         KILL vtmp2, KILL vtmp3, KILL vtmp4, KILL vtmp5, KILL cr);

  format %{ "Encode ASCII array $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
                        $result$$Register, true,
                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                        $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                        $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_class_memory);
%}
17141 
17142 //----------------------------- CompressBits/ExpandBits ------------------------
17143 
// CompressBits on a 32-bit value in registers: move src and mask into S lanes
// of vector temps, run the SVE2 BITPERM BEXT instruction, move the result
// back to a general-purpose register.
instruct compressBitsI_reg(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ S, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ S, 0, $mask$$Register);
    __ sve_bext($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17161 
// CompressBits of a loaded int with a constant mask: the source is loaded
// straight into the vector register and the mask is materialized from the
// constant pool, avoiding the GPR round trip of the _reg variant.
instruct compressBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits (LoadI mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrs   $tsrc, $mem\n\t"
            "ldrs   $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    loadStore(masm, &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ ldrs($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bext($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17180 
// CompressBits on a 64-bit value in registers: same pattern as the int
// variant but operating on D lanes.
instruct compressBitsL_reg(iRegLNoSp dst, iRegL src, iRegL mask,
                           vRegD tdst, vRegD tsrc, vRegD tmask) %{
  match(Set dst (CompressBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ D, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ D, 0, $mask$$Register);
    __ sve_bext($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17198 
// CompressBits of a loaded long with a constant mask: source loaded straight
// into the vector register, mask materialized from the constant pool.
instruct compressBitsL_memcon(iRegLNoSp dst, memory8 mem, immL mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits (LoadL mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrd   $tsrc, $mem\n\t"
            "ldrd   $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    loadStore(masm, &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ ldrd($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bext($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17217 
// ExpandBits on a 32-bit value in registers: like compressBitsI_reg but
// using the SVE2 BITPERM BDEP instruction.
instruct expandBitsI_reg(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask,
                         vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ S, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ S, 0, $mask$$Register);
    __ sve_bdep($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17235 
// ExpandBits of a loaded int with a constant mask: source loaded straight
// into the vector register, mask materialized from the constant pool.
instruct expandBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask,
                         vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits (LoadI mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrs   $tsrc, $mem\n\t"
            "ldrs   $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    loadStore(masm, &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ ldrs($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bdep($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17254 
// ExpandBits on a 64-bit value in registers: same pattern as the int variant
// but operating on D lanes.
instruct expandBitsL_reg(iRegLNoSp dst, iRegL src, iRegL mask,
                         vRegD tdst, vRegD tsrc, vRegD tmask) %{
  match(Set dst (ExpandBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ D, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ D, 0, $mask$$Register);
    __ sve_bdep($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17272 
17273 
// ExpandBits (Long.expand) with a memory source and an immediate mask.
// The long source is loaded from memory directly into an FP/SIMD temp
// (skipping a GPR round-trip) and the constant mask is materialized from
// the constant pool before the SVE2 BDEP bit deposit on the D-sized lane.
// Fix(review): dst was declared iRegINoSp and the temps vRegF, but
// (ExpandBits (LoadL mem) mask) produces a long and every operation here
// is D-sized; use iRegLNoSp/vRegD to match expandBitsL_reg.
// NOTE(review): assumes an SVE2-BitPerm availability predicate is applied
// outside this chunk -- confirm against the surrounding file.
instruct expandBitsL_memcon(iRegLNoSp dst, memory8 mem, immL mask,
                         vRegD tdst, vRegD tsrc, vRegD tmask) %{
  match(Set dst (ExpandBits (LoadL mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrd   $tsrc, $mem\n\t"
            "ldrd   $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    // Load the 64-bit source straight into lane 0 of the FP temp.
    loadStore(masm, &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    // Materialize the immediate mask from the constant pool.
    __ ldrd($tmask$$FloatRegister, $constantaddress($mask));
    // SVE2 bit deposit on the D-sized element.
    __ sve_bdep($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    // Move the 64-bit result back to the destination GPR.
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17292 
17293 //----------------------------- Reinterpret ----------------------------------
17294 // Reinterpret a half-precision float value in a floating point register to a general purpose register
instruct reinterpretHF2S(iRegINoSp dst, vRegF src) %{
  match(Set dst (ReinterpretHF2S src));
  format %{ "reinterpretHF2S $dst, $src" %}
  ins_encode %{
    // Signed move of the 16-bit H element in lane 0 to the GPR.  The sign
    // extension matches Java's short -> int widening of the raw float16
    // bit pattern.
    __ smov($dst$$Register, $src$$FloatRegister, __ H, 0);
  %}
  ins_pipe(pipe_slow);
%}
17303 
17304 // Reinterpret a half-precision float value in a general purpose register to a floating point register
instruct reinterpretS2HF(vRegF dst, iRegINoSp src) %{
  match(Set dst (ReinterpretS2HF src));
  format %{ "reinterpretS2HF $dst, $src" %}
  ins_encode %{
    // Insert the low 16 bits of the GPR into the H-sized element at
    // lane 0 of the destination FP register.
    __ mov($dst$$FloatRegister, __ H, 0, $src$$Register);
  %}
  ins_pipe(pipe_slow);
%}
17313 
17314 // Without this optimization, ReinterpretS2HF (ConvF2HF src) would result in the following
17315 // instructions (the first two are for ConvF2HF and the last instruction is for ReinterpretS2HF) -
17316 // fcvt $tmp1_fpr, $src_fpr    // Convert float to half-precision float
17317 // mov  $tmp2_gpr, $tmp1_fpr   // Move half-precision float in FPR to a GPR
17318 // mov  $dst_fpr,  $tmp2_gpr   // Move the result from a GPR to an FPR
17319 // The move from FPR to GPR in ConvF2HF and the move from GPR to FPR in ReinterpretS2HF
17320 // can be omitted in this pattern, resulting in -
17321 // fcvt $dst, $src  // Convert float to half-precision float
instruct convF2HFAndS2HF(vRegF dst, vRegF src)
%{
  match(Set dst (ReinterpretS2HF (ConvF2HF src)));
  format %{ "convF2HFAndS2HF $dst, $src" %}
  ins_encode %{
    // Fused pattern: convert single-precision to half-precision entirely
    // within FP registers, eliding the FPR->GPR->FPR round-trip described
    // in the comment above.
    __ fcvtsh($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
17331 
17332 // Without this optimization, ConvHF2F (ReinterpretHF2S src) would result in the following
17333 // instructions (the first one is for ReinterpretHF2S and the last two are for ConvHF2F) -
17334 // mov  $tmp1_gpr, $src_fpr  // Move the half-precision float from an FPR to a GPR
17335 // mov  $tmp2_fpr, $tmp1_gpr // Move the same value from GPR to an FPR
17336 // fcvt $dst_fpr,  $tmp2_fpr // Convert the half-precision float to 32-bit float
17337 // The move from FPR to GPR in ReinterpretHF2S and the move from GPR to FPR in ConvHF2F
17338 // can be omitted as the input (src) is already in an FPR required for the fcvths instruction
17339 // resulting in -
17340 // fcvt $dst, $src  // Convert half-precision float to a 32-bit float
instruct convHF2SAndHF2F(vRegF dst, vRegF src)
%{
  match(Set dst (ConvHF2F (ReinterpretHF2S src)));
  format %{ "convHF2SAndHF2F $dst, $src" %}
  ins_encode %{
    // Fused pattern: convert half-precision to single-precision entirely
    // within FP registers, eliding the FPR->GPR->FPR round-trip described
    // in the comment above.
    __ fcvths($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
17350 
17351 // ============================================================================
17352 // This name is KNOWN by the ADLC and cannot be changed.
17353 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
17354 // for this guy.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  // Free: the current thread already lives in the dedicated thread
  // register (the thread_RegP class), so no code is emitted.
  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  // Zero-size instruction: nothing is emitted.
  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
17369 
17370 //----------PEEPHOLE RULES-----------------------------------------------------
17371 // These must follow all instruction definitions as they use the names
17372 // defined in the instructions definitions.
17373 //
17374 // peepmatch ( root_instr_name [preceding_instruction]* );
17375 //
17376 // peepconstraint %{
17377 // (instruction_number.operand_name relational_op instruction_number.operand_name
17378 //  [, ...] );
17379 // // instruction numbers are zero-based using left to right order in peepmatch
17380 //
17381 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
17382 // // provide an instruction_number.operand_name for each operand that appears
17383 // // in the replacement instruction's match rule
17384 //
17385 // ---------VM FLAGS---------------------------------------------------------
17386 //
17387 // All peephole optimizations can be turned off using -XX:-OptoPeephole
17388 //
17389 // Each peephole rule is given an identifying number starting with zero and
17390 // increasing by one in the order seen by the parser.  An individual peephole
17391 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
17392 // on the command-line.
17393 //
17394 // ---------CURRENT LIMITATIONS----------------------------------------------
17395 //
17396 // Only match adjacent instructions in same basic block
17397 // Only equality constraints
17398 // Only constraints between operands, not (0.dest_reg == RAX_enc)
17399 // Only one replacement instruction
17400 //
17401 // ---------EXAMPLE----------------------------------------------------------
17402 //
17403 // // pertinent parts of existing instructions in architecture description
17404 // instruct movI(iRegINoSp dst, iRegI src)
17405 // %{
17406 //   match(Set dst (CopyI src));
17407 // %}
17408 //
17409 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
17410 // %{
17411 //   match(Set dst (AddI dst src));
17412 //   effect(KILL cr);
17413 // %}
17414 //
17415 // // Change (inc mov) to lea
17416 // peephole %{
17417 //   // increment preceded by register-register move
17418 //   peepmatch ( incI_iReg movI );
17419 //   // require that the destination register of the increment
17420 //   // match the destination register of the move
17421 //   peepconstraint ( 0.dst == 1.dst );
17422 //   // construct a replacement instruction that sets
17423 //   // the destination to ( move's source register + one )
17424 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
17425 // %}
17426 //
17427 
17428 // Implementation no longer uses movX instructions since
17429 // machine-independent system no longer uses CopyX nodes.
17430 //
17431 // peephole
17432 // %{
17433 //   peepmatch (incI_iReg movI);
17434 //   peepconstraint (0.dst == 1.dst);
17435 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17436 // %}
17437 
17438 // peephole
17439 // %{
17440 //   peepmatch (decI_iReg movI);
17441 //   peepconstraint (0.dst == 1.dst);
17442 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17443 // %}
17444 
17445 // peephole
17446 // %{
17447 //   peepmatch (addI_iReg_imm movI);
17448 //   peepconstraint (0.dst == 1.dst);
17449 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17450 // %}
17451 
17452 // peephole
17453 // %{
17454 //   peepmatch (incL_iReg movL);
17455 //   peepconstraint (0.dst == 1.dst);
17456 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17457 // %}
17458 
17459 // peephole
17460 // %{
17461 //   peepmatch (decL_iReg movL);
17462 //   peepconstraint (0.dst == 1.dst);
17463 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17464 // %}
17465 
17466 // peephole
17467 // %{
17468 //   peepmatch (addL_iReg_imm movL);
17469 //   peepconstraint (0.dst == 1.dst);
17470 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17471 // %}
17472 
17473 // peephole
17474 // %{
17475 //   peepmatch (addP_iReg_imm movP);
17476 //   peepconstraint (0.dst == 1.dst);
17477 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
17478 // %}
17479 
17480 // // Change load of spilled value to only a spill
17481 // instruct storeI(memory mem, iRegI src)
17482 // %{
17483 //   match(Set mem (StoreI mem src));
17484 // %}
17485 //
17486 // instruct loadI(iRegINoSp dst, memory mem)
17487 // %{
17488 //   match(Set dst (LoadI mem));
17489 // %}
17490 //
17491 
17492 //----------SMARTSPILL RULES---------------------------------------------------
17493 // These must follow all instruction definitions as they use the names
17494 // defined in the instructions definitions.
17495 
17496 // Local Variables:
17497 // mode: c++
17498 // End: