1 //
    2 // Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
    3 // Copyright (c) 2014, 2021, Red Hat, Inc. All rights reserved.
    4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    5 //
    6 // This code is free software; you can redistribute it and/or modify it
    7 // under the terms of the GNU General Public License version 2 only, as
    8 // published by the Free Software Foundation.
    9 //
   10 // This code is distributed in the hope that it will be useful, but WITHOUT
   11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   13 // version 2 for more details (a copy is included in the LICENSE file that
   14 // accompanied this code).
   15 //
   16 // You should have received a copy of the GNU General Public License version
   17 // 2 along with this work; if not, write to the Free Software Foundation,
   18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   19 //
   20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   21 // or visit www.oracle.com if you need additional information or have any
   22 // questions.
   23 //
   24 //
   25 
   26 // AArch64 Architecture Description File
   27 
   28 //----------REGISTER DEFINITION BLOCK------------------------------------------
   29 // This information is used by the matcher and the register allocator to
   30 // describe individual registers and classes of registers within the target
   31 // architecture.
   32 
   33 register %{
   34 //----------Architecture Description Register Definitions----------------------
   35 // General Registers
   36 // "reg_def"  name ( register save type, C convention save type,
   37 //                   ideal register type, encoding );
   38 // Register Save Types:
   39 //
   40 // NS  = No-Save:       The register allocator assumes that these registers
   41 //                      can be used without saving upon entry to the method, &
   42 //                      that they do not need to be saved at call sites.
   43 //
   44 // SOC = Save-On-Call:  The register allocator assumes that these registers
   45 //                      can be used without saving upon entry to the method,
   46 //                      but that they must be saved at call sites.
   47 //
   48 // SOE = Save-On-Entry: The register allocator assumes that these registers
   49 //                      must be saved before using them upon entry to the
   50 //                      method, but they do not need to be saved at call
   51 //                      sites.
   52 //
   53 // AS  = Always-Save:   The register allocator assumes that these registers
   54 //                      must be saved before using them upon entry to the
   55 //                      method, & that they must be saved at call sites.
   56 //
   57 // Ideal Register Type is used to determine how to save & restore a
   58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
   59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
   60 //
   61 // The encoding number is the actual bit-pattern placed into the opcodes.
   62 
   63 // We must define the 64 bit int registers in two 32 bit halves, the
   64 // real lower register and a virtual upper half register. upper halves
   65 // are used by the register allocator but are not actually supplied as
   66 // operands to memory ops.
   67 //
   68 // follow the C1 compiler in making registers
   69 //
   70 //   r0-r7,r10-r26 volatile (caller save)
   71 //   r27-r32 system (no save, no allocate)
   72 //   r8-r9 non-allocatable (so we can use them as scratch regs)
   73 //
   74 // as regards Java usage. we don't use any callee save registers
   75 // because this makes it difficult to de-optimise a frame (see comment
   76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
   77 //
   78 
   79 // General Registers
   80 
   81 reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
   82 reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
   83 reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
   84 reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
   85 reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
   86 reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
   87 reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
   88 reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
   89 reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
   90 reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
   91 reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
   92 reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
   93 reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
   94 reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
   95 reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
   96 reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
   97 reg_def R8      ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()         ); // rscratch1, non-allocatable
   98 reg_def R8_H    ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()->next() );
   99 reg_def R9      ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()         ); // rscratch2, non-allocatable
  100 reg_def R9_H    ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()->next() );
  101 reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
  102 reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
  103 reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
  104 reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
  105 reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
  106 reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
  107 reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
  108 reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
  109 reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
  110 reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
  111 reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
  112 reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
  113 reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
  114 reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
  115 reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
  116 reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
  117 reg_def R18     ( SOC, SOC, Op_RegI, 18, r18_tls->as_VMReg()        );
  118 reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18_tls->as_VMReg()->next());
  119 reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
  120 reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
  121 reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
  122 reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
  123 reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
  124 reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
  125 reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
  126 reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
  127 reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
  128 reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
  129 reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
  130 reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
  131 reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
  132 reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
  133 reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
  134 reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
  135 reg_def R27     ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
  136 reg_def R27_H   ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()->next());
  137 reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
  138 reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
  139 reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
  140 reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
  141 reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
  142 reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
  143 reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
  144 reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
  145 
  146 // ----------------------------
  147 // Float/Double/Vector Registers
  148 // ----------------------------
  149 
  150 // Double Registers
  151 
  152 // The rules of ADL require that double registers be defined in pairs.
  153 // Each pair must be two 32-bit values, but not necessarily a pair of
  154 // single float registers. In each pair, ADLC-assigned register numbers
  155 // must be adjacent, with the lower number even. Finally, when the
  156 // CPU stores such a register pair to memory, the word associated with
  157 // the lower ADLC-assigned number must be stored to the lower address.
  158 
  159 // AArch64 has 32 floating-point registers. Each can store a vector of
  160 // single or double precision floating-point values up to 8 * 32
  161 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
  162 // use the first float or double element of the vector.
  163 
  164 // For Java use, float registers v0-v15 are always save-on-call, whereas
  165 // the platform ABI treats v8-v15 as callee save (hence SOE below on
  166 // their low halves). Float registers v16-v31 are SOC per the platform spec.
  167 
  168 // For SVE vector registers, we simply extend vector register size to 8
  169 // 'logical' slots. This is nominally 256 bits but it actually covers
  170 // all possible 'physical' SVE vector register lengths from 128 ~ 2048
  171 // bits. The 'physical' SVE vector register length is detected during
  172 // startup, so the register allocator is able to identify the correct
  173 // number of bytes needed for an SVE spill/unspill.
  174 // Note that a vector register with 4 slots denotes a 128-bit NEON
  175 // register allowing it to be distinguished from the corresponding SVE
  176 // vector register when the SVE vector length is 128 bits.
  177 
  178   reg_def V0   ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()          );
  179   reg_def V0_H ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next()  );
  180   reg_def V0_J ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(2) );
  181   reg_def V0_K ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(3) );
  182   reg_def V0_L ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(4) );
  183   reg_def V0_M ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(5) );
  184   reg_def V0_N ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(6) );
  185   reg_def V0_O ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(7) );
  186 
  187   reg_def V1   ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()          );
  188   reg_def V1_H ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next()  );
  189   reg_def V1_J ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(2) );
  190   reg_def V1_K ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(3) );
  191   reg_def V1_L ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(4) );
  192   reg_def V1_M ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(5) );
  193   reg_def V1_N ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(6) );
  194   reg_def V1_O ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(7) );
  195 
  196   reg_def V2   ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()          );
  197   reg_def V2_H ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next()  );
  198   reg_def V2_J ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(2) );
  199   reg_def V2_K ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(3) );
  200   reg_def V2_L ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(4) );
  201   reg_def V2_M ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(5) );
  202   reg_def V2_N ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(6) );
  203   reg_def V2_O ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(7) );
  204 
  205   reg_def V3   ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()          );
  206   reg_def V3_H ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next()  );
  207   reg_def V3_J ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(2) );
  208   reg_def V3_K ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(3) );
  209   reg_def V3_L ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(4) );
  210   reg_def V3_M ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(5) );
  211   reg_def V3_N ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(6) );
  212   reg_def V3_O ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(7) );
  213 
  214   reg_def V4   ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()          );
  215   reg_def V4_H ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next()  );
  216   reg_def V4_J ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(2) );
  217   reg_def V4_K ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(3) );
  218   reg_def V4_L ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(4) );
  219   reg_def V4_M ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(5) );
  220   reg_def V4_N ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(6) );
  221   reg_def V4_O ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(7) );
  222 
  223   reg_def V5   ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()          );
  224   reg_def V5_H ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next()  );
  225   reg_def V5_J ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(2) );
  226   reg_def V5_K ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(3) );
  227   reg_def V5_L ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(4) );
  228   reg_def V5_M ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(5) );
  229   reg_def V5_N ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(6) );
  230   reg_def V5_O ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(7) );
  231 
  232   reg_def V6   ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()          );
  233   reg_def V6_H ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next()  );
  234   reg_def V6_J ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(2) );
  235   reg_def V6_K ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(3) );
  236   reg_def V6_L ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(4) );
  237   reg_def V6_M ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(5) );
  238   reg_def V6_N ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(6) );
  239   reg_def V6_O ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(7) );
  240 
  241   reg_def V7   ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()          );
  242   reg_def V7_H ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next()  );
  243   reg_def V7_J ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(2) );
  244   reg_def V7_K ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(3) );
  245   reg_def V7_L ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(4) );
  246   reg_def V7_M ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(5) );
  247   reg_def V7_N ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(6) );
  248   reg_def V7_O ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(7) );
  249 
  250   reg_def V8   ( SOC, SOE, Op_RegF, 8, v8->as_VMReg()          );
  251   reg_def V8_H ( SOC, SOE, Op_RegF, 8, v8->as_VMReg()->next()  );
  252   reg_def V8_J ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(2) );
  253   reg_def V8_K ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(3) );
  254   reg_def V8_L ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(4) );
  255   reg_def V8_M ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(5) );
  256   reg_def V8_N ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(6) );
  257   reg_def V8_O ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(7) );
  258 
  259   reg_def V9   ( SOC, SOE, Op_RegF, 9, v9->as_VMReg()          );
  260   reg_def V9_H ( SOC, SOE, Op_RegF, 9, v9->as_VMReg()->next()  );
  261   reg_def V9_J ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(2) );
  262   reg_def V9_K ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(3) );
  263   reg_def V9_L ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(4) );
  264   reg_def V9_M ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(5) );
  265   reg_def V9_N ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(6) );
  266   reg_def V9_O ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(7) );
  267 
  268   reg_def V10   ( SOC, SOE, Op_RegF, 10, v10->as_VMReg()          );
  269   reg_def V10_H ( SOC, SOE, Op_RegF, 10, v10->as_VMReg()->next()  );
  270   reg_def V10_J ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2) );
  271   reg_def V10_K ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3) );
  272   reg_def V10_L ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(4) );
  273   reg_def V10_M ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(5) );
  274   reg_def V10_N ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(6) );
  275   reg_def V10_O ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(7) );
  276 
  277   reg_def V11   ( SOC, SOE, Op_RegF, 11, v11->as_VMReg()          );
  278   reg_def V11_H ( SOC, SOE, Op_RegF, 11, v11->as_VMReg()->next()  );
  279   reg_def V11_J ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2) );
  280   reg_def V11_K ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3) );
  281   reg_def V11_L ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(4) );
  282   reg_def V11_M ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(5) );
  283   reg_def V11_N ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(6) );
  284   reg_def V11_O ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(7) );
  285 
  286   reg_def V12   ( SOC, SOE, Op_RegF, 12, v12->as_VMReg()          );
  287   reg_def V12_H ( SOC, SOE, Op_RegF, 12, v12->as_VMReg()->next()  );
  288   reg_def V12_J ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2) );
  289   reg_def V12_K ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3) );
  290   reg_def V12_L ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(4) );
  291   reg_def V12_M ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(5) );
  292   reg_def V12_N ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(6) );
  293   reg_def V12_O ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(7) );
  294 
  295   reg_def V13   ( SOC, SOE, Op_RegF, 13, v13->as_VMReg()          );
  296   reg_def V13_H ( SOC, SOE, Op_RegF, 13, v13->as_VMReg()->next()  );
  297   reg_def V13_J ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2) );
  298   reg_def V13_K ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3) );
  299   reg_def V13_L ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(4) );
  300   reg_def V13_M ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(5) );
  301   reg_def V13_N ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(6) );
  302   reg_def V13_O ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(7) );
  303 
  304   reg_def V14   ( SOC, SOE, Op_RegF, 14, v14->as_VMReg()          );
  305   reg_def V14_H ( SOC, SOE, Op_RegF, 14, v14->as_VMReg()->next()  );
  306   reg_def V14_J ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2) );
  307   reg_def V14_K ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3) );
  308   reg_def V14_L ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(4) );
  309   reg_def V14_M ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(5) );
  310   reg_def V14_N ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(6) );
  311   reg_def V14_O ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(7) );
  312 
  313   reg_def V15   ( SOC, SOE, Op_RegF, 15, v15->as_VMReg()          );
  314   reg_def V15_H ( SOC, SOE, Op_RegF, 15, v15->as_VMReg()->next()  );
  315   reg_def V15_J ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2) );
  316   reg_def V15_K ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3) );
  317   reg_def V15_L ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(4) );
  318   reg_def V15_M ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(5) );
  319   reg_def V15_N ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(6) );
  320   reg_def V15_O ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(7) );
  321 
  322   reg_def V16   ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()          );
  323   reg_def V16_H ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next()  );
  324   reg_def V16_J ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2) );
  325   reg_def V16_K ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3) );
  326   reg_def V16_L ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(4) );
  327   reg_def V16_M ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(5) );
  328   reg_def V16_N ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(6) );
  329   reg_def V16_O ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(7) );
  330 
  331   reg_def V17   ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()          );
  332   reg_def V17_H ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next()  );
  333   reg_def V17_J ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2) );
  334   reg_def V17_K ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3) );
  335   reg_def V17_L ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(4) );
  336   reg_def V17_M ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(5) );
  337   reg_def V17_N ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(6) );
  338   reg_def V17_O ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(7) );
  339 
  340   reg_def V18   ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()          );
  341   reg_def V18_H ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next()  );
  342   reg_def V18_J ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2) );
  343   reg_def V18_K ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3) );
  344   reg_def V18_L ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(4) );
  345   reg_def V18_M ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(5) );
  346   reg_def V18_N ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(6) );
  347   reg_def V18_O ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(7) );
  348 
  349   reg_def V19   ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()          );
  350   reg_def V19_H ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next()  );
  351   reg_def V19_J ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2) );
  352   reg_def V19_K ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3) );
  353   reg_def V19_L ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(4) );
  354   reg_def V19_M ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(5) );
  355   reg_def V19_N ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(6) );
  356   reg_def V19_O ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(7) );
  357 
  358   reg_def V20   ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()          );
  359   reg_def V20_H ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next()  );
  360   reg_def V20_J ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2) );
  361   reg_def V20_K ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3) );
  362   reg_def V20_L ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(4) );
  363   reg_def V20_M ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(5) );
  364   reg_def V20_N ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(6) );
  365   reg_def V20_O ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(7) );
  366 
  367   reg_def V21   ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()          );
  368   reg_def V21_H ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next()  );
  369   reg_def V21_J ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2) );
  370   reg_def V21_K ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3) );
  371   reg_def V21_L ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(4) );
  372   reg_def V21_M ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(5) );
  373   reg_def V21_N ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(6) );
  374   reg_def V21_O ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(7) );
  375 
  376   reg_def V22   ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()          );
  377   reg_def V22_H ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next()  );
  378   reg_def V22_J ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2) );
  379   reg_def V22_K ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3) );
  380   reg_def V22_L ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(4) );
  381   reg_def V22_M ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(5) );
  382   reg_def V22_N ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(6) );
  383   reg_def V22_O ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(7) );
  384 
  385   reg_def V23   ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()          );
  386   reg_def V23_H ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next()  );
  387   reg_def V23_J ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2) );
  388   reg_def V23_K ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3) );
  389   reg_def V23_L ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(4) );
  390   reg_def V23_M ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(5) );
  391   reg_def V23_N ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(6) );
  392   reg_def V23_O ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(7) );
  393 
  394   reg_def V24   ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()          );
  395   reg_def V24_H ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next()  );
  396   reg_def V24_J ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2) );
  397   reg_def V24_K ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3) );
  398   reg_def V24_L ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(4) );
  399   reg_def V24_M ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(5) );
  400   reg_def V24_N ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(6) );
  401   reg_def V24_O ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(7) );
  402 
  403   reg_def V25   ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()          );
  404   reg_def V25_H ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next()  );
  405   reg_def V25_J ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2) );
  406   reg_def V25_K ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3) );
  407   reg_def V25_L ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(4) );
  408   reg_def V25_M ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(5) );
  409   reg_def V25_N ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(6) );
  410   reg_def V25_O ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(7) );
  411 
  412   reg_def V26   ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()          );
  413   reg_def V26_H ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next()  );
  414   reg_def V26_J ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2) );
  415   reg_def V26_K ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3) );
  416   reg_def V26_L ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(4) );
  417   reg_def V26_M ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(5) );
  418   reg_def V26_N ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(6) );
  419   reg_def V26_O ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(7) );
  420 
  421   reg_def V27   ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()          );
  422   reg_def V27_H ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next()  );
  423   reg_def V27_J ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2) );
  424   reg_def V27_K ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3) );
  425   reg_def V27_L ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(4) );
  426   reg_def V27_M ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(5) );
  427   reg_def V27_N ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(6) );
  428   reg_def V27_O ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(7) );
  429 
  430   reg_def V28   ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()          );
  431   reg_def V28_H ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next()  );
  432   reg_def V28_J ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2) );
  433   reg_def V28_K ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3) );
  434   reg_def V28_L ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(4) );
  435   reg_def V28_M ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(5) );
  436   reg_def V28_N ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(6) );
  437   reg_def V28_O ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(7) );
  438 
  439   reg_def V29   ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()          );
  440   reg_def V29_H ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next()  );
  441   reg_def V29_J ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2) );
  442   reg_def V29_K ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3) );
  443   reg_def V29_L ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(4) );
  444   reg_def V29_M ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(5) );
  445   reg_def V29_N ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(6) );
  446   reg_def V29_O ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(7) );
  447 
  448   reg_def V30   ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()          );
  449   reg_def V30_H ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next()  );
  450   reg_def V30_J ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2) );
  451   reg_def V30_K ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3) );
  452   reg_def V30_L ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(4) );
  453   reg_def V30_M ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(5) );
  454   reg_def V30_N ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(6) );
  455   reg_def V30_O ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(7) );
  456 
  457   reg_def V31   ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()          );
  458   reg_def V31_H ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next()  );
  459   reg_def V31_J ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2) );
  460   reg_def V31_K ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3) );
  461   reg_def V31_L ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(4) );
  462   reg_def V31_M ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(5) );
  463   reg_def V31_N ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(6) );
  464   reg_def V31_O ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(7) );
  465 
  466 
  467 // ----------------------------
  468 // SVE Predicate Registers
  469 // ----------------------------
  470   reg_def P0 (SOC, SOC, Op_RegVectMask, 0, p0->as_VMReg());
  471   reg_def P1 (SOC, SOC, Op_RegVectMask, 1, p1->as_VMReg());
  472   reg_def P2 (SOC, SOC, Op_RegVectMask, 2, p2->as_VMReg());
  473   reg_def P3 (SOC, SOC, Op_RegVectMask, 3, p3->as_VMReg());
  474   reg_def P4 (SOC, SOC, Op_RegVectMask, 4, p4->as_VMReg());
  475   reg_def P5 (SOC, SOC, Op_RegVectMask, 5, p5->as_VMReg());
  476   reg_def P6 (SOC, SOC, Op_RegVectMask, 6, p6->as_VMReg());
  477   reg_def P7 (SOC, SOC, Op_RegVectMask, 7, p7->as_VMReg());
  478   reg_def P8 (SOC, SOC, Op_RegVectMask, 8, p8->as_VMReg());
  479   reg_def P9 (SOC, SOC, Op_RegVectMask, 9, p9->as_VMReg());
  480   reg_def P10 (SOC, SOC, Op_RegVectMask, 10, p10->as_VMReg());
  481   reg_def P11 (SOC, SOC, Op_RegVectMask, 11, p11->as_VMReg());
  482   reg_def P12 (SOC, SOC, Op_RegVectMask, 12, p12->as_VMReg());
  483   reg_def P13 (SOC, SOC, Op_RegVectMask, 13, p13->as_VMReg());
  484   reg_def P14 (SOC, SOC, Op_RegVectMask, 14, p14->as_VMReg());
  485   reg_def P15 (SOC, SOC, Op_RegVectMask, 15, p15->as_VMReg());
  486 
  487 // ----------------------------
  488 // Special Registers
  489 // ----------------------------
  490 
  491 // the AArch64 CPSR status flag register is not directly accessible as
  492 // an instruction operand. the FPSR status flag register is a system
  493 // register which can be written/read using MSR/MRS but again does not
  494 // appear as an operand (a code identifying the FPSR occurs as an
  495 // immediate value in the instruction).
  496 
  497 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
  498 
  499 // Specify priority of register selection within phases of register
  500 // allocation.  Highest priority is first.  A useful heuristic is to
  501 // give registers a low priority when they are required by machine
  502 // instructions, like EAX and EDX on I486, and choose no-save registers
  503 // before save-on-call, & save-on-call before save-on-entry.  Registers
  504 // which participate in fixed calling sequences should come last.
  505 // Registers which are used as pairs must fall on an even boundary.
  506 
  507 alloc_class chunk0(
  508     // volatiles
  509     R10, R10_H,
  510     R11, R11_H,
  511     R12, R12_H,
  512     R13, R13_H,
  513     R14, R14_H,
  514     R15, R15_H,
  515     R16, R16_H,
  516     R17, R17_H,
  517     R18, R18_H,
  518 
  519     // arg registers
  520     R0, R0_H,
  521     R1, R1_H,
  522     R2, R2_H,
  523     R3, R3_H,
  524     R4, R4_H,
  525     R5, R5_H,
  526     R6, R6_H,
  527     R7, R7_H,
  528 
  529     // non-volatiles
  530     R19, R19_H,
  531     R20, R20_H,
  532     R21, R21_H,
  533     R22, R22_H,
  534     R23, R23_H,
  535     R24, R24_H,
  536     R25, R25_H,
  537     R26, R26_H,
  538 
  539     // non-allocatable registers
  540 
  541     R27, R27_H, // heapbase
  542     R28, R28_H, // thread
  543     R29, R29_H, // fp
  544     R30, R30_H, // lr
  545     R31, R31_H, // sp
  546     R8, R8_H,   // rscratch1
  547     R9, R9_H,   // rscratch2
  548 );
  549 
  550 alloc_class chunk1(
  551 
  552     // no save
  553     V16, V16_H, V16_J, V16_K, V16_L, V16_M, V16_N, V16_O,
  554     V17, V17_H, V17_J, V17_K, V17_L, V17_M, V17_N, V17_O,
  555     V18, V18_H, V18_J, V18_K, V18_L, V18_M, V18_N, V18_O,
  556     V19, V19_H, V19_J, V19_K, V19_L, V19_M, V19_N, V19_O,
  557     V20, V20_H, V20_J, V20_K, V20_L, V20_M, V20_N, V20_O,
  558     V21, V21_H, V21_J, V21_K, V21_L, V21_M, V21_N, V21_O,
  559     V22, V22_H, V22_J, V22_K, V22_L, V22_M, V22_N, V22_O,
  560     V23, V23_H, V23_J, V23_K, V23_L, V23_M, V23_N, V23_O,
  561     V24, V24_H, V24_J, V24_K, V24_L, V24_M, V24_N, V24_O,
  562     V25, V25_H, V25_J, V25_K, V25_L, V25_M, V25_N, V25_O,
  563     V26, V26_H, V26_J, V26_K, V26_L, V26_M, V26_N, V26_O,
  564     V27, V27_H, V27_J, V27_K, V27_L, V27_M, V27_N, V27_O,
  565     V28, V28_H, V28_J, V28_K, V28_L, V28_M, V28_N, V28_O,
  566     V29, V29_H, V29_J, V29_K, V29_L, V29_M, V29_N, V29_O,
  567     V30, V30_H, V30_J, V30_K, V30_L, V30_M, V30_N, V30_O,
  568     V31, V31_H, V31_J, V31_K, V31_L, V31_M, V31_N, V31_O,
  569 
  570     // arg registers
  571     V0, V0_H, V0_J, V0_K, V0_L, V0_M, V0_N, V0_O,
  572     V1, V1_H, V1_J, V1_K, V1_L, V1_M, V1_N, V1_O,
  573     V2, V2_H, V2_J, V2_K, V2_L, V2_M, V2_N, V2_O,
  574     V3, V3_H, V3_J, V3_K, V3_L, V3_M, V3_N, V3_O,
  575     V4, V4_H, V4_J, V4_K, V4_L, V4_M, V4_N, V4_O,
  576     V5, V5_H, V5_J, V5_K, V5_L, V5_M, V5_N, V5_O,
  577     V6, V6_H, V6_J, V6_K, V6_L, V6_M, V6_N, V6_O,
  578     V7, V7_H, V7_J, V7_K, V7_L, V7_M, V7_N, V7_O,
  579 
  580     // non-volatiles
  581     V8, V8_H, V8_J, V8_K, V8_L, V8_M, V8_N, V8_O,
  582     V9, V9_H, V9_J, V9_K, V9_L, V9_M, V9_N, V9_O,
  583     V10, V10_H, V10_J, V10_K, V10_L, V10_M, V10_N, V10_O,
  584     V11, V11_H, V11_J, V11_K, V11_L, V11_M, V11_N, V11_O,
  585     V12, V12_H, V12_J, V12_K, V12_L, V12_M, V12_N, V12_O,
  586     V13, V13_H, V13_J, V13_K, V13_L, V13_M, V13_N, V13_O,
  587     V14, V14_H, V14_J, V14_K, V14_L, V14_M, V14_N, V14_O,
  588     V15, V15_H, V15_J, V15_K, V15_L, V15_M, V15_N, V15_O,
  589 );
  590 
  591 alloc_class chunk2 (
  592     P0,
  593     P1,
  594     P2,
  595     P3,
  596     P4,
  597     P5,
  598     P6,
  599     P7,
  600 
  601     P8,
  602     P9,
  603     P10,
  604     P11,
  605     P12,
  606     P13,
  607     P14,
  608     P15,
  609 );
  610 
  611 alloc_class chunk3(RFLAGS);
  612 
  613 //----------Architecture Description Register Classes--------------------------
  614 // Several register classes are automatically defined based upon information in
  615 // this architecture description.
  616 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
  617 // 2) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
  618 //
  619 
  620 // Class for all 32 bit general purpose registers
  621 reg_class all_reg32(
  622     R0,
  623     R1,
  624     R2,
  625     R3,
  626     R4,
  627     R5,
  628     R6,
  629     R7,
  630     R10,
  631     R11,
  632     R12,
  633     R13,
  634     R14,
  635     R15,
  636     R16,
  637     R17,
  638     R18,
  639     R19,
  640     R20,
  641     R21,
  642     R22,
  643     R23,
  644     R24,
  645     R25,
  646     R26,
  647     R27,
  648     R28,
  649     R29,
  650     R30,
  651     R31
  652 );
  653 
  654 
  655 // Class for all 32 bit integer registers (excluding SP which
  656 // will never be used as an integer register)
  657 reg_class any_reg32 %{
  658   return _ANY_REG32_mask;
  659 %}
  660 
  661 // Singleton class for R0 int register
  662 reg_class int_r0_reg(R0);
  663 
  664 // Singleton class for R2 int register
  665 reg_class int_r2_reg(R2);
  666 
  667 // Singleton class for R3 int register
  668 reg_class int_r3_reg(R3);
  669 
  670 // Singleton class for R4 int register
  671 reg_class int_r4_reg(R4);
  672 
  673 // Singleton class for R31 int register
  674 reg_class int_r31_reg(R31);
  675 
  676 // Class for all 64 bit general purpose registers
  677 reg_class all_reg(
  678     R0, R0_H,
  679     R1, R1_H,
  680     R2, R2_H,
  681     R3, R3_H,
  682     R4, R4_H,
  683     R5, R5_H,
  684     R6, R6_H,
  685     R7, R7_H,
  686     R10, R10_H,
  687     R11, R11_H,
  688     R12, R12_H,
  689     R13, R13_H,
  690     R14, R14_H,
  691     R15, R15_H,
  692     R16, R16_H,
  693     R17, R17_H,
  694     R18, R18_H,
  695     R19, R19_H,
  696     R20, R20_H,
  697     R21, R21_H,
  698     R22, R22_H,
  699     R23, R23_H,
  700     R24, R24_H,
  701     R25, R25_H,
  702     R26, R26_H,
  703     R27, R27_H,
  704     R28, R28_H,
  705     R29, R29_H,
  706     R30, R30_H,
  707     R31, R31_H
  708 );
  709 
  710 // Class for all long integer registers (including SP)
  711 reg_class any_reg %{
  712   return _ANY_REG_mask;
  713 %}
  714 
  715 // Class for non-allocatable 32 bit registers
  716 reg_class non_allocatable_reg32(
  717 #ifdef R18_RESERVED
  718     // See comment in register_aarch64.hpp
  719     R18,                        // tls on Windows
  720 #endif
  721     R28,                        // thread
  722     R30,                        // lr
  723     R31                         // sp
  724 );
  725 
  726 // Class for non-allocatable 64 bit registers
  727 reg_class non_allocatable_reg(
  728 #ifdef R18_RESERVED
  729     // See comment in register_aarch64.hpp
  730     R18, R18_H,                 // tls on Windows, platform register on macOS
  731 #endif
  732     R28, R28_H,                 // thread
  733     R30, R30_H,                 // lr
  734     R31, R31_H                  // sp
  735 );
  736 
  737 // Class for all non-special integer registers
  738 reg_class no_special_reg32 %{
  739   return _NO_SPECIAL_REG32_mask;
  740 %}
  741 
  742 // Class for all non-special long integer registers
  743 reg_class no_special_reg %{
  744   return _NO_SPECIAL_REG_mask;
  745 %}
  746 
  747 // Class for 64 bit register r0
  748 reg_class r0_reg(
  749     R0, R0_H
  750 );
  751 
  752 // Class for 64 bit register r1
  753 reg_class r1_reg(
  754     R1, R1_H
  755 );
  756 
  757 // Class for 64 bit register r2
  758 reg_class r2_reg(
  759     R2, R2_H
  760 );
  761 
  762 // Class for 64 bit register r3
  763 reg_class r3_reg(
  764     R3, R3_H
  765 );
  766 
  767 // Class for 64 bit register r4
  768 reg_class r4_reg(
  769     R4, R4_H
  770 );
  771 
  772 // Class for 64 bit register r5
  773 reg_class r5_reg(
  774     R5, R5_H
  775 );
  776 
  777 // Class for 64 bit register r10
  778 reg_class r10_reg(
  779     R10, R10_H
  780 );
  781 
  782 // Class for 64 bit register r11
  783 reg_class r11_reg(
  784     R11, R11_H
  785 );
  786 
  787 // Class for method register
  788 reg_class method_reg(
  789     R12, R12_H
  790 );
  791 
  792 // Class for heapbase register
  793 reg_class heapbase_reg(
  794     R27, R27_H
  795 );
  796 
  797 // Class for thread register
  798 reg_class thread_reg(
  799     R28, R28_H
  800 );
  801 
  802 // Class for frame pointer register
  803 reg_class fp_reg(
  804     R29, R29_H
  805 );
  806 
  807 // Class for link register
  808 reg_class lr_reg(
  809     R30, R30_H
  810 );
  811 
  812 // Class for long sp register
  813 reg_class sp_reg(
  814   R31, R31_H
  815 );
  816 
  817 // Class for all pointer registers
  818 reg_class ptr_reg %{
  819   return _PTR_REG_mask;
  820 %}
  821 
  822 // Class for all non_special pointer registers
  823 reg_class no_special_ptr_reg %{
  824   return _NO_SPECIAL_PTR_REG_mask;
  825 %}
  826 
  827 // Class for all float registers
  828 reg_class float_reg(
  829     V0,
  830     V1,
  831     V2,
  832     V3,
  833     V4,
  834     V5,
  835     V6,
  836     V7,
  837     V8,
  838     V9,
  839     V10,
  840     V11,
  841     V12,
  842     V13,
  843     V14,
  844     V15,
  845     V16,
  846     V17,
  847     V18,
  848     V19,
  849     V20,
  850     V21,
  851     V22,
  852     V23,
  853     V24,
  854     V25,
  855     V26,
  856     V27,
  857     V28,
  858     V29,
  859     V30,
  860     V31
  861 );
  862 
  863 // Double precision float registers have virtual `high halves' that
  864 // are needed by the allocator.
  865 // Class for all double registers
  866 reg_class double_reg(
  867     V0, V0_H,
  868     V1, V1_H,
  869     V2, V2_H,
  870     V3, V3_H,
  871     V4, V4_H,
  872     V5, V5_H,
  873     V6, V6_H,
  874     V7, V7_H,
  875     V8, V8_H,
  876     V9, V9_H,
  877     V10, V10_H,
  878     V11, V11_H,
  879     V12, V12_H,
  880     V13, V13_H,
  881     V14, V14_H,
  882     V15, V15_H,
  883     V16, V16_H,
  884     V17, V17_H,
  885     V18, V18_H,
  886     V19, V19_H,
  887     V20, V20_H,
  888     V21, V21_H,
  889     V22, V22_H,
  890     V23, V23_H,
  891     V24, V24_H,
  892     V25, V25_H,
  893     V26, V26_H,
  894     V27, V27_H,
  895     V28, V28_H,
  896     V29, V29_H,
  897     V30, V30_H,
  898     V31, V31_H
  899 );
  900 
  901 // Class for all SVE vector registers.
  902 reg_class vectora_reg (
  903     V0, V0_H, V0_J, V0_K, V0_L, V0_M, V0_N, V0_O,
  904     V1, V1_H, V1_J, V1_K, V1_L, V1_M, V1_N, V1_O,
  905     V2, V2_H, V2_J, V2_K, V2_L, V2_M, V2_N, V2_O,
  906     V3, V3_H, V3_J, V3_K, V3_L, V3_M, V3_N, V3_O,
  907     V4, V4_H, V4_J, V4_K, V4_L, V4_M, V4_N, V4_O,
  908     V5, V5_H, V5_J, V5_K, V5_L, V5_M, V5_N, V5_O,
  909     V6, V6_H, V6_J, V6_K, V6_L, V6_M, V6_N, V6_O,
  910     V7, V7_H, V7_J, V7_K, V7_L, V7_M, V7_N, V7_O,
  911     V8, V8_H, V8_J, V8_K, V8_L, V8_M, V8_N, V8_O,
  912     V9, V9_H, V9_J, V9_K, V9_L, V9_M, V9_N, V9_O,
  913     V10, V10_H, V10_J, V10_K, V10_L, V10_M, V10_N, V10_O,
  914     V11, V11_H, V11_J, V11_K, V11_L, V11_M, V11_N, V11_O,
  915     V12, V12_H, V12_J, V12_K, V12_L, V12_M, V12_N, V12_O,
  916     V13, V13_H, V13_J, V13_K, V13_L, V13_M, V13_N, V13_O,
  917     V14, V14_H, V14_J, V14_K, V14_L, V14_M, V14_N, V14_O,
  918     V15, V15_H, V15_J, V15_K, V15_L, V15_M, V15_N, V15_O,
  919     V16, V16_H, V16_J, V16_K, V16_L, V16_M, V16_N, V16_O,
  920     V17, V17_H, V17_J, V17_K, V17_L, V17_M, V17_N, V17_O,
  921     V18, V18_H, V18_J, V18_K, V18_L, V18_M, V18_N, V18_O,
  922     V19, V19_H, V19_J, V19_K, V19_L, V19_M, V19_N, V19_O,
  923     V20, V20_H, V20_J, V20_K, V20_L, V20_M, V20_N, V20_O,
  924     V21, V21_H, V21_J, V21_K, V21_L, V21_M, V21_N, V21_O,
  925     V22, V22_H, V22_J, V22_K, V22_L, V22_M, V22_N, V22_O,
  926     V23, V23_H, V23_J, V23_K, V23_L, V23_M, V23_N, V23_O,
  927     V24, V24_H, V24_J, V24_K, V24_L, V24_M, V24_N, V24_O,
  928     V25, V25_H, V25_J, V25_K, V25_L, V25_M, V25_N, V25_O,
  929     V26, V26_H, V26_J, V26_K, V26_L, V26_M, V26_N, V26_O,
  930     V27, V27_H, V27_J, V27_K, V27_L, V27_M, V27_N, V27_O,
  931     V28, V28_H, V28_J, V28_K, V28_L, V28_M, V28_N, V28_O,
  932     V29, V29_H, V29_J, V29_K, V29_L, V29_M, V29_N, V29_O,
  933     V30, V30_H, V30_J, V30_K, V30_L, V30_M, V30_N, V30_O,
  934     V31, V31_H, V31_J, V31_K, V31_L, V31_M, V31_N, V31_O,
  935 );
  936 
  937 // Class for all 64bit vector registers
  938 reg_class vectord_reg(
  939     V0, V0_H,
  940     V1, V1_H,
  941     V2, V2_H,
  942     V3, V3_H,
  943     V4, V4_H,
  944     V5, V5_H,
  945     V6, V6_H,
  946     V7, V7_H,
  947     V8, V8_H,
  948     V9, V9_H,
  949     V10, V10_H,
  950     V11, V11_H,
  951     V12, V12_H,
  952     V13, V13_H,
  953     V14, V14_H,
  954     V15, V15_H,
  955     V16, V16_H,
  956     V17, V17_H,
  957     V18, V18_H,
  958     V19, V19_H,
  959     V20, V20_H,
  960     V21, V21_H,
  961     V22, V22_H,
  962     V23, V23_H,
  963     V24, V24_H,
  964     V25, V25_H,
  965     V26, V26_H,
  966     V27, V27_H,
  967     V28, V28_H,
  968     V29, V29_H,
  969     V30, V30_H,
  970     V31, V31_H
  971 );
  972 
  973 // Class for all 128bit vector registers
  974 reg_class vectorx_reg(
  975     V0, V0_H, V0_J, V0_K,
  976     V1, V1_H, V1_J, V1_K,
  977     V2, V2_H, V2_J, V2_K,
  978     V3, V3_H, V3_J, V3_K,
  979     V4, V4_H, V4_J, V4_K,
  980     V5, V5_H, V5_J, V5_K,
  981     V6, V6_H, V6_J, V6_K,
  982     V7, V7_H, V7_J, V7_K,
  983     V8, V8_H, V8_J, V8_K,
  984     V9, V9_H, V9_J, V9_K,
  985     V10, V10_H, V10_J, V10_K,
  986     V11, V11_H, V11_J, V11_K,
  987     V12, V12_H, V12_J, V12_K,
  988     V13, V13_H, V13_J, V13_K,
  989     V14, V14_H, V14_J, V14_K,
  990     V15, V15_H, V15_J, V15_K,
  991     V16, V16_H, V16_J, V16_K,
  992     V17, V17_H, V17_J, V17_K,
  993     V18, V18_H, V18_J, V18_K,
  994     V19, V19_H, V19_J, V19_K,
  995     V20, V20_H, V20_J, V20_K,
  996     V21, V21_H, V21_J, V21_K,
  997     V22, V22_H, V22_J, V22_K,
  998     V23, V23_H, V23_J, V23_K,
  999     V24, V24_H, V24_J, V24_K,
 1000     V25, V25_H, V25_J, V25_K,
 1001     V26, V26_H, V26_J, V26_K,
 1002     V27, V27_H, V27_J, V27_K,
 1003     V28, V28_H, V28_J, V28_K,
 1004     V29, V29_H, V29_J, V29_K,
 1005     V30, V30_H, V30_J, V30_K,
 1006     V31, V31_H, V31_J, V31_K
 1007 );
 1008 
 1009 // Class for 128 bit register v0
 1010 reg_class v0_reg(
 1011     V0, V0_H
 1012 );
 1013 
 1014 // Class for 128 bit register v1
 1015 reg_class v1_reg(
 1016     V1, V1_H
 1017 );
 1018 
 1019 // Class for 128 bit register v2
 1020 reg_class v2_reg(
 1021     V2, V2_H
 1022 );
 1023 
 1024 // Class for 128 bit register v3
 1025 reg_class v3_reg(
 1026     V3, V3_H
 1027 );
 1028 
 1029 // Class for 128 bit register v4
 1030 reg_class v4_reg(
 1031     V4, V4_H
 1032 );
 1033 
 1034 // Class for 128 bit register v5
 1035 reg_class v5_reg(
 1036     V5, V5_H
 1037 );
 1038 
 1039 // Class for 128 bit register v6
 1040 reg_class v6_reg(
 1041     V6, V6_H
 1042 );
 1043 
 1044 // Class for 128 bit register v7
 1045 reg_class v7_reg(
 1046     V7, V7_H
 1047 );
 1048 
 1049 // Class for 128 bit register v8
 1050 reg_class v8_reg(
 1051     V8, V8_H
 1052 );
 1053 
 1054 // Class for 128 bit register v9
 1055 reg_class v9_reg(
 1056     V9, V9_H
 1057 );
 1058 
 1059 // Class for 128 bit register v10
 1060 reg_class v10_reg(
 1061     V10, V10_H
 1062 );
 1063 
 1064 // Class for 128 bit register v11
 1065 reg_class v11_reg(
 1066     V11, V11_H
 1067 );
 1068 
 1069 // Class for 128 bit register v12
 1070 reg_class v12_reg(
 1071     V12, V12_H
 1072 );
 1073 
 1074 // Class for 128 bit register v13
 1075 reg_class v13_reg(
 1076     V13, V13_H
 1077 );
 1078 
 1079 // Class for 128 bit register v14
 1080 reg_class v14_reg(
 1081     V14, V14_H
 1082 );
 1083 
 1084 // Class for 128 bit register v15
 1085 reg_class v15_reg(
 1086     V15, V15_H
 1087 );
 1088 
 1089 // Class for 128 bit register v16
 1090 reg_class v16_reg(
 1091     V16, V16_H
 1092 );
 1093 
 1094 // Class for 128 bit register v17
 1095 reg_class v17_reg(
 1096     V17, V17_H
 1097 );
 1098 
 1099 // Class for 128 bit register v18
 1100 reg_class v18_reg(
 1101     V18, V18_H
 1102 );
 1103 
 1104 // Class for 128 bit register v19
 1105 reg_class v19_reg(
 1106     V19, V19_H
 1107 );
 1108 
 1109 // Class for 128 bit register v20
 1110 reg_class v20_reg(
 1111     V20, V20_H
 1112 );
 1113 
 1114 // Class for 128 bit register v21
 1115 reg_class v21_reg(
 1116     V21, V21_H
 1117 );
 1118 
 1119 // Class for 128 bit register v22
 1120 reg_class v22_reg(
 1121     V22, V22_H
 1122 );
 1123 
 1124 // Class for 128 bit register v23
 1125 reg_class v23_reg(
 1126     V23, V23_H
 1127 );
 1128 
 1129 // Class for 128 bit register v24
 1130 reg_class v24_reg(
 1131     V24, V24_H
 1132 );
 1133 
 1134 // Class for 128 bit register v25
 1135 reg_class v25_reg(
 1136     V25, V25_H
 1137 );
 1138 
 1139 // Class for 128 bit register v26
 1140 reg_class v26_reg(
 1141     V26, V26_H
 1142 );
 1143 
 1144 // Class for 128 bit register v27
 1145 reg_class v27_reg(
 1146     V27, V27_H
 1147 );
 1148 
 1149 // Class for 128 bit register v28
 1150 reg_class v28_reg(
 1151     V28, V28_H
 1152 );
 1153 
 1154 // Class for 128 bit register v29
 1155 reg_class v29_reg(
 1156     V29, V29_H
 1157 );
 1158 
 1159 // Class for 128 bit register v30
 1160 reg_class v30_reg(
 1161     V30, V30_H
 1162 );
 1163 
 1164 // Class for 128 bit register v31
 1165 reg_class v31_reg(
 1166     V31, V31_H
 1167 );
 1168 
 1169 // Class for all SVE predicate registers.
 1170 reg_class pr_reg (
 1171     P0,
 1172     P1,
 1173     P2,
 1174     P3,
 1175     P4,
 1176     P5,
 1177     P6,
 1178     // P7, non-allocatable, preserved with all elements preset to TRUE.
 1179     P8,
 1180     P9,
 1181     P10,
 1182     P11,
 1183     P12,
 1184     P13,
 1185     P14,
 1186     P15
 1187 );
 1188 
 1189 // Class for SVE governing predicate registers, which are used
 1190 // to determine the active elements of a predicated instruction.
 1191 reg_class gov_pr (
 1192     P0,
 1193     P1,
 1194     P2,
 1195     P3,
 1196     P4,
 1197     P5,
 1198     P6,
 1199     // P7, non-allocatable, preserved with all elements preset to TRUE.
 1200 );
 1201 
 1202 // Singleton class for condition codes
 1203 reg_class int_flags(RFLAGS);
 1204 
 1205 %}
 1206 
 1207 //----------DEFINITION BLOCK---------------------------------------------------
 1208 // Define name --> value mappings to inform the ADLC of an integer valued name
 1209 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 1210 // Format:
 1211 //        int_def  <name>         ( <int_value>, <expression>);
 1212 // Generated Code in ad_<arch>.hpp
 1213 //        #define  <name>   (<expression>)
 1214 //        // value == <int_value>
 1215 // Generated code in ad_<arch>.cpp adlc_verification()
 1216 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 1217 //
 1218 
 1219 // we follow the ppc-aix port in using a simple cost model which ranks
 1220 // register operations as cheap, memory ops as more expensive and
 1221 // branches as most expensive. the first two have a low as well as a
 1222 // normal cost. huge cost appears to be a way of saying don't do
 1223 // something
 1224 
 1225 definitions %{
 1226   // The default cost (of a register move instruction).
 1227   int_def INSN_COST            (    100,     100);
 1228   int_def BRANCH_COST          (    200,     2 * INSN_COST);
 1229   int_def CALL_COST            (    200,     2 * INSN_COST);
 1230   int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
 1231 %}
 1232 
 1233 
 1234 //----------SOURCE BLOCK-------------------------------------------------------
 1235 // This is a block of C++ code which provides values, functions, and
 1236 // definitions necessary in the rest of the architecture description
 1237 
 1238 source_hpp %{
 1239 
 1240 #include "asm/macroAssembler.hpp"
 1241 #include "gc/shared/barrierSetAssembler.hpp"
 1242 #include "gc/shared/cardTable.hpp"
 1243 #include "gc/shared/cardTableBarrierSet.hpp"
 1244 #include "gc/shared/collectedHeap.hpp"
 1245 #include "opto/addnode.hpp"
 1246 #include "opto/convertnode.hpp"
 1247 #include "runtime/objectMonitor.hpp"
 1248 
 1249 extern RegMask _ANY_REG32_mask;
 1250 extern RegMask _ANY_REG_mask;
 1251 extern RegMask _PTR_REG_mask;
 1252 extern RegMask _NO_SPECIAL_REG32_mask;
 1253 extern RegMask _NO_SPECIAL_REG_mask;
 1254 extern RegMask _NO_SPECIAL_PTR_REG_mask;
 1255 
 1256 class CallStubImpl {
 1257 
 1258   //--------------------------------------------------------------
 1259   //---<  Used for optimization in Compile::shorten_branches  >---
 1260   //--------------------------------------------------------------
 1261 
 1262  public:
 1263   // Size of call trampoline stub.
 1264   static uint size_call_trampoline() {
 1265     return 0; // no call trampolines on this platform
 1266   }
 1267 
 1268   // number of relocations needed by a call trampoline stub
 1269   static uint reloc_call_trampoline() {
 1270     return 0; // no call trampolines on this platform
 1271   }
 1272 };
 1273 
 1274 class HandlerImpl {
 1275 
 1276  public:
 1277 
 1278   static int emit_exception_handler(CodeBuffer &cbuf);
 1279   static int emit_deopt_handler(CodeBuffer& cbuf);
 1280 
 1281   static uint size_exception_handler() {
 1282     return MacroAssembler::far_codestub_branch_size();
 1283   }
 1284 
 1285   static uint size_deopt_handler() {
 1286     // count one adr and one far branch instruction
 1287     return NativeInstruction::instruction_size + MacroAssembler::far_codestub_branch_size();
 1288   }
 1289 };
 1290 
 1291 class Node::PD {
 1292 public:
 1293   enum NodeFlags {
 1294     _last_flag = Node::_last_flag
 1295   };
 1296 };
 1297 
 1298  bool is_CAS(int opcode, bool maybe_volatile);
 1299 
 1300   // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
 1301 
 1302   bool unnecessary_acquire(const Node *barrier);
 1303   bool needs_acquiring_load(const Node *load);
 1304 
 1305   // predicates controlling emit of str<x>/stlr<x> and associated dmbs
 1306 
 1307   bool unnecessary_release(const Node *barrier);
 1308   bool unnecessary_volatile(const Node *barrier);
 1309   bool needs_releasing_store(const Node *store);
 1310 
 1311   // predicate controlling translation of CompareAndSwapX
 1312   bool needs_acquiring_load_exclusive(const Node *load);
 1313 
 1314   // predicate controlling addressing modes
 1315   bool size_fits_all_mem_uses(AddPNode* addp, int shift);
 1316 %}
 1317 
 1318 source %{
 1319 
 1320   // Derived RegMask with conditionally allocatable registers
 1321 
 1322   void PhaseOutput::pd_perform_mach_node_analysis() {
 1323   }
 1324 
 1325   int MachNode::pd_alignment_required() const {
 1326     return 1;
 1327   }
 1328 
 1329   int MachNode::compute_padding(int current_offset) const {
 1330     return 0;
 1331   }
 1332 
 1333   RegMask _ANY_REG32_mask;
 1334   RegMask _ANY_REG_mask;
 1335   RegMask _PTR_REG_mask;
 1336   RegMask _NO_SPECIAL_REG32_mask;
 1337   RegMask _NO_SPECIAL_REG_mask;
 1338   RegMask _NO_SPECIAL_PTR_REG_mask;
 1339 
 1340   void reg_mask_init() {
 1341     // We derive below RegMask(s) from the ones which are auto-generated from
 1342     // adlc register classes to make AArch64 rheapbase (r27) and rfp (r29)
 1343     // registers conditionally reserved.
 1344 
 1345     _ANY_REG32_mask = _ALL_REG32_mask;
 1346     _ANY_REG32_mask.Remove(OptoReg::as_OptoReg(r31_sp->as_VMReg()));
 1347 
 1348     _ANY_REG_mask = _ALL_REG_mask;
 1349 
 1350     _PTR_REG_mask = _ALL_REG_mask;
 1351 
 1352     _NO_SPECIAL_REG32_mask = _ALL_REG32_mask;
 1353     _NO_SPECIAL_REG32_mask.SUBTRACT(_NON_ALLOCATABLE_REG32_mask);
 1354 
 1355     _NO_SPECIAL_REG_mask = _ALL_REG_mask;
 1356     _NO_SPECIAL_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);
 1357 
 1358     _NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask;
 1359     _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);
 1360 
 1361     // r27 is not allocatable when compressed oops is on and heapbase is not
 1362     // zero, compressed klass pointers doesn't use r27 after JDK-8234794
 1363     if (UseCompressedOops && (CompressedOops::ptrs_base() != NULL)) {
 1364       _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
 1365       _NO_SPECIAL_REG_mask.SUBTRACT(_HEAPBASE_REG_mask);
 1366       _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_HEAPBASE_REG_mask);
 1367     }
 1368 
 1369     // r29 is not allocatable when PreserveFramePointer is on
 1370     if (PreserveFramePointer) {
 1371       _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
 1372       _NO_SPECIAL_REG_mask.SUBTRACT(_FP_REG_mask);
 1373       _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_FP_REG_mask);
 1374     }
 1375   }
 1376 
  // Optimization of volatile gets and puts
 1378   // -------------------------------------
 1379   //
 1380   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
 1381   // use to implement volatile reads and writes. For a volatile read
 1382   // we simply need
 1383   //
 1384   //   ldar<x>
 1385   //
 1386   // and for a volatile write we need
 1387   //
 1388   //   stlr<x>
 1389   //
 1390   // Alternatively, we can implement them by pairing a normal
 1391   // load/store with a memory barrier. For a volatile read we need
 1392   //
 1393   //   ldr<x>
 1394   //   dmb ishld
 1395   //
 1396   // for a volatile write
 1397   //
 1398   //   dmb ish
 1399   //   str<x>
 1400   //   dmb ish
 1401   //
 1402   // We can also use ldaxr and stlxr to implement compare and swap CAS
 1403   // sequences. These are normally translated to an instruction
 1404   // sequence like the following
 1405   //
 1406   //   dmb      ish
 1407   // retry:
 1408   //   ldxr<x>   rval raddr
 1409   //   cmp       rval rold
 1410   //   b.ne done
 1411   //   stlxr<x>  rval, rnew, rold
 1412   //   cbnz      rval retry
 1413   // done:
 1414   //   cset      r0, eq
 1415   //   dmb ishld
 1416   //
 1417   // Note that the exclusive store is already using an stlxr
 1418   // instruction. That is required to ensure visibility to other
 1419   // threads of the exclusive write (assuming it succeeds) before that
 1420   // of any subsequent writes.
 1421   //
 1422   // The following instruction sequence is an improvement on the above
 1423   //
 1424   // retry:
 1425   //   ldaxr<x>  rval raddr
 1426   //   cmp       rval rold
 1427   //   b.ne done
 1428   //   stlxr<x>  rval, rnew, rold
 1429   //   cbnz      rval retry
 1430   // done:
 1431   //   cset      r0, eq
 1432   //
 1433   // We don't need the leading dmb ish since the stlxr guarantees
 1434   // visibility of prior writes in the case that the swap is
 1435   // successful. Crucially we don't have to worry about the case where
 1436   // the swap is not successful since no valid program should be
 1437   // relying on visibility of prior changes by the attempting thread
 1438   // in the case where the CAS fails.
 1439   //
 1440   // Similarly, we don't need the trailing dmb ishld if we substitute
 1441   // an ldaxr instruction since that will provide all the guarantees we
 1442   // require regarding observation of changes made by other threads
 1443   // before any change to the CAS address observed by the load.
 1444   //
 1445   // In order to generate the desired instruction sequence we need to
 1446   // be able to identify specific 'signature' ideal graph node
 1447   // sequences which i) occur as a translation of a volatile reads or
 1448   // writes or CAS operations and ii) do not occur through any other
 1449   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
 1451   // sequences to the desired machine code sequences. Selection of the
 1452   // alternative rules can be implemented by predicates which identify
 1453   // the relevant node sequences.
 1454   //
 1455   // The ideal graph generator translates a volatile read to the node
 1456   // sequence
 1457   //
 1458   //   LoadX[mo_acquire]
 1459   //   MemBarAcquire
 1460   //
 1461   // As a special case when using the compressed oops optimization we
 1462   // may also see this variant
 1463   //
 1464   //   LoadN[mo_acquire]
 1465   //   DecodeN
 1466   //   MemBarAcquire
 1467   //
 1468   // A volatile write is translated to the node sequence
 1469   //
 1470   //   MemBarRelease
 1471   //   StoreX[mo_release] {CardMark}-optional
 1472   //   MemBarVolatile
 1473   //
 1474   // n.b. the above node patterns are generated with a strict
 1475   // 'signature' configuration of input and output dependencies (see
 1476   // the predicates below for exact details). The card mark may be as
 1477   // simple as a few extra nodes or, in a few GC configurations, may
 1478   // include more complex control flow between the leading and
 1479   // trailing memory barriers. However, whatever the card mark
 1480   // configuration these signatures are unique to translated volatile
 1481   // reads/stores -- they will not appear as a result of any other
 1482   // bytecode translation or inlining nor as a consequence of
 1483   // optimizing transforms.
 1484   //
 1485   // We also want to catch inlined unsafe volatile gets and puts and
 1486   // be able to implement them using either ldar<x>/stlr<x> or some
 1487   // combination of ldr<x>/stlr<x> and dmb instructions.
 1488   //
 1489   // Inlined unsafe volatiles puts manifest as a minor variant of the
 1490   // normal volatile put node sequence containing an extra cpuorder
 1491   // membar
 1492   //
 1493   //   MemBarRelease
 1494   //   MemBarCPUOrder
 1495   //   StoreX[mo_release] {CardMark}-optional
 1496   //   MemBarCPUOrder
 1497   //   MemBarVolatile
 1498   //
 1499   // n.b. as an aside, a cpuorder membar is not itself subject to
 1500   // matching and translation by adlc rules.  However, the rule
 1501   // predicates need to detect its presence in order to correctly
 1502   // select the desired adlc rules.
 1503   //
 1504   // Inlined unsafe volatile gets manifest as a slightly different
 1505   // node sequence to a normal volatile get because of the
 1506   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
  // MemBarAcquire, possibly through an optional DecodeN, is still
 1509   // present
 1510   //
 1511   //   MemBarCPUOrder
 1512   //        ||       \\
 1513   //   MemBarCPUOrder LoadX[mo_acquire]
 1514   //        ||            |
 1515   //        ||       {DecodeN} optional
 1516   //        ||       /
 1517   //     MemBarAcquire
 1518   //
 1519   // In this case the acquire membar does not directly depend on the
 1520   // load. However, we can be sure that the load is generated from an
 1521   // inlined unsafe volatile get if we see it dependent on this unique
 1522   // sequence of membar nodes. Similarly, given an acquire membar we
 1523   // can know that it was added because of an inlined unsafe volatile
 1524   // get if it is fed and feeds a cpuorder membar and if its feed
 1525   // membar also feeds an acquiring load.
 1526   //
 1527   // Finally an inlined (Unsafe) CAS operation is translated to the
 1528   // following ideal graph
 1529   //
 1530   //   MemBarRelease
 1531   //   MemBarCPUOrder
 1532   //   CompareAndSwapX {CardMark}-optional
 1533   //   MemBarCPUOrder
 1534   //   MemBarAcquire
 1535   //
 1536   // So, where we can identify these volatile read and write
 1537   // signatures we can choose to plant either of the above two code
 1538   // sequences. For a volatile read we can simply plant a normal
 1539   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
 1540   // also choose to inhibit translation of the MemBarAcquire and
 1541   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
 1542   //
 1543   // When we recognise a volatile store signature we can choose to
 1544   // plant at a dmb ish as a translation for the MemBarRelease, a
 1545   // normal str<x> and then a dmb ish for the MemBarVolatile.
 1546   // Alternatively, we can inhibit translation of the MemBarRelease
 1547   // and MemBarVolatile and instead plant a simple stlr<x>
 1548   // instruction.
 1549   //
  // When we recognise a CAS signature we can choose to plant a dmb
 1551   // ish as a translation for the MemBarRelease, the conventional
 1552   // macro-instruction sequence for the CompareAndSwap node (which
 1553   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
 1554   // Alternatively, we can elide generation of the dmb instructions
 1555   // and plant the alternative CompareAndSwap macro-instruction
 1556   // sequence (which uses ldaxr<x>).
 1557   //
 1558   // Of course, the above only applies when we see these signature
 1559   // configurations. We still want to plant dmb instructions in any
 1560   // other cases where we may see a MemBarAcquire, MemBarRelease or
 1561   // MemBarVolatile. For example, at the end of a constructor which
 1562   // writes final/volatile fields we will see a MemBarRelease
 1563   // instruction and this needs a 'dmb ish' lest we risk the
 1564   // constructed object being visible without making the
 1565   // final/volatile field writes visible.
 1566   //
 1567   // n.b. the translation rules below which rely on detection of the
 1568   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
 1569   // If we see anything other than the signature configurations we
 1570   // always just translate the loads and stores to ldr<x> and str<x>
 1571   // and translate acquire, release and volatile membars to the
 1572   // relevant dmb instructions.
 1573   //
 1574 
 1575   // is_CAS(int opcode, bool maybe_volatile)
 1576   //
 1577   // return true if opcode is one of the possible CompareAndSwapX
 1578   // values otherwise false.
 1579 
  // Return true if opcode denotes an operation implemented with a
  // CAS-style macro-instruction sequence (ldxr/stxr or ldaxr/stlxr).
  //
  // The first group of opcodes always uses such a sequence, so the
  // answer is unconditionally true.  The second group
  // (CompareAndExchangeX, WeakCompareAndSwapX and the Shenandoah
  // variants) does so only when the access may be volatile, hence the
  // answer is the caller-supplied maybe_volatile flag.
  bool is_CAS(int opcode, bool maybe_volatile)
  {
    switch(opcode) {
      // We handle these
    case Op_CompareAndSwapI:
    case Op_CompareAndSwapL:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_CompareAndSwapB:
    case Op_CompareAndSwapS:
    case Op_GetAndSetI:
    case Op_GetAndSetL:
    case Op_GetAndSetP:
    case Op_GetAndSetN:
    case Op_GetAndAddI:
    case Op_GetAndAddL:
      return true;
      // These are CAS-like only for possibly-volatile accesses
    case Op_CompareAndExchangeI:
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeB:
    case Op_CompareAndExchangeS:
    case Op_CompareAndExchangeL:
    case Op_CompareAndExchangeP:
    case Op_WeakCompareAndSwapB:
    case Op_WeakCompareAndSwapS:
    case Op_WeakCompareAndSwapI:
    case Op_WeakCompareAndSwapL:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      return maybe_volatile;
    default:
      return false;
    }
  }
 1620 
 1621   // helper to determine the maximum number of Phi nodes we may need to
 1622   // traverse when searching from a card mark membar for the merge mem
 1623   // feeding a trailing membar or vice versa
 1624 
 1625 // predicates controlling emit of ldr<x>/ldar<x>
 1626 
 1627 bool unnecessary_acquire(const Node *barrier)
 1628 {
 1629   assert(barrier->is_MemBar(), "expecting a membar");
 1630 
 1631   MemBarNode* mb = barrier->as_MemBar();
 1632 
 1633   if (mb->trailing_load()) {
 1634     return true;
 1635   }
 1636 
 1637   if (mb->trailing_load_store()) {
 1638     Node* load_store = mb->in(MemBarNode::Precedent);
 1639     assert(load_store->is_LoadStore(), "unexpected graph shape");
 1640     return is_CAS(load_store->Opcode(), true);
 1641   }
 1642 
 1643   return false;
 1644 }
 1645 
 1646 bool needs_acquiring_load(const Node *n)
 1647 {
 1648   assert(n->is_Load(), "expecting a load");
 1649   LoadNode *ld = n->as_Load();
 1650   return ld->is_acquire();
 1651 }
 1652 
 1653 bool unnecessary_release(const Node *n)
 1654 {
 1655   assert((n->is_MemBar() &&
 1656           n->Opcode() == Op_MemBarRelease),
 1657          "expecting a release membar");
 1658 
 1659   MemBarNode *barrier = n->as_MemBar();
 1660   if (!barrier->leading()) {
 1661     return false;
 1662   } else {
 1663     Node* trailing = barrier->trailing_membar();
 1664     MemBarNode* trailing_mb = trailing->as_MemBar();
 1665     assert(trailing_mb->trailing(), "Not a trailing membar?");
 1666     assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
 1667 
 1668     Node* mem = trailing_mb->in(MemBarNode::Precedent);
 1669     if (mem->is_Store()) {
 1670       assert(mem->as_Store()->is_release(), "");
 1671       assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
 1672       return true;
 1673     } else {
 1674       assert(mem->is_LoadStore(), "");
 1675       assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
 1676       return is_CAS(mem->Opcode(), true);
 1677     }
 1678   }
 1679   return false;
 1680 }
 1681 
 1682 bool unnecessary_volatile(const Node *n)
 1683 {
 1684   // assert n->is_MemBar();
 1685   MemBarNode *mbvol = n->as_MemBar();
 1686 
 1687   bool release = mbvol->trailing_store();
 1688   assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
 1689 #ifdef ASSERT
 1690   if (release) {
 1691     Node* leading = mbvol->leading_membar();
 1692     assert(leading->Opcode() == Op_MemBarRelease, "");
 1693     assert(leading->as_MemBar()->leading_store(), "");
 1694     assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
 1695   }
 1696 #endif
 1697 
 1698   return release;
 1699 }
 1700 
 1701 // predicates controlling emit of str<x>/stlr<x>
 1702 
// Predicate controlling emit of str<x>/stlr<x>: should this store be
// emitted with release semantics?  True when the store is paired with
// a trailing membar, i.e. it is part of a recognised volatile-store
// signature.
bool needs_releasing_store(const Node *n)
{
  // assert n->is_Store();
  StoreNode *st = n->as_Store();
  return st->trailing_membar() != NULL;
}
 1709 
 1710 // predicate controlling translation of CAS
 1711 //
 1712 // returns true if CAS needs to use an acquiring load otherwise false
 1713 
 1714 bool needs_acquiring_load_exclusive(const Node *n)
 1715 {
 1716   assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
 1717   LoadStoreNode* ldst = n->as_LoadStore();
 1718   if (is_CAS(n->Opcode(), false)) {
 1719     assert(ldst->trailing_membar() != NULL, "expected trailing membar");
 1720   } else {
 1721     return ldst->trailing_membar() != NULL;
 1722   }
 1723 
 1724   // so we can just return true here
 1725   return true;
 1726 }
 1727 
 1728 #define __ _masm.
 1729 
 1730 // advance declarations for helper functions to convert register
 1731 // indices to register objects
 1732 
 1733 // the ad file has to provide implementations of certain methods
 1734 // expected by the generic code
 1735 //
 1736 // REQUIRED FUNCTIONALITY
 1737 
 1738 //=============================================================================
 1739 
 1740 // !!!!! Special hack to get all types of calls to specify the byte offset
 1741 //       from the start of the call to the point where the return address
 1742 //       will point.
 1743 
 1744 int MachCallStaticJavaNode::ret_addr_offset()
 1745 {
 1746   // call should be a simple bl
 1747   int off = 4;
 1748   return off;
 1749 }
 1750 
// Byte offset from the start of a dynamic (inline-cache) Java call to
// the return address: three moves materializing the IC data plus the
// call itself, four instructions in total.
int MachCallDynamicJavaNode::ret_addr_offset()
{
  return 16; // movz, movk, movk, bl
}
 1755 
 1756 int MachCallRuntimeNode::ret_addr_offset() {
 1757   // for generated stubs the call will be
 1758   //   bl(addr)
 1759   // or with far branches
 1760   //   bl(trampoline_stub)
 1761   // for real runtime callouts it will be six instructions
 1762   // see aarch64_enc_java_to_runtime
 1763   //   adr(rscratch2, retaddr)
 1764   //   lea(rscratch1, RuntimeAddress(addr)
 1765   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
 1766   //   blr(rscratch1)
 1767   CodeBlob *cb = CodeCache::find_blob(_entry_point);
 1768   if (cb) {
 1769     return 1 * NativeInstruction::instruction_size;
 1770   } else {
 1771     return 6 * NativeInstruction::instruction_size;
 1772   }
 1773 }
 1774 
 1775 int MachCallNativeNode::ret_addr_offset() {
 1776   // This is implemented using aarch64_enc_java_to_runtime as above.
 1777   CodeBlob *cb = CodeCache::find_blob(_entry_point);
 1778   if (cb) {
 1779     return 1 * NativeInstruction::instruction_size;
 1780   } else {
 1781     return 6 * NativeInstruction::instruction_size;
 1782   }
 1783 }
 1784 
 1785 //=============================================================================
 1786 
 1787 #ifndef PRODUCT
// Debug-only: describe the breakpoint node in listings.
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
 1791 #endif
 1792 
// Emit a breakpoint as a brk instruction with immediate 0.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  C2_MacroAssembler _masm(&cbuf);
  __ brk(0);
}
 1797 
// Size is determined by measuring the emitted code.
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 1801 
 1802 //=============================================================================
 1803 
 1804 #ifndef PRODUCT
  // Debug-only: describe the nop padding in listings.
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
 1808 #endif
 1809 
  // Emit _count nop instructions as padding.
  void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
    C2_MacroAssembler _masm(&cbuf);
    for (int i = 0; i < _count; i++) {
      __ nop();
    }
  }
 1816 
  // Each nop is one fixed-width instruction.
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
 1820 
 1821 //=============================================================================
 1822 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
 1823 
// The constant table is addressed absolutely on aarch64, so the base
// offset is always zero.
int ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}
 1827 
 1828 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
// Never called, since requires_postalloc_expand() returns false.
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
 1832 
// The constant base node emits no code on aarch64.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}
 1836 
// Zero bytes, matching the empty encoding above.
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}
 1840 
 1841 #ifndef PRODUCT
// Debug-only: note the empty encoding in listings.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
 1845 #endif
 1846 
 1847 #ifndef PRODUCT
// Debug-only: print an assembly-like description of the prolog,
// mirroring the emission logic in MachPrologNode::emit.
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  if (C->output()->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // Small frames use an immediate sub; large ones materialize the
  // size in rscratch1 first.
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
  // Pseudo-code for the nmethod entry barrier, when one is emitted.
  if (C->stub_function() == NULL && BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) {
    st->print("\n\t");
    st->print("ldr  rscratch1, [guard]\n\t");
    st->print("dmb ishld\n\t");
    st->print("ldr  rscratch2, [rthread, #thread_disarmed_offset]\n\t");
    st->print("cmp  rscratch1, rscratch2\n\t");
    st->print("b.eq skip");
    st->print("\n\t");
    st->print("blr #nmethod_entry_barrier_stub\n\t");
    st->print("b skip\n\t");
    st->print("guard: int\n\t");
    st->print("\n\t");
    st->print("skip:\n\t");
  }
}
 1881 #endif
 1882 
// Emit the method prolog: patchable nop, optional clinit barrier,
// SVE ptrue reinitialization, stack-bang check, frame build and the
// nmethod entry barrier.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const int framesize = C->output()->frame_size_in_bytes();

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  if (C->clinit_barrier_on_entry()) {
    assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");

    Label L_skip_barrier;

    // Check the holder class is initialized; otherwise bail out to
    // the wrong-method stub.
    __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
    __ bind(L_skip_barrier);
  }

  if (C->max_vector_size() >= 16) {
    __ reinitialize_ptrue();
  }

  int bangsize = C->output()->bang_size_in_bytes();
  if (C->output()->need_stack_bang(bangsize))
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  // Stubs skip the nmethod entry barrier.
  if (C->stub_function() == NULL) {
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
    bs->nmethod_entry_barrier(&_masm);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->output()->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    ConstantTable& constant_table = C->output()->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
 1933 
// Prolog size varies (stack bang, clinit barrier, frame shape), so it
// is measured from the emitted code.
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}
 1939 
// The prolog contains no relocatable values.
int MachPrologNode::reloc() const
{
  return 0;
}
 1944 
 1945 //=============================================================================
 1946 
 1947 #ifndef PRODUCT
// Debug-only: print an assembly-like description of the epilog,
// mirroring the emission logic in MachEpilogNode::emit.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  // Frame teardown shape depends on the frame size, as in the prolog.
  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  // Return-path safepoint poll for real method compilations.
  if (do_polling() && C->is_method_compilation()) {
    st->print("# test polling word\n\t");
    st->print("ldr  rscratch1, [rthread],#%d\n\t", in_bytes(JavaThread::polling_word_offset()));
    st->print("cmp  sp, rscratch1\n\t");
    st->print("bhi #slow_path");
  }
}
 1972 #endif
 1973 
// Emit the method epilog: tear down the frame, optionally check the
// reserved stack area, and perform the return safepoint poll.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    // The slow path branches to a shared C2SafepointPollStub; when
    // only measuring size, a dummy label stands in for it.
    Label dummy_label;
    Label* code_stub = &dummy_label;
    if (!C->output()->in_scratch_emit_size()) {
      C2SafepointPollStub* stub = new (C->comp_arena()) C2SafepointPollStub(__ offset());
      C->output()->add_stub(stub);
      code_stub = &stub->entry();
    }
    __ relocate(relocInfo::poll_return_type);
    __ safepoint_poll(*code_stub, true /* at_return */, false /* acquire */, true /* in_nmethod */);
  }
}
 1997 
// Epilog size depends on frame size and polling; measure emitted code.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}
 2002 
int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}
 2007 
// Use the generic MachNode pipeline class for scheduling.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
 2011 
 2012 //=============================================================================
 2013 
 2014 // Figure out which register class each belongs in: rc_int, rc_float or
 2015 // rc_stack.
 2016 enum RC { rc_bad, rc_int, rc_float, rc_predicate, rc_stack };
 2017 
 2018 static enum RC rc_class(OptoReg::Name reg) {
 2019 
 2020   if (reg == OptoReg::Bad) {
 2021     return rc_bad;
 2022   }
 2023 
 2024   // we have 32 int registers * 2 halves
 2025   int slots_of_int_registers = RegisterImpl::max_slots_per_register * RegisterImpl::number_of_registers;
 2026 
 2027   if (reg < slots_of_int_registers) {
 2028     return rc_int;
 2029   }
 2030 
 2031   // we have 32 float register * 8 halves
 2032   int slots_of_float_registers = FloatRegisterImpl::max_slots_per_register * FloatRegisterImpl::number_of_registers;
 2033   if (reg < slots_of_int_registers + slots_of_float_registers) {
 2034     return rc_float;
 2035   }
 2036 
 2037   int slots_of_predicate_registers = PRegisterImpl::max_slots_per_register * PRegisterImpl::number_of_registers;
 2038   if (reg < slots_of_int_registers + slots_of_float_registers + slots_of_predicate_registers) {
 2039     return rc_predicate;
 2040   }
 2041 
 2042   // Between predicate regs & stack is the flags.
 2043   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
 2044 
 2045   return rc_stack;
 2046 }
 2047 
 2048 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
 2049   Compile* C = ra_->C;
 2050 
 2051   // Get registers to move.
 2052   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
 2053   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
 2054   OptoReg::Name dst_hi = ra_->get_reg_second(this);
 2055   OptoReg::Name dst_lo = ra_->get_reg_first(this);
 2056 
 2057   enum RC src_hi_rc = rc_class(src_hi);
 2058   enum RC src_lo_rc = rc_class(src_lo);
 2059   enum RC dst_hi_rc = rc_class(dst_hi);
 2060   enum RC dst_lo_rc = rc_class(dst_lo);
 2061 
 2062   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
 2063 
 2064   if (src_hi != OptoReg::Bad) {
 2065     assert((src_lo&1)==0 && src_lo+1==src_hi &&
 2066            (dst_lo&1)==0 && dst_lo+1==dst_hi,
 2067            "expected aligned-adjacent pairs");
 2068   }
 2069 
 2070   if (src_lo == dst_lo && src_hi == dst_hi) {
 2071     return 0;            // Self copy, no move.
 2072   }
 2073 
 2074   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
 2075               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
 2076   int src_offset = ra_->reg2offset(src_lo);
 2077   int dst_offset = ra_->reg2offset(dst_lo);
 2078 
 2079   if (bottom_type()->isa_vect() != NULL) {
 2080     uint ireg = ideal_reg();
 2081     if (ireg == Op_VecA && cbuf) {
 2082       C2_MacroAssembler _masm(cbuf);
 2083       int sve_vector_reg_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
 2084       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
 2085         // stack->stack
 2086         __ spill_copy_sve_vector_stack_to_stack(src_offset, dst_offset,
 2087                                                 sve_vector_reg_size_in_bytes);
 2088       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
 2089         __ spill_sve_vector(as_FloatRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo),
 2090                             sve_vector_reg_size_in_bytes);
 2091       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
 2092         __ unspill_sve_vector(as_FloatRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo),
 2093                               sve_vector_reg_size_in_bytes);
 2094       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
 2095         __ sve_orr(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2096                    as_FloatRegister(Matcher::_regEncode[src_lo]),
 2097                    as_FloatRegister(Matcher::_regEncode[src_lo]));
 2098       } else {
 2099         ShouldNotReachHere();
 2100       }
 2101     } else if (cbuf) {
 2102       assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
 2103       C2_MacroAssembler _masm(cbuf);
 2104       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
 2105       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
 2106         // stack->stack
 2107         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
 2108         if (ireg == Op_VecD) {
 2109           __ unspill(rscratch1, true, src_offset);
 2110           __ spill(rscratch1, true, dst_offset);
 2111         } else {
 2112           __ spill_copy128(src_offset, dst_offset);
 2113         }
 2114       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
 2115         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2116                ireg == Op_VecD ? __ T8B : __ T16B,
 2117                as_FloatRegister(Matcher::_regEncode[src_lo]));
 2118       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
 2119         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
 2120                  ireg == Op_VecD ? __ D : __ Q,
 2121                  ra_->reg2offset(dst_lo));
 2122       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
 2123         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2124                    ireg == Op_VecD ? __ D : __ Q,
 2125                    ra_->reg2offset(src_lo));
 2126       } else {
 2127         ShouldNotReachHere();
 2128       }
 2129     }
 2130   } else if (cbuf) {
 2131     C2_MacroAssembler _masm(cbuf);
 2132     switch (src_lo_rc) {
 2133     case rc_int:
 2134       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
 2135         if (is64) {
 2136             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
 2137                    as_Register(Matcher::_regEncode[src_lo]));
 2138         } else {
 2139             C2_MacroAssembler _masm(cbuf);
 2140             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
 2141                     as_Register(Matcher::_regEncode[src_lo]));
 2142         }
 2143       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
 2144         if (is64) {
 2145             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2146                      as_Register(Matcher::_regEncode[src_lo]));
 2147         } else {
 2148             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2149                      as_Register(Matcher::_regEncode[src_lo]));
 2150         }
 2151       } else {                    // gpr --> stack spill
 2152         assert(dst_lo_rc == rc_stack, "spill to bad register class");
 2153         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
 2154       }
 2155       break;
 2156     case rc_float:
 2157       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
 2158         if (is64) {
 2159             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
 2160                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2161         } else {
 2162             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
 2163                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2164         }
 2165       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
 2166           if (cbuf) {
 2167             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2168                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2169         } else {
 2170             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2171                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2172         }
 2173       } else {                    // fpr --> stack spill
 2174         assert(dst_lo_rc == rc_stack, "spill to bad register class");
 2175         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
 2176                  is64 ? __ D : __ S, dst_offset);
 2177       }
 2178       break;
 2179     case rc_stack:
 2180       if (dst_lo_rc == rc_int) {  // stack --> gpr load
 2181         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
 2182       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
 2183         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2184                    is64 ? __ D : __ S, src_offset);
 2185       } else {                    // stack --> stack copy
 2186         assert(dst_lo_rc == rc_stack, "spill to bad register class");
 2187         __ unspill(rscratch1, is64, src_offset);
 2188         __ spill(rscratch1, is64, dst_offset);
 2189       }
 2190       break;
 2191     default:
 2192       assert(false, "bad rc_class for spill");
 2193       ShouldNotReachHere();
 2194     }
 2195   }
 2196 
 2197   if (st) {
 2198     st->print("spill ");
 2199     if (src_lo_rc == rc_stack) {
 2200       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
 2201     } else {
 2202       st->print("%s -> ", Matcher::regName[src_lo]);
 2203     }
 2204     if (dst_lo_rc == rc_stack) {
 2205       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
 2206     } else {
 2207       st->print("%s", Matcher::regName[dst_lo]);
 2208     }
 2209     if (bottom_type()->isa_vect() != NULL) {
 2210       int vsize = 0;
 2211       switch (ideal_reg()) {
 2212       case Op_VecD:
 2213         vsize = 64;
 2214         break;
 2215       case Op_VecX:
 2216         vsize = 128;
 2217         break;
 2218       case Op_VecA:
 2219         vsize = Matcher::scalable_vector_reg_size(T_BYTE) * 8;
 2220         break;
 2221       default:
 2222         assert(false, "bad register type for spill");
 2223         ShouldNotReachHere();
 2224       }
 2225       st->print("\t# vector spill size = %d", vsize);
 2226     } else {
 2227       st->print("\t# spill size = %d", is64 ? 64 : 32);
 2228     }
 2229   }
 2230 
 2231   return 0;
 2232 
 2233 }
 2234 
 2235 #ifndef PRODUCT
// Debug-only: describe the spill copy via the shared helper (no
// register allocation yet means only node indices can be shown).
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    implementation(NULL, ra_, false, st);
}
 2242 #endif
 2243 
// Emit the spill copy using the shared helper above.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}
 2247 
// Size varies with the register classes involved; measure emitted code.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 2251 
 2252 //=============================================================================
 2253 
 2254 #ifndef PRODUCT
// Debug-only: describe the box-lock address computation.
void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  // NOTE(review): the format string says "rsp" and has an unmatched
  // ']' — debug-only output, left as-is.
  st->print("add %s, rsp, #%d]\t# box lock",
            Matcher::regName[reg], offset);
}
 2261 #endif
 2262 
// Materialize the box-lock address: destination register = sp + the
// stack offset of the lock slot.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  C2_MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  // This add will handle any 24-bit signed offset. 24 bits allows an
  // 8 megabyte stack frame.
  __ add(as_Register(reg), sp, offset);
}
 2273 
// Size of the add above: one instruction when the offset fits an
// add/sub immediate, otherwise two (the macro assembler expands it).
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());

  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    return NativeInstruction::instruction_size;
  } else {
    return 2 * NativeInstruction::instruction_size;
  }
}
 2284 
 2285 //=============================================================================
 2286 
 2287 #ifndef PRODUCT
// Debug-only: describe the unverified entry point (inline cache check).
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  st->print_cr("# MachUEPNode");
  if (UseCompressedClassPointers) {
    st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
    if (CompressedKlassPointers::shift() != 0) {
      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
    }
  } else {
   st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
  }
  st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
  st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
}
 2302 #endif
 2303 
// Unverified entry point: compare the receiver's klass against the
// inline-cache klass and jump to the IC miss stub on mismatch.
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  C2_MacroAssembler _masm(&cbuf);

  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
 2317 
// Size varies (far_jump expansion); measure emitted code.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
 2322 
 2323 // REQUIRED EMIT CODE
 2324 
 2325 //=============================================================================
 2326 
 2327 // Emit exception handler code.
// Emit exception handler code: a far jump to the exception blob.
// Returns the handler's offset within the stub section, or 0 when the
// code cache is full (a failure is recorded on the compile env).
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  C2_MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
 2346 
 2347 // Emit deopt handler code.
// Emit deopt handler code: set lr to the current pc, then far-jump to
// the deopt blob's unpack entry.  Returns the handler's offset within
// the stub section, or 0 when the code cache is full.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  C2_MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // Materialize the return address in lr before jumping to unpack.
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset == (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
 2367 
 2368 // REQUIRED MATCHER CODE
 2369 
 2370 //=============================================================================
 2371 
 2372 const bool Matcher::match_rule_supported(int opcode) {
 2373   if (!has_match_rule(opcode))
 2374     return false;
 2375 
 2376   bool ret_value = true;
 2377   switch (opcode) {
 2378     case Op_OnSpinWait:
 2379       return VM_Version::supports_on_spin_wait();
 2380     case Op_CacheWB:
 2381     case Op_CacheWBPreSync:
 2382     case Op_CacheWBPostSync:
 2383       if (!VM_Version::supports_data_cache_line_flush()) {
 2384         ret_value = false;
 2385       }
 2386       break;
 2387   }
 2388 
 2389   return ret_value; // Per default match rules are supported.
 2390 }
 2391 
 2392 // Identify extra cases that we might want to provide match rules for vector nodes and
 2393 // other intrinsics guarded with vector length (vlen) and element type (bt).
const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
  // Reject anything the scalar check or the vector-size check rejects.
  if (!match_rule_supported(opcode) || !vector_size_supported(bt, vlen)) {
    return false;
  }
  int bit_size = vlen * type2aelembytes(bt) * 8;
  // Without SVE, vectors are limited to the 128-bit NEON registers.
  if (UseSVE == 0 && bit_size > 128) {
    return false;
  }
  if (UseSVE > 0) {
    // With SVE enabled, supportability is decided per opcode.
    return op_sve_supported(opcode);
  } else { // NEON
    // Special cases
    switch (opcode) {
    case Op_VectorMaskCmp:
    // We don't have VectorReinterpret with bit_size less than 64 support for
    // now, even for byte type. To be refined with fully VectorCast support.
    case Op_VectorReinterpret:
      if (vlen < 2 || bit_size < 64) {
        return false;
      }
      break;
    case Op_MulAddVS2VI:
      if (bit_size < 128) {
        return false;
      }
      break;
    // MulVL (long element multiply) is not supported on NEON.
    case Op_MulVL:
      return false;
    case Op_VectorLoadShuffle:
    case Op_VectorRearrange:
      if (vlen < 4) {
        return false;
      }
      break;
    // Some types of VectorCast are not implemented for now.
    case Op_VectorCastI2X:
      if (bt == T_BYTE) {
        return false;
      }
      break;
    case Op_VectorCastS2X:
      if (vlen < 4 || bit_size < 64) {
        return false;
      }
      break;
    case Op_VectorCastF2X:
    case Op_VectorCastD2X:
      if (bt == T_INT || bt == T_SHORT || bt == T_BYTE || bt == T_LONG) {
        return false;
      }
      break;
    default:
      break;
    }
  }
  return true; // Per default match rules are supported.
}
 2451 
// Register mask for the predicate (PR) registers usable by the allocator.
const RegMask* Matcher::predicate_reg_mask(void) {
  return &_PR_REG_mask;
}
 2455 
// Ideal type for a vector-mask value held in a predicate register.
const TypeVect* Matcher::predicate_reg_type(const Type* elemTy, int length) {
  return new TypeVectMask(elemTy, length);
}
 2459 
// Vector calling convention not yet implemented: vector values are
// never passed or returned in registers across calls on this platform.
const bool Matcher::supports_vector_calling_convention(void) {
  return false;
}
 2464 
// Never expected to be called while supports_vector_calling_convention()
// returns false; Unimplemented() guards against accidental use.
OptoRegPair Matcher::vector_return_value(uint ideal_reg) {
  Unimplemented();
  return OptoRegPair(0, 0);
}
 2469 
// Use the default float register-pressure threshold unchanged.
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}
 2473 
 2474 // Is this branch offset short enough that a short branch can be used?
 2475 //
 2476 // NOTE: If the platform does not provide any short branch variants, then
 2477 //       this method should return false for offset 0.
 2478 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
 2479   // The passed offset is relative to address of the branch.
 2480 
 2481   return (-32768 <= offset && offset < 32768);
 2482 }
 2483 
// Vector width in bytes.
const int Matcher::vector_width_in_bytes(BasicType bt) {
  // The MaxVectorSize should have been set by detecting SVE max vector register size.
  // SVE allows up to 256-byte registers; NEON is capped at 16 bytes.
  int size = MIN2((UseSVE > 0) ? 256 : 16, (int)MaxVectorSize);
  // Minimum 2 values in vector
  if (size < 2*type2aelembytes(bt)) size = 0;
  // But never < 4
  if (size < 4) size = 0;
  // A result of 0 means vectors of this element type are not usable.
  return size;
}
 2494 
// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  // Byte width divided by element size; yields 0 when vectors are unusable.
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
// Minimum vector size (in number of elements) for the given element type.
const int Matcher::min_vector_size(const BasicType bt) {
  int max_size = max_vector_size(bt);
  if ((UseSVE > 0) && (MaxVectorSize >= 16)) {
    // Currently vector length less than SVE vector register size is not supported.
    return max_size;
  } else { // NEON
    // Limit the vector size to 8 bytes
    int size = 8 / type2aelembytes(bt);
    if (bt == T_BYTE) {
      // To support vector api shuffle/rearrange.
      size = 4;
    } else if (bt == T_BOOLEAN) {
      // To support vector api load/store mask.
      size = 2;
    }
    // Never fewer than two elements per vector.
    if (size < 2) size = 2;
    return MIN2(size,max_size);
  }
}
 2518 
// Actual max scalable vector register length.
const int Matcher::scalable_vector_reg_size(const BasicType bt) {
  // Same as the maximum vector size, measured in elements of type bt.
  return Matcher::max_vector_size(bt);
}
 2523 
 2524 // Vector ideal reg.
 2525 const uint Matcher::vector_ideal_reg(int len) {
 2526   if (UseSVE > 0 && 16 <= len && len <= 256) {
 2527     return Op_VecA;
 2528   }
 2529   switch(len) {
 2530     // For 16-bit/32-bit mask vector, reuse VecD.
 2531     case  2:
 2532     case  4:
 2533     case  8: return Op_VecD;
 2534     case 16: return Op_VecX;
 2535   }
 2536   ShouldNotReachHere();
 2537   return 0;
 2538 }
 2539 
// Generic vector operands are not used on this platform.
MachOper* Matcher::pd_specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg, bool is_temp) {
  ShouldNotReachHere(); // generic vector operands not supported
  return NULL;
}
 2544 
// Generic vector operands are not used on this platform.
bool Matcher::is_generic_reg2reg_move(MachNode* m) {
  ShouldNotReachHere();  // generic vector operands not supported
  return false;
}
 2549 
// Generic vector operands are not used on this platform.
bool Matcher::is_generic_vector(MachOper* opnd)  {
  ShouldNotReachHere();  // generic vector operands not supported
  return false;
}
 2554 
// Return whether or not this register is ever used as an argument.
// This function is used on startup to build the trampoline stubs in
// generateOptoStub.  Registers not mentioned will be killed by the VM
// call in the trampoline, and arguments in those registers not be
// available to the callee.
bool Matcher::can_be_java_arg(int reg)
{
  // r0-r7 and v0-v7 are the Java argument registers; both the low
  // (_num) and high (_H_num) halves of each register pair are listed.
  return
    reg ==  R0_num || reg == R0_H_num ||
    reg ==  R1_num || reg == R1_H_num ||
    reg ==  R2_num || reg == R2_H_num ||
    reg ==  R3_num || reg == R3_H_num ||
    reg ==  R4_num || reg == R4_H_num ||
    reg ==  R5_num || reg == R5_H_num ||
    reg ==  R6_num || reg == R6_H_num ||
    reg ==  R7_num || reg == R7_H_num ||
    reg ==  V0_num || reg == V0_H_num ||
    reg ==  V1_num || reg == V1_H_num ||
    reg ==  V2_num || reg == V2_H_num ||
    reg ==  V3_num || reg == V3_H_num ||
    reg ==  V4_num || reg == V4_H_num ||
    reg ==  V5_num || reg == V5_H_num ||
    reg ==  V6_num || reg == V6_H_num ||
    reg ==  V7_num || reg == V7_H_num;
}
 2580 
// Java argument registers may also be used to hold spilled values.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
 2585 
// Do not use a hand-written assembler stub for long division by a
// constant on this platform.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
 2589 
// Register for DIVI projection of divmodI.
// These four projections are unreachable on this platform
// (ShouldNotReachHere) -- presumably combined div/mod nodes are never
// created here; TODO confirm against the platform's DivMod handling.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
 2612 
// Register mask for saving SP across a MethodHandle invoke: FP is used.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
 2616 
 2617 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
 2618   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
 2619     Node* u = addp->fast_out(i);
 2620     if (u->is_LoadStore()) {
 2621       // On AArch64, LoadStoreNodes (i.e. compare and swap
 2622       // instructions) only take register indirect as an operand, so
 2623       // any attempt to use an AddPNode as an input to a LoadStoreNode
 2624       // must fail.
 2625       return false;
 2626     }
 2627     if (u->is_Mem()) {
 2628       int opsize = u->as_Mem()->memory_size();
 2629       assert(opsize > 0, "unexpected memory operand size");
 2630       if (u->as_Mem()->memory_size() != (1<<shift)) {
 2631         return false;
 2632       }
 2633     }
 2634   }
 2635   return true;
 2636 }
 2637 
// Should the matcher clone input 'm' of node 'n'?
bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
  // Clone constant shift counts of vector shifts so the constant stays
  // adjacent to each shift that uses it.
  if (is_vshift_con_pattern(n, m)) { // ShiftV src (ShiftCntV con)
    mstack.push(m, Visit);           // m = ShiftCntV
    return true;
  }
  return false;
}
 2646 
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  // Simple base-plus-constant-offset addresses are handled generically.
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  // Case 1: offset is (LShiftL x con) -- fold the shift into the
  // addressing mode when every memory user accesses (1 << con) bytes.
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    // If the shifted value is (ConvI2L x), subsume the sign-extension
    // into the addressing mode too (matched as a sxtw-scaled index).
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    // Case 2: offset is (ConvI2L x) with no shift -- subsume just the
    // sign-extension into the addressing mode.
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
 2687 
// Emit a volatile memory access (used below with load-acquire /
// store-release instructions such as stlrb).  Only a plain
// register-indirect address is allowed: any index, scale or
// displacement is rejected by the guarantees.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  C2_MacroAssembler _masm(&cbuf);                                       \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
 2696 
 2697 
// Build an Address from a decomposed memory operand (base, index,
// size, disp).  The ADL opcode tells us whether the index register
// holds an int that must be sign-extended (sxtw) before scaling.
static Address mem2address(int opcode, Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
    case INDINDEXI2L:
    case INDINDEXI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    // index == -1 means "no index register": base plus displacement.
    if (index == -1) {
      return Address(base, disp);
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      return Address(base, as_Register(index), scale);
    }
  }
 2723 
 2724 
// Member-function-pointer types used by the loadStore helpers below to
// select the concrete MacroAssembler load/store instruction to emit.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_insn2)(Register Rt, Register adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
 2730 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  static void loadStore(C2_MacroAssembler masm, mem_insn insn,
                        Register reg, int opcode,
                        Register base, int index, int scale, int disp,
                        int size_in_memory)
  {
    Address addr = mem2address(opcode, base, index, scale, disp);
    if (addr.getMode() == Address::base_plus_offset) {
      /* If we get an out-of-range offset it is a bug in the compiler,
         so we assert here. */
      assert(Address::offset_ok_for_immed(addr.offset(), exact_log2(size_in_memory)),
             "c2 compiler bug");
      /* Fix up any out-of-range offsets. */
      // legitimize_address may rewrite the address through rscratch1,
      // hence the different-register checks above it.
      assert_different_registers(rscratch1, base);
      assert_different_registers(rscratch1, reg);
      addr = masm.legitimize_address(addr, size_in_memory, rscratch1);
    }
    (masm.*insn)(reg, addr);
  }
 2752 
  // As above, but for floating-point registers; the address (and the
  // sxtw-vs-lsl decision) is built inline rather than via mem2address.
  static void loadStore(C2_MacroAssembler masm, mem_float_insn insn,
                        FloatRegister reg, int opcode,
                        Register base, int index, int size, int disp,
                        int size_in_memory)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    // index == -1 means "no index register": base plus displacement.
    if (index == -1) {
      /* If we get an out-of-range offset it is a bug in the compiler,
         so we assert here. */
      assert(Address::offset_ok_for_immed(disp, exact_log2(size_in_memory)), "c2 compiler bug");
      /* Fix up any out-of-range offsets. */
      assert_different_registers(rscratch1, base);
      Address addr = Address(base, disp);
      addr = masm.legitimize_address(addr, size_in_memory, rscratch1);
      (masm.*insn)(reg, addr);
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
 2783 
  // As above, but for vector (SIMD) accesses.  NOTE(review): unlike the
  // scalar variants there is no legitimize_address fix-up here --
  // presumably vector operand offsets are always encodable; confirm.
  static void loadStore(C2_MacroAssembler masm, mem_vector_insn insn,
                        FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                        int opcode, Register base, int index, int size, int disp)
  {
    // index == -1 means "no index register": base plus displacement.
    if (index == -1) {
      (masm.*insn)(reg, T, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode");
      (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
    }
  }
 2795 
 2796 %}
 2797 
 2798 
 2799 
 2800 //----------ENCODING BLOCK-----------------------------------------------------
 2801 // This block specifies the encoding classes used by the compiler to
 2802 // output byte streams.  Encoding classes are parameterized macros
 2803 // used by Machine Instruction Nodes in order to generate the bit
 2804 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  Four interfaces are
// currently supported: REG_INTER, CONST_INTER, MEMORY_INTER, &
 2807 // COND_INTER.  REG_INTER causes an operand to generate a function
 2808 // which returns its register number when queried.  CONST_INTER causes
 2809 // an operand to generate a function which returns the value of the
 2810 // constant when queried.  MEMORY_INTER causes an operand to generate
 2811 // four functions which return the Base Register, the Index Register,
 2812 // the Scale Value, and the Offset Value of the operand when queried.
 2813 // COND_INTER causes an operand to generate six functions which return
 2814 // the encoding code (ie - encoding bits for the instruction)
 2815 // associated with each basic boolean condition for a conditional
 2816 // instruction.
 2817 //
 2818 // Instructions specify two basic values for encoding.  Again, a
 2819 // function is available to check if the constant displacement is an
 2820 // oop. They use the ins_encode keyword to specify their encoding
 2821 // classes (which must be a sequence of enc_class names, and their
 2822 // parameters, specified in the encoding block), and they use the
 2823 // opcode keyword to specify, in order, their primary, secondary, and
 2824 // tertiary opcode.  Only the opcode sections which a particular
 2825 // instruction needs for encoding need to be specified.
 2826 encode %{
 2827   // Build emit functions for each basic byte or larger field in the
 2828   // intel encoding scheme (opcode, rm, sib, immediate), and call them
 2829   // from C++ code in the enc_class source block.  Emit functions will
 2830   // live in the main source block for now.  In future, we can
 2831   // generalize this by adding a syntax that specifies the sizes of
 2832   // fields in an order, so that the adlc can build the emit functions
 2833   // automagically
 2834 
  // catch all for unimplemented encodings
  enc_class enc_unimplemented %{
    C2_MacroAssembler _masm(&cbuf);
    // Halts with the given message if this encoding is ever executed;
    // flags C2 rules that do not yet have a real encoding.
    __ unimplemented("C2 catch all");
  %}
 2840 
 2841   // BEGIN Non-volatile memory access
 2842 
 2843   // This encoding class is generated automatically from ad_encode.m4.
 2844   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2845   enc_class aarch64_enc_ldrsbw(iRegI dst, memory1 mem) %{
 2846     Register dst_reg = as_Register($dst$$reg);
 2847     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
 2848                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2849   %}
 2850 
 2851   // This encoding class is generated automatically from ad_encode.m4.
 2852   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2853   enc_class aarch64_enc_ldrsb(iRegI dst, memory1 mem) %{
 2854     Register dst_reg = as_Register($dst$$reg);
 2855     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
 2856                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2857   %}
 2858 
 2859   // This encoding class is generated automatically from ad_encode.m4.
 2860   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2861   enc_class aarch64_enc_ldrb(iRegI dst, memory1 mem) %{
 2862     Register dst_reg = as_Register($dst$$reg);
 2863     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
 2864                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2865   %}
 2866 
 2867   // This encoding class is generated automatically from ad_encode.m4.
 2868   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2869   enc_class aarch64_enc_ldrb(iRegL dst, memory1 mem) %{
 2870     Register dst_reg = as_Register($dst$$reg);
 2871     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
 2872                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2873   %}
 2874 
 2875   // This encoding class is generated automatically from ad_encode.m4.
 2876   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2877   enc_class aarch64_enc_ldrshw(iRegI dst, memory2 mem) %{
 2878     Register dst_reg = as_Register($dst$$reg);
 2879     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
 2880                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2881   %}
 2882 
 2883   // This encoding class is generated automatically from ad_encode.m4.
 2884   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2885   enc_class aarch64_enc_ldrsh(iRegI dst, memory2 mem) %{
 2886     Register dst_reg = as_Register($dst$$reg);
 2887     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
 2888                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2889   %}
 2890 
 2891   // This encoding class is generated automatically from ad_encode.m4.
 2892   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2893   enc_class aarch64_enc_ldrh(iRegI dst, memory2 mem) %{
 2894     Register dst_reg = as_Register($dst$$reg);
 2895     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
 2896                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2897   %}
 2898 
 2899   // This encoding class is generated automatically from ad_encode.m4.
 2900   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2901   enc_class aarch64_enc_ldrh(iRegL dst, memory2 mem) %{
 2902     Register dst_reg = as_Register($dst$$reg);
 2903     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
 2904                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2905   %}
 2906 
 2907   // This encoding class is generated automatically from ad_encode.m4.
 2908   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2909   enc_class aarch64_enc_ldrw(iRegI dst, memory4 mem) %{
 2910     Register dst_reg = as_Register($dst$$reg);
 2911     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
 2912                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2913   %}
 2914 
 2915   // This encoding class is generated automatically from ad_encode.m4.
 2916   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2917   enc_class aarch64_enc_ldrw(iRegL dst, memory4 mem) %{
 2918     Register dst_reg = as_Register($dst$$reg);
 2919     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
 2920                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2921   %}
 2922 
 2923   // This encoding class is generated automatically from ad_encode.m4.
 2924   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2925   enc_class aarch64_enc_ldrsw(iRegL dst, memory4 mem) %{
 2926     Register dst_reg = as_Register($dst$$reg);
 2927     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
 2928                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2929   %}
 2930 
 2931   // This encoding class is generated automatically from ad_encode.m4.
 2932   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2933   enc_class aarch64_enc_ldr(iRegL dst, memory8 mem) %{
 2934     Register dst_reg = as_Register($dst$$reg);
 2935     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
 2936                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 2937   %}
 2938 
 2939   // This encoding class is generated automatically from ad_encode.m4.
 2940   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2941   enc_class aarch64_enc_ldrs(vRegF dst, memory4 mem) %{
 2942     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
 2943     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
 2944                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2945   %}
 2946 
 2947   // This encoding class is generated automatically from ad_encode.m4.
 2948   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2949   enc_class aarch64_enc_ldrd(vRegD dst, memory8 mem) %{
 2950     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
 2951     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
 2952                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 2953   %}
 2954 
 2955   // This encoding class is generated automatically from ad_encode.m4.
 2956   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2957   enc_class aarch64_enc_strb(iRegI src, memory1 mem) %{
 2958     Register src_reg = as_Register($src$$reg);
 2959     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
 2960                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2961   %}
 2962 
 2963   // This encoding class is generated automatically from ad_encode.m4.
 2964   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2965   enc_class aarch64_enc_strb0(memory1 mem) %{
 2966     C2_MacroAssembler _masm(&cbuf);
 2967     loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
 2968                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2969   %}
 2970 
 2971   // This encoding class is generated automatically from ad_encode.m4.
 2972   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2973   enc_class aarch64_enc_strh(iRegI src, memory2 mem) %{
 2974     Register src_reg = as_Register($src$$reg);
 2975     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
 2976                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2977   %}
 2978 
 2979   // This encoding class is generated automatically from ad_encode.m4.
 2980   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2981   enc_class aarch64_enc_strh0(memory2 mem) %{
 2982     C2_MacroAssembler _masm(&cbuf);
 2983     loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
 2984                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2985   %}
 2986 
 2987   // This encoding class is generated automatically from ad_encode.m4.
 2988   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2989   enc_class aarch64_enc_strw(iRegI src, memory4 mem) %{
 2990     Register src_reg = as_Register($src$$reg);
 2991     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
 2992                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2993   %}
 2994 
 2995   // This encoding class is generated automatically from ad_encode.m4.
 2996   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2997   enc_class aarch64_enc_strw0(memory4 mem) %{
 2998     C2_MacroAssembler _masm(&cbuf);
 2999     loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
 3000                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 3001   %}
 3002 
 3003   // This encoding class is generated automatically from ad_encode.m4.
 3004   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3005   enc_class aarch64_enc_str(iRegL src, memory8 mem) %{
 3006     Register src_reg = as_Register($src$$reg);
 3007     // we sometimes get asked to store the stack pointer into the
 3008     // current thread -- we cannot do that directly on AArch64
 3009     if (src_reg == r31_sp) {
 3010       C2_MacroAssembler _masm(&cbuf);
 3011       assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
 3012       __ mov(rscratch2, sp);
 3013       src_reg = rscratch2;
 3014     }
 3015     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
 3016                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 3017   %}
 3018 
 3019   // This encoding class is generated automatically from ad_encode.m4.
 3020   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3021   enc_class aarch64_enc_str0(memory8 mem) %{
 3022     C2_MacroAssembler _masm(&cbuf);
 3023     loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
 3024                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 3025   %}
 3026 
 3027   // This encoding class is generated automatically from ad_encode.m4.
 3028   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3029   enc_class aarch64_enc_strs(vRegF src, memory4 mem) %{
 3030     FloatRegister src_reg = as_FloatRegister($src$$reg);
 3031     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
 3032                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 3033   %}
 3034 
 3035   // This encoding class is generated automatically from ad_encode.m4.
 3036   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3037   enc_class aarch64_enc_strd(vRegD src, memory8 mem) %{
 3038     FloatRegister src_reg = as_FloatRegister($src$$reg);
 3039     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
 3040                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 3041   %}
 3042 
 3043   // This encoding class is generated automatically from ad_encode.m4.
 3044   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3045   enc_class aarch64_enc_strb0_ordered(memory4 mem) %{
 3046       C2_MacroAssembler _masm(&cbuf);
 3047       __ membar(Assembler::StoreStore);
 3048       loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
 3049                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 3050   %}
 3051 
 3052   // END Non-volatile memory access
 3053 
  // Vector loads and stores
  //
  // These use the mem_vector_insn flavour of loadStore; the SIMD
  // register variant (H/S/D/Q) selects the access width.
  enc_class aarch64_enc_ldrvH(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvH(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
 3102 
  // volatile loads and stores
  //
  // These emit store-release instructions (stlrb/stlrh/stlrw) via the
  // MOV_VOLATILE helper, which resolves the base/index/scale/disp
  // address using rscratch1 as a temporary when needed.  The *0
  // variants store the zero register (zr) instead of a source register.

  // Store-release byte.
  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  // Store-release zero byte.
  enc_class aarch64_enc_stlrb0(memory mem) %{
    MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  // Store-release halfword.
  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  // Store-release zero halfword.
  enc_class aarch64_enc_stlrh0(memory mem) %{
    MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  // Store-release word.
  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // Store-release zero word.
  enc_class aarch64_enc_stlrw0(memory mem) %{
    MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}
 3134 
  // Load-acquire encodings.  Each emits a load-acquire (ldarb/ldarh/
  // ldarw/ldar) of the given width via MOV_VOLATILE; the ldars*
  // variants additionally sign-extend the loaded value.

  // Load-acquire byte, sign-extended to 32 bits.
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  // Load-acquire byte, sign-extended to 64 bits.
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  // Load-acquire byte, zero-extended (32-bit result).
  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // Load-acquire byte, zero-extended (64-bit result).
  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // Load-acquire halfword, sign-extended to 32 bits.
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // Load-acquire halfword, sign-extended to 64 bits.
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  // Load-acquire halfword, zero-extended (32-bit result).
  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // Load-acquire halfword, zero-extended (64-bit result).
  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // Load-acquire word (32-bit result).
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // NOTE(review): this enc_class reuses the name aarch64_enc_ldarw for
  // the iRegL variant, whereas the byte/halfword pairs use distinct
  // names (ldarbw/ldarb, ldarhw/ldarh).  Presumably ADLC disambiguates
  // by operand types -- TODO confirm this is intentional.
  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // Load-acquire doubleword.
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}
 3197 
  // Volatile FP loads: perform the load-acquire into rscratch1, then
  // move the raw bits into the destination FP register with fmov.

  // Load-acquire 32-bit float.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // Load-acquire 64-bit double.
  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
 3209 
  // Store-release doubleword.
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      C2_MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      // Copy sp through rscratch2 so a GP store instruction can be used.
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // Store-release zero doubleword.
  enc_class aarch64_enc_stlr0(memory mem) %{
    MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // Volatile FP stores: move the FP bits into rscratch2, then emit the
  // store-release.  The inner scope bounds the temporary _masm so
  // MOV_VOLATILE can create its own below.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      C2_MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // Store-release 64-bit double (see aarch64_enc_fstlrs).
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      C2_MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
 3248 
  // synchronized read/update encodings

  // Load-acquire-exclusive of a 64-bit value.  ldaxr only takes a bare
  // base register, so any index/displacement must first be folded into
  // rscratch1 with lea.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        // base + disp: materialize the address first.
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        // base + (index << scale)
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // base + disp + (index << scale), built in two lea steps.
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
 3279 
  // Store-release-exclusive of a 64-bit value.  Mirrors
  // aarch64_enc_ldaxr's address handling, but uses rscratch2 for the
  // address since rscratch1 receives the stlxr status result.  The
  // final cmpw sets EQ when the store succeeded (status == 0).
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    // Expose success/failure in the condition flags for the matcher.
    __ cmpw(rscratch1, zr);
  %}
 3309 
  // Compare-and-exchange encodings (release-only ordering: acquire is
  // false, release is true).  The memory operand must be a bare base
  // register -- no index or displacement -- which the guarantee checks.

  // 64-bit (xword) CAS.
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit (word) CAS.
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 16-bit (halfword) CAS.
  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 8-bit (byte) CAS.
  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3341 
 3342 
 3343   // The only difference between aarch64_enc_cmpxchg and
 3344   // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
 3345   // CompareAndSwap sequence to serve as a barrier on acquiring a
 3346   // lock.
 3347   enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
 3348     C2_MacroAssembler _masm(&cbuf);
 3349     guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
 3350     __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
 3351                Assembler::xword, /*acquire*/ true, /*release*/ true,
 3352                /*weak*/ false, noreg);
 3353   %}
 3354 
 3355   enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
 3356     C2_MacroAssembler _masm(&cbuf);
 3357     guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
 3358     __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
 3359                Assembler::word, /*acquire*/ true, /*release*/ true,
 3360                /*weak*/ false, noreg);
 3361   %}
 3362 
 3363   enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
 3364     C2_MacroAssembler _masm(&cbuf);
 3365     guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
 3366     __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
 3367                Assembler::halfword, /*acquire*/ true, /*release*/ true,
 3368                /*weak*/ false, noreg);
 3369   %}
 3370 
 3371   enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
 3372     C2_MacroAssembler _masm(&cbuf);
 3373     guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
 3374     __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
 3375                Assembler::byte, /*acquire*/ true, /*release*/ true,
 3376                /*weak*/ false, noreg);
 3377   %}
 3378 
 3379   // auxiliary used for CompareAndSwapX to set result register
 3380   enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
 3381     C2_MacroAssembler _masm(&cbuf);
 3382     Register res_reg = as_Register($res$$reg);
 3383     __ cset(res_reg, Assembler::EQ);
 3384   %}
 3385 
 3386   // prefetch encodings
 3387 
 3388   enc_class aarch64_enc_prefetchw(memory mem) %{
 3389     C2_MacroAssembler _masm(&cbuf);
 3390     Register base = as_Register($mem$$base);
 3391     int index = $mem$$index;
 3392     int scale = $mem$$scale;
 3393     int disp = $mem$$disp;
 3394     if (index == -1) {
 3395       __ prfm(Address(base, disp), PSTL1KEEP);
 3396     } else {
 3397       Register index_reg = as_Register(index);
 3398       if (disp == 0) {
 3399         __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
 3400       } else {
 3401         __ lea(rscratch1, Address(base, disp));
 3402 	__ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
 3403       }
 3404     }
 3405   %}
 3406 
  // mov encodings
 3408 
  // 32-bit immediate move.  Zero is moved from zr; anything else uses
  // the assembler's immediate materialization.
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    C2_MacroAssembler _masm(&cbuf);
    uint32_t con = (uint32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // 64-bit immediate move, same zero special-case as above.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    uint64_t con = (uint64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}
 3430 
  // Pointer-constant move.  Dispatches on the constant's relocation
  // type: oops and metadata need relocatable materialization; plain
  // addresses use adrp+add when they are valid, reachable addresses,
  // otherwise a raw immediate mov.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      // NULL and 1 are handled by dedicated encodings (mov_p0/mov_p1).
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (! __ is_valid_AArch64_address(con) ||
            con < (address)(uintptr_t)os::vm_page_size()) {
          // Not adrp-reachable (or in the first page): use a plain mov.
          __ mov(dst_reg, con);
        } else {
          // Page-relative adrp plus the in-page offset.
          uint64_t offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
 3456 
  // Null-pointer constant: move from zr.
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Pointer constant 1 (used as a marker value).
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (uint64_t)1);
  %}

  // Card-table byte map base constant.
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    C2_MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}
 3473 
  // Narrow (compressed) oop constant.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      // Null narrow oop is handled by aarch64_enc_mov_n0.
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // Null narrow oop: move from zr.
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Narrow (compressed) klass constant.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
 3505 
  // arithmetic encodings

  // 32-bit add/subtract of an immediate.  The encoding is shared by add
  // and sub instructs; $primary selects subtraction by negating the
  // constant, and a negative result is re-expressed as the opposite
  // operation on the positive value so the immediate field stays legal.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // 64-bit add/subtract of an immediate (same scheme as above; the
  // immLAddSub operand presumably keeps the constant in int32 range --
  // TODO confirm against the operand definition).
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
 3535 
 3536   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
 3537     C2_MacroAssembler _masm(&cbuf);
 3538    Register dst_reg = as_Register($dst$$reg);
 3539    Register src1_reg = as_Register($src1$$reg);
 3540    Register src2_reg = as_Register($src2$$reg);
 3541     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
 3542   %}
 3543 
 3544   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
 3545     C2_MacroAssembler _masm(&cbuf);
 3546    Register dst_reg = as_Register($dst$$reg);
 3547    Register src1_reg = as_Register($src1$$reg);
 3548    Register src2_reg = as_Register($src2$$reg);
 3549     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
 3550   %}
 3551 
 3552   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
 3553     C2_MacroAssembler _masm(&cbuf);
 3554    Register dst_reg = as_Register($dst$$reg);
 3555    Register src1_reg = as_Register($src1$$reg);
 3556    Register src2_reg = as_Register($src2$$reg);
 3557     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
 3558   %}
 3559 
 3560   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
 3561     C2_MacroAssembler _masm(&cbuf);
 3562    Register dst_reg = as_Register($dst$$reg);
 3563    Register src1_reg = as_Register($src1$$reg);
 3564    Register src2_reg = as_Register($src2$$reg);
 3565     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
 3566   %}
 3567 
  // compare instruction encodings

  // 32-bit register-register compare.
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32-bit compare against an add/sub-range immediate: encode as
  // subs/adds against zr so only the flags are affected.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // 32-bit compare against an arbitrary immediate: materialize it in
  // rscratch1 first.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    uint32_t val = (uint32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}

  // 64-bit register-register compare.
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare against a 12-bit add/sub immediate.  The val != -val
  // test filters out the one value equal to its own negation.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      __ orr(rscratch1, zr, (uint64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // 64-bit compare against an arbitrary immediate via rscratch1.
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    uint64_t val = (uint64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}

  // Pointer compare (full 64-bit).
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // Narrow-oop compare (32-bit).
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // Pointer null test.
  enc_class aarch64_enc_testp(iRegP src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // Narrow-oop null test.
  enc_class aarch64_enc_testn(iRegN src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
 3651 
  // Unconditional branch to a label.
  enc_class aarch64_enc_b(label lbl) %{
    C2_MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // Conditional branch; the condition code comes from the cmpOp operand.
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    C2_MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // Conditional branch using an unsigned comparison operand.
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    C2_MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}
 3669 
  // Slow-path partial subtype check.  Falls through with result_reg
  // zeroed on success when $primary is set; on a miss, control reaches
  // the bound label with result_reg holding a nonzero value.
  // NOTE(review): the exact $primary convention is defined by the
  // instructs using this encoding -- confirm against those definitions.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     C2_MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
 3687 
  // Java static call.  A null _method means a call to a runtime stub;
  // otherwise an (optimized-)virtual or static call relocation is
  // emitted along with the to-interpreter stub.  On code-cache
  // exhaustion the compile is failed and emission aborts early.
  enc_class aarch64_enc_java_static_call(method meth) %{
    C2_MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }

    // Only non uncommon_trap calls need to reinitialize ptrue.
    if (Compile::current()->max_vector_size() >= 16 && uncommon_trap_request() == 0) {
      __ reinitialize_ptrue();
    }
  %}
 3722 
  // Java dynamic (inline-cache) call.  Fails the compile when the code
  // cache is full; otherwise restores the SVE ptrue predicate register,
  // which the callee may have clobbered, when vectors >= 16 bytes are
  // in use.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    C2_MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    } else if (Compile::current()->max_vector_size() >= 16) {
      __ reinitialize_ptrue();
    }
  %}
 3734 
  // Call epilog; the VerifyStackAtCalls check is unimplemented on
  // AArch64 and traps if the flag is enabled.
  enc_class aarch64_enc_call_epilog() %{
    C2_MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
 3742 
  // Call from compiled Java into the runtime.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    C2_MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blr
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target is in the code cache: reachable via a trampoline call.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      // Out-of-cache target: indirect call through rscratch1, with the
      // return PC saved on the stack for the stack walker.
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blr(rscratch1);
      __ bind(retaddr);
      __ add(sp, sp, 2 * wordSize);
    }
    // Restore the SVE ptrue predicate, possibly clobbered by the callee.
    if (Compile::current()->max_vector_size() >= 16) {
      __ reinitialize_ptrue();
    }
  %}
 3772 
  // Jump to the rethrow stub to re-raise the pending exception.
  enc_class aarch64_enc_rethrow() %{
    C2_MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}

  // Method return; in debug builds first verify the SVE ptrue predicate
  // is still valid when vectors >= 16 bytes are in use.
  enc_class aarch64_enc_ret() %{
    C2_MacroAssembler _masm(&cbuf);
#ifdef ASSERT
    if (Compile::current()->max_vector_size() >= 16) {
      __ verify_ptrue();
    }
#endif
    __ ret(lr);
  %}

  // Tail call: indirect jump to the target method.
  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    C2_MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}

  // Tail jump used for exception forwarding.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    C2_MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
 3803 
 3804 %}
 3805 
 3806 //----------FRAME--------------------------------------------------------------
 3807 // Definition of frame structure and management information.
 3808 //
 3809 //  S T A C K   L A Y O U T    Allocators stack-slot number
 3810 //                             |   (to get allocators register number
 3811 //  G  Owned by    |        |  v    add OptoReg::stack0())
 3812 //  r   CALLER     |        |
 3813 //  o     |        +--------+      pad to even-align allocators stack-slot
 3814 //  w     V        |  pad0  |        numbers; owned by CALLER
 3815 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
 3816 //  h     ^        |   in   |  5
 3817 //        |        |  args  |  4   Holes in incoming args owned by SELF
 3818 //  |     |        |        |  3
 3819 //  |     |        +--------+
 3820 //  V     |        | old out|      Empty on Intel, window on Sparc
 3821 //        |    old |preserve|      Must be even aligned.
 3822 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
 3823 //        |        |   in   |  3   area for Intel ret address
 3824 //     Owned by    |preserve|      Empty on Sparc.
 3825 //       SELF      +--------+
 3826 //        |        |  pad2  |  2   pad to align old SP
 3827 //        |        +--------+  1
 3828 //        |        | locks  |  0
 3829 //        |        +--------+----> OptoReg::stack0(), even aligned
 3830 //        |        |  pad1  | 11   pad to align new SP
 3831 //        |        +--------+
 3832 //        |        |        | 10
 3833 //        |        | spills |  9   spills
 3834 //        V        |        |  8   (pad0 slot for callee)
 3835 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
 3836 //        ^        |  out   |  7
 3837 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
 3838 //     Owned by    +--------+
 3839 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
 3840 //        |    new |preserve|      Must be even-aligned.
 3841 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
 3842 //        |        |        |
 3843 //
 3844 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
 3845 //         known from SELF's arguments and the Java calling convention.
 3846 //         Region 6-7 is determined per call site.
// Note 2: If the calling convention leaves holes in the incoming argument
//         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
//         incoming area, as the Java calling convention is completely under
//         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
//         varargs C calling conventions.
 3854 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
 3855 //         even aligned with pad0 as needed.
 3856 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
 3857 //           (the latter is true on Intel but is it false on AArch64?)
 3858 //         region 6-11 is even aligned; it may be padded out more so that
 3859 //         the region from SP to FP meets the minimum stack alignment.
 3860 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
 3861 //         alignment.  Region 11, pad1, may be dynamically extended so that
 3862 //         SP meets the minimum alignment.
 3863 
 3864 frame %{
 3865   // These three registers define part of the calling convention
 3866   // between compiled code and the interpreter.
 3867 
 3868   // Inline Cache Register or Method for I2C.
 3869   inline_cache_reg(R12);
 3870 
 3871   // Number of stack slots consumed by locking an object
 3872   sync_stack_slots(2);
 3873 
 3874   // Compiled code's Frame Pointer
 3875   frame_pointer(R31);
 3876 
 3877   // Interpreter stores its frame pointer in a register which is
 3878   // stored to the stack by I2CAdaptors.
 3879   // I2CAdaptors convert from interpreted java to compiled java.
 3880   interpreter_frame_pointer(R29);
 3881 
 3882   // Stack alignment requirement
 3883   stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)
 3884 
 3885   // Number of outgoing stack slots killed above the out_preserve_stack_slots
 3886   // for calls to C.  Supports the var-args backing area for register parms.
 3887   varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);
 3888 
 3889   // The after-PROLOG location of the return address.  Location of
 3890   // return address specifies a type (REG or STACK) and a number
 3891   // representing the register number (i.e. - use a register name) or
 3892   // stack slot.
 3893   // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
 3894   // Otherwise, it is above the locks and verification slot and alignment word
 3895   // TODO this may well be correct but need to check why that - 2 is there
 3896   // ppc port uses 0 but we definitely need to allow for fixed_slots
 3897   // which folds in the space used for monitors
 3898   return_addr(STACK - 2 +
 3899               align_up((Compile::current()->in_preserve_stack_slots() +
 3900                         Compile::current()->fixed_slots()),
 3901                        stack_alignment_in_slots()));
 3902 
 3903   // Location of compiled Java return values.  Same as C for now.
 3904   return_value
 3905   %{
 3906     // TODO do we allow ideal_reg == Op_RegN???
 3907     assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
 3908            "only return normal values");
 3909 
 3910     static const int lo[Op_RegL + 1] = { // enum name
 3911       0,                                 // Op_Node
 3912       0,                                 // Op_Set
 3913       R0_num,                            // Op_RegN
 3914       R0_num,                            // Op_RegI
 3915       R0_num,                            // Op_RegP
 3916       V0_num,                            // Op_RegF
 3917       V0_num,                            // Op_RegD
 3918       R0_num                             // Op_RegL
 3919     };
 3920 
 3921     static const int hi[Op_RegL + 1] = { // enum name
 3922       0,                                 // Op_Node
 3923       0,                                 // Op_Set
 3924       OptoReg::Bad,                      // Op_RegN
 3925       OptoReg::Bad,                      // Op_RegI
 3926       R0_H_num,                          // Op_RegP
 3927       OptoReg::Bad,                      // Op_RegF
 3928       V0_H_num,                          // Op_RegD
 3929       R0_H_num                           // Op_RegL
 3930     };
 3931 
 3932     return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
 3933   %}
 3934 %}
 3935 
 3936 //----------ATTRIBUTES---------------------------------------------------------
 3937 //----------Operand Attributes-------------------------------------------------
 3938 op_attrib op_cost(1);        // Required cost attribute
 3939 
 3940 //----------Instruction Attributes---------------------------------------------
 3941 ins_attrib ins_cost(INSN_COST); // Required cost attribute
 3942 ins_attrib ins_size(32);        // Required size attribute (in bits)
 3943 ins_attrib ins_short_branch(0); // Required flag: is this instruction
 3944                                 // a non-matching short branch variant
 3945                                 // of some long branch?
 3946 ins_attrib ins_alignment(4);    // Required alignment attribute (must
 3947                                 // be a power of 2) specifies the
 3948                                 // alignment that some part of the
 3949                                 // instruction (not necessarily the
 3950                                 // start) requires.  If > 1, a
 3951                                 // compute_padding() function must be
 3952                                 // provided for the instruction
 3953 
 3954 //----------OPERANDS-----------------------------------------------------------
 3955 // Operand definitions must precede instruction definitions for correct parsing
 3956 // in the ADLC because operands constitute user defined types which are used in
 3957 // instruction definitions.
 3958 
 3959 //----------Simple Operands----------------------------------------------------
 3960 
 3961 // Integer operands 32 bit
 3962 // 32 bit immediate
 3963 operand immI()
 3964 %{
 3965   match(ConI);
 3966 
 3967   op_cost(0);
 3968   format %{ %}
 3969   interface(CONST_INTER);
 3970 %}
 3971 
 3972 // 32 bit zero
 3973 operand immI0()
 3974 %{
 3975   predicate(n->get_int() == 0);
 3976   match(ConI);
 3977 
 3978   op_cost(0);
 3979   format %{ %}
 3980   interface(CONST_INTER);
 3981 %}
 3982 
 3983 // 32 bit unit increment
 3984 operand immI_1()
 3985 %{
 3986   predicate(n->get_int() == 1);
 3987   match(ConI);
 3988 
 3989   op_cost(0);
 3990   format %{ %}
 3991   interface(CONST_INTER);
 3992 %}
 3993 
 3994 // 32 bit unit decrement
 3995 operand immI_M1()
 3996 %{
 3997   predicate(n->get_int() == -1);
 3998   match(ConI);
 3999 
 4000   op_cost(0);
 4001   format %{ %}
 4002   interface(CONST_INTER);
 4003 %}
 4004 
 4005 // Shift values for add/sub extension shift
 4006 operand immIExt()
 4007 %{
 4008   predicate(0 <= n->get_int() && (n->get_int() <= 4));
 4009   match(ConI);
 4010 
 4011   op_cost(0);
 4012   format %{ %}
 4013   interface(CONST_INTER);
 4014 %}
 4015 
 4016 operand immI_le_4()
 4017 %{
 4018   predicate(n->get_int() <= 4);
 4019   match(ConI);
 4020 
 4021   op_cost(0);
 4022   format %{ %}
 4023   interface(CONST_INTER);
 4024 %}
 4025 
 4026 operand immI_31()
 4027 %{
 4028   predicate(n->get_int() == 31);
 4029   match(ConI);
 4030 
 4031   op_cost(0);
 4032   format %{ %}
 4033   interface(CONST_INTER);
 4034 %}
 4035 
 4036 operand immI_2()
 4037 %{
 4038   predicate(n->get_int() == 2);
 4039   match(ConI);
 4040 
 4041   op_cost(0);
 4042   format %{ %}
 4043   interface(CONST_INTER);
 4044 %}
 4045 
 4046 operand immI_4()
 4047 %{
 4048   predicate(n->get_int() == 4);
 4049   match(ConI);
 4050 
 4051   op_cost(0);
 4052   format %{ %}
 4053   interface(CONST_INTER);
 4054 %}
 4055 
 4056 operand immI_8()
 4057 %{
 4058   predicate(n->get_int() == 8);
 4059   match(ConI);
 4060 
 4061   op_cost(0);
 4062   format %{ %}
 4063   interface(CONST_INTER);
 4064 %}
 4065 
 4066 operand immI_16()
 4067 %{
 4068   predicate(n->get_int() == 16);
 4069   match(ConI);
 4070 
 4071   op_cost(0);
 4072   format %{ %}
 4073   interface(CONST_INTER);
 4074 %}
 4075 
 4076 operand immI_24()
 4077 %{
 4078   predicate(n->get_int() == 24);
 4079   match(ConI);
 4080 
 4081   op_cost(0);
 4082   format %{ %}
 4083   interface(CONST_INTER);
 4084 %}
 4085 
 4086 operand immI_32()
 4087 %{
 4088   predicate(n->get_int() == 32);
 4089   match(ConI);
 4090 
 4091   op_cost(0);
 4092   format %{ %}
 4093   interface(CONST_INTER);
 4094 %}
 4095 
 4096 operand immI_48()
 4097 %{
 4098   predicate(n->get_int() == 48);
 4099   match(ConI);
 4100 
 4101   op_cost(0);
 4102   format %{ %}
 4103   interface(CONST_INTER);
 4104 %}
 4105 
 4106 operand immI_56()
 4107 %{
 4108   predicate(n->get_int() == 56);
 4109   match(ConI);
 4110 
 4111   op_cost(0);
 4112   format %{ %}
 4113   interface(CONST_INTER);
 4114 %}
 4115 
 4116 operand immI_63()
 4117 %{
 4118   predicate(n->get_int() == 63);
 4119   match(ConI);
 4120 
 4121   op_cost(0);
 4122   format %{ %}
 4123   interface(CONST_INTER);
 4124 %}
 4125 
 4126 operand immI_64()
 4127 %{
 4128   predicate(n->get_int() == 64);
 4129   match(ConI);
 4130 
 4131   op_cost(0);
 4132   format %{ %}
 4133   interface(CONST_INTER);
 4134 %}
 4135 
 4136 operand immI_255()
 4137 %{
 4138   predicate(n->get_int() == 255);
 4139   match(ConI);
 4140 
 4141   op_cost(0);
 4142   format %{ %}
 4143   interface(CONST_INTER);
 4144 %}
 4145 
 4146 operand immI_65535()
 4147 %{
 4148   predicate(n->get_int() == 65535);
 4149   match(ConI);
 4150 
 4151   op_cost(0);
 4152   format %{ %}
 4153   interface(CONST_INTER);
 4154 %}
 4155 
 4156 operand immI_positive()
 4157 %{
 4158   predicate(n->get_int() > 0);
 4159   match(ConI);
 4160 
 4161   op_cost(0);
 4162   format %{ %}
 4163   interface(CONST_INTER);
 4164 %}
 4165 
 4166 operand immL_255()
 4167 %{
 4168   predicate(n->get_long() == 255L);
 4169   match(ConL);
 4170 
 4171   op_cost(0);
 4172   format %{ %}
 4173   interface(CONST_INTER);
 4174 %}
 4175 
 4176 operand immL_65535()
 4177 %{
 4178   predicate(n->get_long() == 65535L);
 4179   match(ConL);
 4180 
 4181   op_cost(0);
 4182   format %{ %}
 4183   interface(CONST_INTER);
 4184 %}
 4185 
 4186 operand immL_4294967295()
 4187 %{
 4188   predicate(n->get_long() == 4294967295L);
 4189   match(ConL);
 4190 
 4191   op_cost(0);
 4192   format %{ %}
 4193   interface(CONST_INTER);
 4194 %}
 4195 
 4196 operand immL_bitmask()
 4197 %{
 4198   predicate((n->get_long() != 0)
 4199             && ((n->get_long() & 0xc000000000000000l) == 0)
 4200             && is_power_of_2(n->get_long() + 1));
 4201   match(ConL);
 4202 
 4203   op_cost(0);
 4204   format %{ %}
 4205   interface(CONST_INTER);
 4206 %}
 4207 
 4208 operand immI_bitmask()
 4209 %{
 4210   predicate((n->get_int() != 0)
 4211             && ((n->get_int() & 0xc0000000) == 0)
 4212             && is_power_of_2(n->get_int() + 1));
 4213   match(ConI);
 4214 
 4215   op_cost(0);
 4216   format %{ %}
 4217   interface(CONST_INTER);
 4218 %}
 4219 
 4220 operand immL_positive_bitmaskI()
 4221 %{
 4222   predicate((n->get_long() != 0)
 4223             && ((julong)n->get_long() < 0x80000000ULL)
 4224             && is_power_of_2(n->get_long() + 1));
 4225   match(ConL);
 4226 
 4227   op_cost(0);
 4228   format %{ %}
 4229   interface(CONST_INTER);
 4230 %}
 4231 
 4232 // Scale values for scaled offset addressing modes (up to long but not quad)
 4233 operand immIScale()
 4234 %{
 4235   predicate(0 <= n->get_int() && (n->get_int() <= 3));
 4236   match(ConI);
 4237 
 4238   op_cost(0);
 4239   format %{ %}
 4240   interface(CONST_INTER);
 4241 %}
 4242 
 4243 // 26 bit signed offset -- for pc-relative branches
 4244 operand immI26()
 4245 %{
 4246   predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
 4247   match(ConI);
 4248 
 4249   op_cost(0);
 4250   format %{ %}
 4251   interface(CONST_INTER);
 4252 %}
 4253 
 4254 // 19 bit signed offset -- for pc-relative loads
 4255 operand immI19()
 4256 %{
 4257   predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
 4258   match(ConI);
 4259 
 4260   op_cost(0);
 4261   format %{ %}
 4262   interface(CONST_INTER);
 4263 %}
 4264 
 4265 // 12 bit unsigned offset -- for base plus immediate loads
 4266 operand immIU12()
 4267 %{
 4268   predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
 4269   match(ConI);
 4270 
 4271   op_cost(0);
 4272   format %{ %}
 4273   interface(CONST_INTER);
 4274 %}
 4275 
 4276 operand immLU12()
 4277 %{
 4278   predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
 4279   match(ConL);
 4280 
 4281   op_cost(0);
 4282   format %{ %}
 4283   interface(CONST_INTER);
 4284 %}
 4285 
 4286 // Offset for scaled or unscaled immediate loads and stores
 4287 operand immIOffset()
 4288 %{
 4289   predicate(Address::offset_ok_for_immed(n->get_int(), 0));
 4290   match(ConI);
 4291 
 4292   op_cost(0);
 4293   format %{ %}
 4294   interface(CONST_INTER);
 4295 %}
 4296 
 4297 operand immIOffset1()
 4298 %{
 4299   predicate(Address::offset_ok_for_immed(n->get_int(), 0));
 4300   match(ConI);
 4301 
 4302   op_cost(0);
 4303   format %{ %}
 4304   interface(CONST_INTER);
 4305 %}
 4306 
 4307 operand immIOffset2()
 4308 %{
 4309   predicate(Address::offset_ok_for_immed(n->get_int(), 1));
 4310   match(ConI);
 4311 
 4312   op_cost(0);
 4313   format %{ %}
 4314   interface(CONST_INTER);
 4315 %}
 4316 
 4317 operand immIOffset4()
 4318 %{
 4319   predicate(Address::offset_ok_for_immed(n->get_int(), 2));
 4320   match(ConI);
 4321 
 4322   op_cost(0);
 4323   format %{ %}
 4324   interface(CONST_INTER);
 4325 %}
 4326 
 4327 operand immIOffset8()
 4328 %{
 4329   predicate(Address::offset_ok_for_immed(n->get_int(), 3));
 4330   match(ConI);
 4331 
 4332   op_cost(0);
 4333   format %{ %}
 4334   interface(CONST_INTER);
 4335 %}
 4336 
 4337 operand immIOffset16()
 4338 %{
 4339   predicate(Address::offset_ok_for_immed(n->get_int(), 4));
 4340   match(ConI);
 4341 
 4342   op_cost(0);
 4343   format %{ %}
 4344   interface(CONST_INTER);
 4345 %}
 4346 
 4347 operand immLoffset()
 4348 %{
 4349   predicate(Address::offset_ok_for_immed(n->get_long(), 0));
 4350   match(ConL);
 4351 
 4352   op_cost(0);
 4353   format %{ %}
 4354   interface(CONST_INTER);
 4355 %}
 4356 
 4357 operand immLoffset1()
 4358 %{
 4359   predicate(Address::offset_ok_for_immed(n->get_long(), 0));
 4360   match(ConL);
 4361 
 4362   op_cost(0);
 4363   format %{ %}
 4364   interface(CONST_INTER);
 4365 %}
 4366 
 4367 operand immLoffset2()
 4368 %{
 4369   predicate(Address::offset_ok_for_immed(n->get_long(), 1));
 4370   match(ConL);
 4371 
 4372   op_cost(0);
 4373   format %{ %}
 4374   interface(CONST_INTER);
 4375 %}
 4376 
 4377 operand immLoffset4()
 4378 %{
 4379   predicate(Address::offset_ok_for_immed(n->get_long(), 2));
 4380   match(ConL);
 4381 
 4382   op_cost(0);
 4383   format %{ %}
 4384   interface(CONST_INTER);
 4385 %}
 4386 
 4387 operand immLoffset8()
 4388 %{
 4389   predicate(Address::offset_ok_for_immed(n->get_long(), 3));
 4390   match(ConL);
 4391 
 4392   op_cost(0);
 4393   format %{ %}
 4394   interface(CONST_INTER);
 4395 %}
 4396 
 4397 operand immLoffset16()
 4398 %{
 4399   predicate(Address::offset_ok_for_immed(n->get_long(), 4));
 4400   match(ConL);
 4401 
 4402   op_cost(0);
 4403   format %{ %}
 4404   interface(CONST_INTER);
 4405 %}
 4406 
 4407 // 8 bit signed value.
 4408 operand immI8()
 4409 %{
 4410   predicate(n->get_int() <= 127 && n->get_int() >= -128);
 4411   match(ConI);
 4412 
 4413   op_cost(0);
 4414   format %{ %}
 4415   interface(CONST_INTER);
 4416 %}
 4417 
 4418 // 8 bit signed value (simm8), or #simm8 LSL 8.
 4419 operand immI8_shift8()
 4420 %{
 4421   predicate((n->get_int() <= 127 && n->get_int() >= -128) ||
 4422             (n->get_int() <= 32512 && n->get_int() >= -32768 && (n->get_int() & 0xff) == 0));
 4423   match(ConI);
 4424 
 4425   op_cost(0);
 4426   format %{ %}
 4427   interface(CONST_INTER);
 4428 %}
 4429 
 4430 // 8 bit signed value (simm8), or #simm8 LSL 8.
 4431 operand immL8_shift8()
 4432 %{
 4433   predicate((n->get_long() <= 127 && n->get_long() >= -128) ||
 4434             (n->get_long() <= 32512 && n->get_long() >= -32768 && (n->get_long() & 0xff) == 0));
 4435   match(ConL);
 4436 
 4437   op_cost(0);
 4438   format %{ %}
 4439   interface(CONST_INTER);
 4440 %}
 4441 
 4442 // 32 bit integer valid for add sub immediate
 4443 operand immIAddSub()
 4444 %{
 4445   predicate(Assembler::operand_valid_for_add_sub_immediate((int64_t)n->get_int()));
 4446   match(ConI);
 4447   op_cost(0);
 4448   format %{ %}
 4449   interface(CONST_INTER);
 4450 %}
 4451 
 4452 // 32 bit unsigned integer valid for logical immediate
 4453 // TODO -- check this is right when e.g the mask is 0x80000000
 4454 operand immILog()
 4455 %{
 4456   predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (uint64_t)n->get_int()));
 4457   match(ConI);
 4458 
 4459   op_cost(0);
 4460   format %{ %}
 4461   interface(CONST_INTER);
 4462 %}
 4463 
 4464 // Integer operands 64 bit
 4465 // 64 bit immediate
 4466 operand immL()
 4467 %{
 4468   match(ConL);
 4469 
 4470   op_cost(0);
 4471   format %{ %}
 4472   interface(CONST_INTER);
 4473 %}
 4474 
 4475 // 64 bit zero
 4476 operand immL0()
 4477 %{
 4478   predicate(n->get_long() == 0);
 4479   match(ConL);
 4480 
 4481   op_cost(0);
 4482   format %{ %}
 4483   interface(CONST_INTER);
 4484 %}
 4485 
 4486 // 64 bit unit increment
 4487 operand immL_1()
 4488 %{
 4489   predicate(n->get_long() == 1);
 4490   match(ConL);
 4491 
 4492   op_cost(0);
 4493   format %{ %}
 4494   interface(CONST_INTER);
 4495 %}
 4496 
 4497 // 64 bit unit decrement
 4498 operand immL_M1()
 4499 %{
 4500   predicate(n->get_long() == -1);
 4501   match(ConL);
 4502 
 4503   op_cost(0);
 4504   format %{ %}
 4505   interface(CONST_INTER);
 4506 %}
 4507 
 4508 // 32 bit offset of pc in thread anchor
 4509 
 4510 operand immL_pc_off()
 4511 %{
 4512   predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
 4513                              in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
 4514   match(ConL);
 4515 
 4516   op_cost(0);
 4517   format %{ %}
 4518   interface(CONST_INTER);
 4519 %}
 4520 
 4521 // 64 bit integer valid for add sub immediate
 4522 operand immLAddSub()
 4523 %{
 4524   predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
 4525   match(ConL);
 4526   op_cost(0);
 4527   format %{ %}
 4528   interface(CONST_INTER);
 4529 %}
 4530 
 4531 // 64 bit integer valid for logical immediate
 4532 operand immLLog()
 4533 %{
 4534   predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (uint64_t)n->get_long()));
 4535   match(ConL);
 4536   op_cost(0);
 4537   format %{ %}
 4538   interface(CONST_INTER);
 4539 %}
 4540 
 4541 // Long Immediate: low 32-bit mask
 4542 operand immL_32bits()
 4543 %{
 4544   predicate(n->get_long() == 0xFFFFFFFFL);
 4545   match(ConL);
 4546   op_cost(0);
 4547   format %{ %}
 4548   interface(CONST_INTER);
 4549 %}
 4550 
 4551 // Pointer operands
 4552 // Pointer Immediate
 4553 operand immP()
 4554 %{
 4555   match(ConP);
 4556 
 4557   op_cost(0);
 4558   format %{ %}
 4559   interface(CONST_INTER);
 4560 %}
 4561 
 4562 // NULL Pointer Immediate
 4563 operand immP0()
 4564 %{
 4565   predicate(n->get_ptr() == 0);
 4566   match(ConP);
 4567 
 4568   op_cost(0);
 4569   format %{ %}
 4570   interface(CONST_INTER);
 4571 %}
 4572 
 4573 // Pointer Immediate One
 4574 // this is used in object initialization (initial object header)
 4575 operand immP_1()
 4576 %{
 4577   predicate(n->get_ptr() == 1);
 4578   match(ConP);
 4579 
 4580   op_cost(0);
 4581   format %{ %}
 4582   interface(CONST_INTER);
 4583 %}
 4584 
 4585 // Card Table Byte Map Base
 4586 operand immByteMapBase()
 4587 %{
 4588   // Get base of card map
 4589   predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
 4590             (CardTable::CardValue*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
 4591   match(ConP);
 4592 
 4593   op_cost(0);
 4594   format %{ %}
 4595   interface(CONST_INTER);
 4596 %}
 4597 
 4598 // Pointer Immediate Minus One
 4599 // this is used when we want to write the current PC to the thread anchor
 4600 operand immP_M1()
 4601 %{
 4602   predicate(n->get_ptr() == -1);
 4603   match(ConP);
 4604 
 4605   op_cost(0);
 4606   format %{ %}
 4607   interface(CONST_INTER);
 4608 %}
 4609 
 4610 // Pointer Immediate Minus Two
 4611 // this is used when we want to write the current PC to the thread anchor
 4612 operand immP_M2()
 4613 %{
 4614   predicate(n->get_ptr() == -2);
 4615   match(ConP);
 4616 
 4617   op_cost(0);
 4618   format %{ %}
 4619   interface(CONST_INTER);
 4620 %}
 4621 
 4622 // Float and Double operands
 4623 // Double Immediate
 4624 operand immD()
 4625 %{
 4626   match(ConD);
 4627   op_cost(0);
 4628   format %{ %}
 4629   interface(CONST_INTER);
 4630 %}
 4631 
 4632 // Double Immediate: +0.0d
 4633 operand immD0()
 4634 %{
 4635   predicate(jlong_cast(n->getd()) == 0);
 4636   match(ConD);
 4637 
 4638   op_cost(0);
 4639   format %{ %}
 4640   interface(CONST_INTER);
 4641 %}
 4642 
 4643 // constant 'double +0.0'.
 4644 operand immDPacked()
 4645 %{
 4646   predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
 4647   match(ConD);
 4648   op_cost(0);
 4649   format %{ %}
 4650   interface(CONST_INTER);
 4651 %}
 4652 
 4653 // Float Immediate
 4654 operand immF()
 4655 %{
 4656   match(ConF);
 4657   op_cost(0);
 4658   format %{ %}
 4659   interface(CONST_INTER);
 4660 %}
 4661 
 4662 // Float Immediate: +0.0f.
 4663 operand immF0()
 4664 %{
 4665   predicate(jint_cast(n->getf()) == 0);
 4666   match(ConF);
 4667 
 4668   op_cost(0);
 4669   format %{ %}
 4670   interface(CONST_INTER);
 4671 %}
 4672 
 4673 //
 4674 operand immFPacked()
 4675 %{
 4676   predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
 4677   match(ConF);
 4678   op_cost(0);
 4679   format %{ %}
 4680   interface(CONST_INTER);
 4681 %}
 4682 
 4683 // Narrow pointer operands
 4684 // Narrow Pointer Immediate
 4685 operand immN()
 4686 %{
 4687   match(ConN);
 4688 
 4689   op_cost(0);
 4690   format %{ %}
 4691   interface(CONST_INTER);
 4692 %}
 4693 
 4694 // Narrow NULL Pointer Immediate
 4695 operand immN0()
 4696 %{
 4697   predicate(n->get_narrowcon() == 0);
 4698   match(ConN);
 4699 
 4700   op_cost(0);
 4701   format %{ %}
 4702   interface(CONST_INTER);
 4703 %}
 4704 
 4705 operand immNKlass()
 4706 %{
 4707   match(ConNKlass);
 4708 
 4709   op_cost(0);
 4710   format %{ %}
 4711   interface(CONST_INTER);
 4712 %}
 4713 
 4714 // Integer 32 bit Register Operands
 4715 // Integer 32 bitRegister (excludes SP)
 4716 operand iRegI()
 4717 %{
 4718   constraint(ALLOC_IN_RC(any_reg32));
 4719   match(RegI);
 4720   match(iRegINoSp);
 4721   op_cost(0);
 4722   format %{ %}
 4723   interface(REG_INTER);
 4724 %}
 4725 
 4726 // Integer 32 bit Register not Special
 4727 operand iRegINoSp()
 4728 %{
 4729   constraint(ALLOC_IN_RC(no_special_reg32));
 4730   match(RegI);
 4731   op_cost(0);
 4732   format %{ %}
 4733   interface(REG_INTER);
 4734 %}
 4735 
 4736 // Integer 64 bit Register Operands
 4737 // Integer 64 bit Register (includes SP)
 4738 operand iRegL()
 4739 %{
 4740   constraint(ALLOC_IN_RC(any_reg));
 4741   match(RegL);
 4742   match(iRegLNoSp);
 4743   op_cost(0);
 4744   format %{ %}
 4745   interface(REG_INTER);
 4746 %}
 4747 
 4748 // Integer 64 bit Register not Special
 4749 operand iRegLNoSp()
 4750 %{
 4751   constraint(ALLOC_IN_RC(no_special_reg));
 4752   match(RegL);
 4753   match(iRegL_R0);
 4754   format %{ %}
 4755   interface(REG_INTER);
 4756 %}
 4757 
 4758 // Pointer Register Operands
 4759 // Pointer Register
 4760 operand iRegP()
 4761 %{
 4762   constraint(ALLOC_IN_RC(ptr_reg));
 4763   match(RegP);
 4764   match(iRegPNoSp);
 4765   match(iRegP_R0);
 4766   //match(iRegP_R2);
 4767   //match(iRegP_R4);
 4768   match(iRegP_R5);
 4769   match(thread_RegP);
 4770   op_cost(0);
 4771   format %{ %}
 4772   interface(REG_INTER);
 4773 %}
 4774 
 4775 // Pointer 64 bit Register not Special
 4776 operand iRegPNoSp()
 4777 %{
 4778   constraint(ALLOC_IN_RC(no_special_ptr_reg));
 4779   match(RegP);
 4780   // match(iRegP);
 4781   // match(iRegP_R0);
 4782   // match(iRegP_R2);
 4783   // match(iRegP_R4);
 4784   // match(iRegP_R5);
 4785   // match(thread_RegP);
 4786   op_cost(0);
 4787   format %{ %}
 4788   interface(REG_INTER);
 4789 %}
 4790 
 4791 // Pointer 64 bit Register R0 only
 4792 operand iRegP_R0()
 4793 %{
 4794   constraint(ALLOC_IN_RC(r0_reg));
 4795   match(RegP);
 4796   // match(iRegP);
 4797   match(iRegPNoSp);
 4798   op_cost(0);
 4799   format %{ %}
 4800   interface(REG_INTER);
 4801 %}
 4802 
 4803 // Pointer 64 bit Register R1 only
 4804 operand iRegP_R1()
 4805 %{
 4806   constraint(ALLOC_IN_RC(r1_reg));
 4807   match(RegP);
 4808   // match(iRegP);
 4809   match(iRegPNoSp);
 4810   op_cost(0);
 4811   format %{ %}
 4812   interface(REG_INTER);
 4813 %}
 4814 
 4815 // Pointer 64 bit Register R2 only
 4816 operand iRegP_R2()
 4817 %{
 4818   constraint(ALLOC_IN_RC(r2_reg));
 4819   match(RegP);
 4820   // match(iRegP);
 4821   match(iRegPNoSp);
 4822   op_cost(0);
 4823   format %{ %}
 4824   interface(REG_INTER);
 4825 %}
 4826 
 4827 // Pointer 64 bit Register R3 only
 4828 operand iRegP_R3()
 4829 %{
 4830   constraint(ALLOC_IN_RC(r3_reg));
 4831   match(RegP);
 4832   // match(iRegP);
 4833   match(iRegPNoSp);
 4834   op_cost(0);
 4835   format %{ %}
 4836   interface(REG_INTER);
 4837 %}
 4838 
 4839 // Pointer 64 bit Register R4 only
 4840 operand iRegP_R4()
 4841 %{
 4842   constraint(ALLOC_IN_RC(r4_reg));
 4843   match(RegP);
 4844   // match(iRegP);
 4845   match(iRegPNoSp);
 4846   op_cost(0);
 4847   format %{ %}
 4848   interface(REG_INTER);
 4849 %}
 4850 
 4851 // Pointer 64 bit Register R5 only
 4852 operand iRegP_R5()
 4853 %{
 4854   constraint(ALLOC_IN_RC(r5_reg));
 4855   match(RegP);
 4856   // match(iRegP);
 4857   match(iRegPNoSp);
 4858   op_cost(0);
 4859   format %{ %}
 4860   interface(REG_INTER);
 4861 %}
 4862 
 4863 // Pointer 64 bit Register R10 only
 4864 operand iRegP_R10()
 4865 %{
 4866   constraint(ALLOC_IN_RC(r10_reg));
 4867   match(RegP);
 4868   // match(iRegP);
 4869   match(iRegPNoSp);
 4870   op_cost(0);
 4871   format %{ %}
 4872   interface(REG_INTER);
 4873 %}
 4874 
 4875 // Long 64 bit Register R0 only
 4876 operand iRegL_R0()
 4877 %{
 4878   constraint(ALLOC_IN_RC(r0_reg));
 4879   match(RegL);
 4880   match(iRegLNoSp);
 4881   op_cost(0);
 4882   format %{ %}
 4883   interface(REG_INTER);
 4884 %}
 4885 
 4886 // Long 64 bit Register R2 only
 4887 operand iRegL_R2()
 4888 %{
 4889   constraint(ALLOC_IN_RC(r2_reg));
 4890   match(RegL);
 4891   match(iRegLNoSp);
 4892   op_cost(0);
 4893   format %{ %}
 4894   interface(REG_INTER);
 4895 %}
 4896 
 4897 // Long 64 bit Register R3 only
 4898 operand iRegL_R3()
 4899 %{
 4900   constraint(ALLOC_IN_RC(r3_reg));
 4901   match(RegL);
 4902   match(iRegLNoSp);
 4903   op_cost(0);
 4904   format %{ %}
 4905   interface(REG_INTER);
 4906 %}
 4907 
 4908 // Long 64 bit Register R11 only
 4909 operand iRegL_R11()
 4910 %{
 4911   constraint(ALLOC_IN_RC(r11_reg));
 4912   match(RegL);
 4913   match(iRegLNoSp);
 4914   op_cost(0);
 4915   format %{ %}
 4916   interface(REG_INTER);
 4917 %}
 4918 
 4919 // Pointer 64 bit Register FP only
 4920 operand iRegP_FP()
 4921 %{
 4922   constraint(ALLOC_IN_RC(fp_reg));
 4923   match(RegP);
 4924   // match(iRegP);
 4925   op_cost(0);
 4926   format %{ %}
 4927   interface(REG_INTER);
 4928 %}
 4929 
 4930 // Register R0 only
 4931 operand iRegI_R0()
 4932 %{
 4933   constraint(ALLOC_IN_RC(int_r0_reg));
 4934   match(RegI);
 4935   match(iRegINoSp);
 4936   op_cost(0);
 4937   format %{ %}
 4938   interface(REG_INTER);
 4939 %}
 4940 
 4941 // Register R2 only
 4942 operand iRegI_R2()
 4943 %{
 4944   constraint(ALLOC_IN_RC(int_r2_reg));
 4945   match(RegI);
 4946   match(iRegINoSp);
 4947   op_cost(0);
 4948   format %{ %}
 4949   interface(REG_INTER);
 4950 %}
 4951 
 4952 // Register R3 only
 4953 operand iRegI_R3()
 4954 %{
 4955   constraint(ALLOC_IN_RC(int_r3_reg));
 4956   match(RegI);
 4957   match(iRegINoSp);
 4958   op_cost(0);
 4959   format %{ %}
 4960   interface(REG_INTER);
 4961 %}
 4962 
 4963 
 4964 // Register R4 only
 4965 operand iRegI_R4()
 4966 %{
 4967   constraint(ALLOC_IN_RC(int_r4_reg));
 4968   match(RegI);
 4969   match(iRegINoSp);
 4970   op_cost(0);
 4971   format %{ %}
 4972   interface(REG_INTER);
 4973 %}
 4974 
 4975 
 4976 // Pointer Register Operands
 4977 // Narrow Pointer Register
 4978 operand iRegN()
 4979 %{
 4980   constraint(ALLOC_IN_RC(any_reg32));
 4981   match(RegN);
 4982   match(iRegNNoSp);
 4983   op_cost(0);
 4984   format %{ %}
 4985   interface(REG_INTER);
 4986 %}
 4987 
 4988 operand iRegN_R0()
 4989 %{
 4990   constraint(ALLOC_IN_RC(r0_reg));
 4991   match(iRegN);
 4992   op_cost(0);
 4993   format %{ %}
 4994   interface(REG_INTER);
 4995 %}
 4996 
 4997 operand iRegN_R2()
 4998 %{
 4999   constraint(ALLOC_IN_RC(r2_reg));
 5000   match(iRegN);
 5001   op_cost(0);
 5002   format %{ %}
 5003   interface(REG_INTER);
 5004 %}
 5005 
 5006 operand iRegN_R3()
 5007 %{
 5008   constraint(ALLOC_IN_RC(r3_reg));
 5009   match(iRegN);
 5010   op_cost(0);
 5011   format %{ %}
 5012   interface(REG_INTER);
 5013 %}
 5014 
 5015 // Integer 64 bit Register not Special
 5016 operand iRegNNoSp()
 5017 %{
 5018   constraint(ALLOC_IN_RC(no_special_reg32));
 5019   match(RegN);
 5020   op_cost(0);
 5021   format %{ %}
 5022   interface(REG_INTER);
 5023 %}
 5024 
 5025 // heap base register -- used for encoding immN0
 5026 
 5027 operand iRegIHeapbase()
 5028 %{
 5029   constraint(ALLOC_IN_RC(heapbase_reg));
 5030   match(RegI);
 5031   op_cost(0);
 5032   format %{ %}
 5033   interface(REG_INTER);
 5034 %}
 5035 
 5036 // Float Register
 5037 // Float register operands
 5038 operand vRegF()
 5039 %{
 5040   constraint(ALLOC_IN_RC(float_reg));
 5041   match(RegF);
 5042 
 5043   op_cost(0);
 5044   format %{ %}
 5045   interface(REG_INTER);
 5046 %}
 5047 
 5048 // Double Register
 5049 // Double register operands
 5050 operand vRegD()
 5051 %{
 5052   constraint(ALLOC_IN_RC(double_reg));
 5053   match(RegD);
 5054 
 5055   op_cost(0);
 5056   format %{ %}
 5057   interface(REG_INTER);
 5058 %}
 5059 
 5060 // Generic vector class. This will be used for
 5061 // all vector operands, including NEON and SVE,
 5062 // but currently only used for SVE VecA.
 5063 operand vReg()
 5064 %{
 5065   constraint(ALLOC_IN_RC(vectora_reg));
 5066   match(VecA);
 5067   op_cost(0);
 5068   format %{ %}
 5069   interface(REG_INTER);
 5070 %}
 5071 
 5072 operand vecD()
 5073 %{
 5074   constraint(ALLOC_IN_RC(vectord_reg));
 5075   match(VecD);
 5076 
 5077   op_cost(0);
 5078   format %{ %}
 5079   interface(REG_INTER);
 5080 %}
 5081 
 5082 operand vecX()
 5083 %{
 5084   constraint(ALLOC_IN_RC(vectorx_reg));
 5085   match(VecX);
 5086 
 5087   op_cost(0);
 5088   format %{ %}
 5089   interface(REG_INTER);
 5090 %}
 5091 
// Fixed-register double operands, one per FP/SIMD register V0..V31.
// Each allocates from a single-register class (vN_reg) so that an
// instruct rule can pin a double value to one particular V register.
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V4()
%{
  constraint(ALLOC_IN_RC(v4_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V5()
%{
  constraint(ALLOC_IN_RC(v5_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V6()
%{
  constraint(ALLOC_IN_RC(v6_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V7()
%{
  constraint(ALLOC_IN_RC(v7_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V8()
%{
  constraint(ALLOC_IN_RC(v8_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V9()
%{
  constraint(ALLOC_IN_RC(v9_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V10()
%{
  constraint(ALLOC_IN_RC(v10_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V11()
%{
  constraint(ALLOC_IN_RC(v11_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V12()
%{
  constraint(ALLOC_IN_RC(v12_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V13()
%{
  constraint(ALLOC_IN_RC(v13_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V14()
%{
  constraint(ALLOC_IN_RC(v14_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V15()
%{
  constraint(ALLOC_IN_RC(v15_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V16()
%{
  constraint(ALLOC_IN_RC(v16_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V17()
%{
  constraint(ALLOC_IN_RC(v17_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V18()
%{
  constraint(ALLOC_IN_RC(v18_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V19()
%{
  constraint(ALLOC_IN_RC(v19_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V20()
%{
  constraint(ALLOC_IN_RC(v20_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V21()
%{
  constraint(ALLOC_IN_RC(v21_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V22()
%{
  constraint(ALLOC_IN_RC(v22_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V23()
%{
  constraint(ALLOC_IN_RC(v23_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V24()
%{
  constraint(ALLOC_IN_RC(v24_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V25()
%{
  constraint(ALLOC_IN_RC(v25_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V26()
%{
  constraint(ALLOC_IN_RC(v26_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V27()
%{
  constraint(ALLOC_IN_RC(v27_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V28()
%{
  constraint(ALLOC_IN_RC(v28_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V29()
%{
  constraint(ALLOC_IN_RC(v29_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V30()
%{
  constraint(ALLOC_IN_RC(v30_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V31()
%{
  constraint(ALLOC_IN_RC(v31_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5379 
// Governing predicate register operand for vector masks (RegVectMask);
// allocates from the gov_pr predicate-register class.
operand pRegGov()
%{
  constraint(ALLOC_IN_RC(gov_pr));
  match(RegVectMask);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5388 
 5389 // Flags register, used as output of signed compare instructions
 5390 
// note that on AArch64 we also use this register as the output
// for floating point compare instructions (CmpF CmpD). this ensures
 5393 // that ordered inequality tests use GT, GE, LT or LE none of which
 5394 // pass through cases where the result is unordered i.e. one or both
 5395 // inputs to the compare is a NaN. this means that the ideal code can
 5396 // replace e.g. a GT with an LE and not end up capturing the NaN case
 5397 // (where the comparison should always fail). EQ and NE tests are
 5398 // always generated in ideal code so that unordered folds into the NE
 5399 // case, matching the behaviour of AArch64 NE.
 5400 //
 5401 // This differs from x86 where the outputs of FP compares use a
 5402 // special FP flags registers and where compares based on this
 5403 // register are distinguished into ordered inequalities (cmpOpUCF) and
 5404 // EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
 5405 // to explicitly handle the unordered case in branches. x86 also has
 5406 // to include extra CMoveX rules to accept a cmpOpUCF input.
 5407 
// Flags register operand for signed integral and fp compares; see the
// discussion above on how FP compares map onto these condition flags.
operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions.
// Same physical flags, but selecting this operand steers the matcher
// towards the unsigned condition-code mnemonics (see cmpOpU below).
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
 5428 
 5429 // Special Registers
 5430 
 5431 // Method Register
// Method Register -- pinned to the inline-cache register; also accepts
// any non-SP pointer register via the secondary match.
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register -- pinned to the dedicated thread register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg (comment previously said link_reg -- copy-paste)
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5460 
 5461 //----------Memory Operands----------------------------------------------------
 5462 
// [reg] -- indirect addressing through a pointer register, no offset.
// index(0xffffffff) is the ADLC encoding for "no index register".
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// base + (sign-extended 32-bit index << scale). The predicate only
// accepts the form when the scaled offset is legal for every memory
// use of the AddP node.
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// base + (64-bit index << scale), same size_fits_all_mem_uses guard.
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// base + sign-extended 32-bit index, no scaling
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// base + 64-bit index, no scaling
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
 5534 
// "[reg, #off]" -- base register plus int immediate offset. The numeric
// suffix selects the immIOffset<n> immediate, i.e. an offset that is
// valid for an n-byte-wide access (see the immIOffset* definitions
// earlier in the file); plain indOffI takes the unsuffixed immIOffset.
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI1(iRegP reg, immIOffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI2(iRegP reg, immIOffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5618 
// "[reg, #off]" -- base register plus long immediate offset; the
// numeric suffix selects the immLoffset<n> immediate for n-byte
// accesses, mirroring the indOffI* family above.
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL1(iRegP reg, immLoffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL2(iRegP reg, immLoffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5702 
// Narrow-oop (compressed pointer) memory operands. Each requires
// CompressedOops::shift() == 0 so the DecodeN costs no shift.
// NOTE(review): presumably the heap base is also null in this mode so
// the narrow value is usable directly as an address -- confirm against
// the CompressedOops mode selection logic.
operand indirectN(iRegN reg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// narrow base + (sign-extended 32-bit index << scale)
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// narrow base + (64-bit index << scale)
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// narrow base + sign-extended 32-bit index
operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// narrow base + 64-bit index
operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// narrow base + int immediate offset
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// narrow base + long immediate offset
operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5807 
 5808 
 5809 
 5810 // AArch64 opto stubs need to write to the pc slot in the thread anchor
// [thread, #pc_off] -- address of the pc slot in the current thread's
// frame anchor; used by opto stubs that must record the last Java pc.
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5824 
 5825 //----------Special Memory Operands--------------------------------------------
 5826 // Stack Slot Operand - This operand is used for loading and storing temporary
 5827 //                      values on the stack where a match requires a value to
 5828 //                      flow through memory.
// Pointer stack slot.
// NOTE(review): unlike the other stackSlot operands below, this one
// carries op_cost(100) -- confirm the asymmetry is intentional.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Int stack slot
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Float stack slot
operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Double stack slot
operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Long stack slot
operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
 5899 
 5900 // Operands for expressing Control Flow
 5901 // NOTE: Label is a predefined operand which should not be redefined in
 5902 //       the AD file. It is generically handled within the ADLC.
 5903 
 5904 //----------Conditional Branch Operands----------------------------------------
 5905 // Comparison Op  - This is the operation of the comparison, and is limited to
 5906 //                  the following set of codes:
 5907 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
 5908 //
 5909 // Other attributes of the comparison, such as unsignedness, are specified
 5910 // by the comparison instruction that sets a condition code flags register.
 5911 // That result is represented by a flags operand whose subtype is appropriate
 5912 // to the unsignedness (etc.) of the comparison.
 5913 //
 5914 // Later, the instruction which matches both the Comparison Op (a Bool) and
 5915 // the flags (produced by the Cmp) specifies the coding of the comparison op
 5916 // by matching a specific subtype of Bool operand below, such as cmpOpU.
 5917 
// used for signed integral comparisons and fp comparisons.
// In each pair below the first value is the AArch64 condition-code
// encoding and the second the assembler mnemonic.

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons -- the ordered tests use the
// unsigned condition codes lo/hs/ls/hi instead of lt/ge/le/gt

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions; the predicate
// restricts the Bool to eq/ne tests only

operand cmpOpEqNe()
%{
  match(Bool);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions; restricted to lt/ge tests

operand cmpOpLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions; restricted to eq/ne/lt/ge

operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 6028 
 6029 // Special operand allowing long args to int ops to be truncated for free
 6030 
 6031 operand iRegL2I(iRegL reg) %{
 6032 
 6033   op_cost(0);
 6034 
 6035   match(ConvL2I reg);
 6036 
 6037   format %{ "l2i($reg)" %}
 6038 
 6039   interface(REG_INTER)
 6040 %}
 6041 
// Vector memory operand classes, grouped by access size in bytes; each
// accepts plain indirect, base+index, and the immediate-offset forms
// whose immIOffset<n>/immLoffset<n> suffix matches that size.
opclass vmem2(indirect, indIndex, indOffI2, indOffL2);
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
 6046 
 6047 //----------OPERAND CLASSES----------------------------------------------------
 6048 // Operand Classes are groups of operands that are used as to simplify
 6049 // instruction definitions by not requiring the AD writer to specify
 6050 // separate instructions for every form of operand when the
 6051 // instruction accepts multiple operand types with the same basic
 6052 // encoding and format. The classic case of this is memory operands.
 6053 
 6054 // memory is used to define read/write location for load/store
 6055 // instruction defs. we can turn a memory op into an Address
 6056 
// memory<n> groups every addressing form legal for an n-byte access.
// Note memory1 and memory2 omit the narrow-oop immediate-offset forms
// (indOffIN/indOffLN), which only appear from memory4 upwards.
opclass memory1(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI1, indOffL1,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN);

opclass memory2(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI2, indOffL2,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN);

opclass memory4(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI4, indOffL4,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

opclass memory8(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

// All of the memory operands. For the pipeline description.
opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex,
               indOffI1, indOffL1, indOffI2, indOffL2, indOffI4, indOffL4, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);
 6073 
 6074 
 6075 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
 6076 // operations. it allows the src to be either an iRegI or a (ConvL2I
 6077 // iRegL). in the latter case the l2i normally planted for a ConvL2I
 6078 // can be elided because the 32-bit instruction will just employ the
 6079 // lower 32 bits anyway.
 6080 //
 6081 // n.b. this does not elide all L2I conversions. if the truncated
 6082 // value is consumed by more than one operation then the ConvL2I
 6083 // cannot be bundled into the consuming nodes so an l2i gets planted
 6084 // (actually a movw $dst $src) and the downstream instructions consume
 6085 // the result of the l2i as an iRegI input. That's a shame since the
 6086 // movw is actually redundant but its not too costly.
 6087 
// accepts either a plain int register or a free-truncated long source
opclass iRegIorL2I(iRegI, iRegL2I);
 6089 
 6090 //----------PIPELINE-----------------------------------------------------------
 6091 // Rules which define the behavior of the target architectures pipeline.
 6092 
 6093 // For specific pipelines, eg A53, define the stages of that pipeline
 6094 //pipe_desc(ISS, EX1, EX2, WR);
// Map the A53-style stage names (issue, execute 1/2, writeback) onto
// the generic S0..S5 stages declared in pipe_desc below.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
 6099 
 6100 // Integer ALU reg operation
 6101 pipeline %{
 6102 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 4;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
 6115 
 6116 // We don't use an actual pipeline model so don't care about resources
 6117 // or description. we do use pipeline classes to introduce fixed
 6118 // latencies
 6119 
 6120 //----------RESOURCES----------------------------------------------------------
 6121 // Resources are the functional units available to the machine
 6122 
// Functional units: two issue slots (INS0/INS1, either usable via
// INS01), two ALUs, a multiply-accumulate unit, a divider, a branch
// unit, a load/store unit, and the NEON/FP unit.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);
 6130 
 6131 //----------PIPELINE DESCRIPTION-----------------------------------------------
 6132 // Pipeline Description specifies the stages in the machine's pipeline
 6133 
// Define the pipeline as a generic 6-stage pipeline (stages S0..S5)
pipe_desc(S0, S1, S2, S3, S4, S5);
 6136 
 6137 //----------PIPELINE CLASSES---------------------------------------------------
 6138 // Pipeline Classes describe the stages in which input and output are
 6139 // referenced by the hardware pipeline.
 6140 
 6141 pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
 6142 %{
 6143   single_instruction;
 6144   src1   : S1(read);
 6145   src2   : S2(read);
 6146   dst    : S5(write);
 6147   INS01  : ISS;
 6148   NEON_FP : S5;
 6149 %}
 6150 
// FP double dyadic op: sources read in S1/S2, result written in S5 on the
// NEON/FP pipe; may issue in either decode slot (INS01).
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP single-precision unary op: src read in S1, result in S5.
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP double-precision unary op.
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP double -> float conversion.
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP float -> double conversion.
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP float -> int conversion.
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP float -> long conversion.
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Int -> FP float conversion.
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Long -> FP float conversion.
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP double -> int conversion.
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP double -> long conversion.
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Int -> FP double conversion.
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Long -> FP double conversion.
// NOTE(review): src is declared iRegIorL2I while the analogous fp_l2f uses
// iRegL — looks copy-pasted from fp_i2d; confirm intended.
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
 6268 
// FP single divide: restricted to issue slot 0 (INS0).
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP double divide: restricted to issue slot 0 (INS0).
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP single conditional select: reads the flags and both sources in S1,
// result in S3.
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP double conditional select.
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP single immediate move (no register sources), result in S3.
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP double immediate move.
pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP single load from the constant table, result in S4.
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

// FP double load from the constant table.
pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
 6342 
//------- Vector (NEON) pipeline operations ---------------
// 64-bit (vecD) forms may dual issue (INS01); 128-bit (vecX)
// forms are restricted to issue slot 0 (INS0).

// Vector multiply, 64-bit.
pipe_class vmul64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply, 128-bit.
pipe_class vmul128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 64-bit: dst is also read in S1 (accumulator).
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 128-bit: dst is also read in S1 (accumulator).
pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector integer dyadic op, 64-bit: sources read in S2, result in S4.
pipe_class vdop64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S4;
%}

// Vector integer dyadic op, 128-bit.
pipe_class vdop128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S4;
%}

// Vector logical op, 64-bit: result available earlier (S3) than arithmetic.
pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector logical op, 128-bit.
pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector shift by register, 64-bit.
pipe_class vshift64(vecD dst, vecD src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by register, 128-bit.
pipe_class vshift128(vecX dst, vecX src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 64-bit (shift amount encoded, not read).
pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 128-bit.
pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}
 6462 
// Vector FP dyadic op, 64-bit.
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP dyadic op, 128-bit (issue slot 0 only).
pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 64-bit.
// NOTE(review): uses INS0 (slot 0 only) unlike the other 64-bit vector
// classes which use INS01 — confirm intended.
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 128-bit.
pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP square root, 128-bit.
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP unary op, 64-bit.
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP unary op, 128-bit.
pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}
 6529 
// Duplicate a general register into all lanes, 64-bit vector.
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a general register into all lanes, 128-bit vector.
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a float register into all lanes, 64-bit vector.
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a float register into all lanes, 128-bit vector.
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a double register into all lanes, 128-bit vector.
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate, 64-bit.
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate, 128-bit (issue slot 0 only).
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}
 6590 
// Vector load, 64-bit: address consumed at issue, result in S5.
pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector load, 128-bit.
pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector store, 64-bit: address at issue, data read in S2.
pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector store, 128-bit.
// NOTE(review): src is declared vecD although this is the 128-bit (vmem16)
// variant — looks copy-pasted from vstore_reg_mem64; confirm.
pipe_class vstore_reg_mem128(vecD src, vmem16 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
 6626 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  // NOTE(review): ALU is booked at EX1 although dst is written (and the
  // comment says the result is generated) in EX2 — confirm intended.
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
 6724 
//------- Compare operation -------------------------------

// Compare reg-reg
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-imm
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
 6751 
//------- Conditional instructions ------------------------

// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// EG.  CINC    x0, x1, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
 6789 
//------- Multiply pipeline operations --------------------

// Multiply reg-reg
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long multiply reg-reg
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long multiply accumulate
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
 6842 
//------- Divide pipeline operations --------------------

// 32 bit divide
// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// 64 bit divide
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
 6868 
//------- Load pipeline operations ------------------------

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
 6902 
//------- Store pipeline operations -----------------------

// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
// Note: both operands are read — "dst" here is the index/address register.
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
 6936 
//------- Branch pipeline operations ----------------------

// Branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
 6965 
//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Empty pipeline class
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}

// Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty;
%}
 7029 
 7030 %}
 7031 //----------INSTRUCTIONS-------------------------------------------------------
 7032 //
 7033 // match      -- States which machine-independent subtree may be replaced
 7034 //               by this instruction.
 7035 // ins_cost   -- The estimated cost of this instruction is used by instruction
 7036 //               selection to identify a minimum cost tree of machine
 7037 //               instructions that matches a tree of machine-independent
 7038 //               instructions.
 7039 // format     -- A string providing the disassembly for this instruction.
 7040 //               The value of an instruction's operand may be inserted
 7041 //               by referring to it with a '$' prefix.
 7042 // opcode     -- Three instruction opcodes may be provided.  These are referred
 7043 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
 7045 //               indicate the type of machine instruction, while secondary
 7046 //               and tertiary are often used for prefix options or addressing
 7047 //               modes.
 7048 // ins_encode -- A list of encode classes with parameters. The encode class
 7049 //               name must have been defined in an 'enc_class' specification
 7050 //               in the encode section of the architecture description.
 7051 
 7052 // ============================================================================
 7053 // Memory (Load/Store) Instructions
 7054 
 7055 // Load Instructions
 7056 
// Load Byte (8 bit signed)
instruct loadB(iRegINoSp dst, memory1 mem)
%{
  match(Set dst (LoadB mem));
  // Plain loads only; acquiring (volatile) loads are excluded here.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L(iRegLNoSp dst, memory1 mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  // n->in(1) is the LoadB feeding the ConvI2L.
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory1 mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L(iRegLNoSp dst, memory1 mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7112 
// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory2 mem)
%{
  match(Set dst (LoadS mem));
  // Plain loads only; acquiring (volatile) loads are excluded here.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory2 mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory2 mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory2 mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7168 
// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory4 mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long
instruct loadI2L(iRegLNoSp dst, memory4 mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
// The AndL with the 32-bit mask folds the zero extension into the ldrw.
instruct loadUI2L(iRegLNoSp dst, memory4 mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  // n->in(1)->in(1) is the LoadI under the ConvI2L under the AndL.
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7210 
 7211 // Load Long (64 bit signed)
 7212 instruct loadL(iRegLNoSp dst, memory8 mem)
 7213 %{
 7214   match(Set dst (LoadL mem));
 7215   predicate(!needs_acquiring_load(n));
 7216 
 7217   ins_cost(4 * INSN_COST);
 7218   format %{ "ldr  $dst, $mem\t# int" %}
 7219 
 7220   ins_encode(aarch64_enc_ldr(dst, mem));
 7221 
 7222   ins_pipe(iload_reg_mem);
 7223 %}
 7224 
// Load Range (array length); never volatile, so no acquiring predicate.
instruct loadRange(iRegINoSp dst, memory4 mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
instruct loadP(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadP mem));
  // Plain loads with no GC barrier data only.
  predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7251 
// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
// Only when compact object headers are off; see the variant below otherwise.
instruct loadNKlass(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n) && !UseCompactObjectHeaders);

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Compact-object-headers variant: the narrow klass is extracted from the
// header by load_nklass_compact, which clobbers the flags (KILL cr).
instruct loadNKlassCompactHeaders(iRegNNoSp dst, memory4 mem, rFlagsReg cr)
%{
  match(Set dst (LoadNKlass mem));
  effect(KILL cr);
  predicate(!needs_acquiring_load(n) && UseCompactObjectHeaders);

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}
  ins_encode %{
    __ load_nklass_compact($dst$$Register, $mem$$base$$Register, $mem$$index$$Register, $mem$$scale, $mem$$disp);
  %}
  ins_pipe(pipe_slow);
%}
 7307 
// Load Float
instruct loadF(vRegF dst, memory4 mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory8 mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
 7335 
 7336 
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}
 7362 
// Load Pointer Constant

instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  // General pointer constants may take a multi-instruction mov sequence.
  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant

instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant One

instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // NOTE(review): format label says "NULL ptr" although this loads
  // constant 1 (immP_1) — looks copy-pasted from loadConP0; confirm.
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Byte Map Base Constant

instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Pointer Constant

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}
 7434 
 7435 // Load Narrow Null Pointer Constant
 7436 
 7437 instruct loadConN0(iRegNNoSp dst, immN0 con)
 7438 %{
 7439   match(Set dst con);
 7440 
 7441   ins_cost(INSN_COST);
 7442   format %{ "mov  $dst, $con\t# compressed NULL ptr" %}
 7443 
 7444   ins_encode(aarch64_enc_mov_n0(dst, con));
 7445 
 7446   ins_pipe(ialu_imm);
 7447 %}
 7448 
 7449 // Load Narrow Klass Constant
 7450 
// Materialize a compressed klass pointer constant into $dst.
instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
 7462 
 7463 // Load Packed Float Constant
 7464 
// Float constant expressible as an fmov 8-bit packed immediate: load it
// directly, avoiding a constant-table access.
instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    // The (double) cast widens the packed immediate for the fmovs helper.
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}
 7475 
 7476 // Load Float Constant
 7477 
// General float constant: loaded from the per-method constant table.
instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}
 7493 
 7494 // Load Packed Double Constant
 7495 
// Double constant expressible as an fmov 8-bit packed immediate: load it
// directly, avoiding a constant-table access.
instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
 7506 
 7507 // Load Double Constant
 7508 
 7509 instruct loadConD(vRegD dst, immD con) %{
 7510   match(Set dst con);
 7511 
 7512   ins_cost(INSN_COST * 5);
 7513   format %{
 7514     "ldrd $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
 7515   %}
 7516 
 7517   ins_encode %{
 7518     __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
 7519   %}
 7520 
 7521   ins_pipe(fp_load_constant_d);
 7522 %}
 7523 
 7524 // Store Instructions
 7525 
 7526 // Store CMS card-mark Immediate
// Card-mark store of zero where the preceding StoreStore barrier has been
// proven unnecessary and is elided; only the strb of zr is emitted.
instruct storeimmCM0(immI0 zero, memory1 mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST);
  format %{ "storestore (elided)\n\t"
            "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
 7539 
 7540 // Store CMS card-mark Immediate with intervening StoreStore
 7541 // needed when using CMS with no conditional card marking
// Card-mark store of zero that must keep its ordering: emits a dmb ishst
// (StoreStore) before the strb of zr, hence the doubled cost.
instruct storeimmCM0_ordered(immI0 zero, memory1 mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "storestore\n\t"
            "dmb ishst"
            "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}
 7555 
 7556 // Store Byte
// Plain byte store; releasing (volatile) stores are matched by
// storeB_volatile instead, hence the predicate.
instruct storeB(iRegIorL2I src, memory1 mem)
%{
  match(Set mem (StoreB mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7569 
 7570 
 7571 instruct storeimmB0(immI0 zero, memory1 mem)
 7572 %{
 7573   match(Set mem (StoreB mem zero));
 7574   predicate(!needs_releasing_store(n));
 7575 
 7576   ins_cost(INSN_COST);
 7577   format %{ "strb rscractch2, $mem\t# byte" %}
 7578 
 7579   ins_encode(aarch64_enc_strb0(mem));
 7580 
 7581   ins_pipe(istore_mem);
 7582 %}
 7583 
 7584 // Store Char/Short
// Plain 16-bit (char/short) store; volatile stores handled elsewhere.
instruct storeC(iRegIorL2I src, memory2 mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7597 
// Store of zero to a 16-bit slot via the zero register.
instruct storeimmC0(immI0 zero, memory2 mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}
 7610 
 7611 // Store Integer
 7612 
// Plain 32-bit int store; volatile stores handled by storeI_volatile.
instruct storeI(iRegIorL2I src, memory4 mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7625 
// Store of int zero via the zero register.
instruct storeimmI0(immI0 zero, memory4 mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
 7638 
 7639 // Store Long (64 bit signed)
 7640 instruct storeL(iRegL src, memory8 mem)
 7641 %{
 7642   match(Set mem (StoreL mem src));
 7643   predicate(!needs_releasing_store(n));
 7644 
 7645   ins_cost(INSN_COST);
 7646   format %{ "str  $src, $mem\t# int" %}
 7647 
 7648   ins_encode(aarch64_enc_str(src, mem));
 7649 
 7650   ins_pipe(istore_reg_mem);
 7651 %}
 7652 
 7653 // Store Long (64 bit signed)
 7654 instruct storeimmL0(immL0 zero, memory8 mem)
 7655 %{
 7656   match(Set mem (StoreL mem zero));
 7657   predicate(!needs_releasing_store(n));
 7658 
 7659   ins_cost(INSN_COST);
 7660   format %{ "str  zr, $mem\t# int" %}
 7661 
 7662   ins_encode(aarch64_enc_str0(mem));
 7663 
 7664   ins_pipe(istore_mem);
 7665 %}
 7666 
 7667 // Store Pointer
// Plain (uncompressed) pointer store.
instruct storeP(iRegP src, memory8 mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7680 
 7681 // Store Pointer
// Store of null pointer via the zero register.
instruct storeimmP0(immP0 zero, memory8 mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
 7694 
 7695 // Store Compressed Pointer
// Plain compressed-oop (32-bit) store.
instruct storeN(iRegN src, memory4 mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7708 
// Store of compressed null via the zero register.
instruct storeImmN0(immN0 zero, memory4 mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
 7721 
 7722 // Store Float
// Plain float store from an FP/SIMD register.
instruct storeF(vRegF src, memory4 mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 7735 
 7736 // TODO
 7737 // implement storeImmF0 and storeFImmPacked
 7738 
 7739 // Store Double
// Plain double store from an FP/SIMD register.
instruct storeD(vRegD src, memory8 mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 7752 
 7753 // Store Compressed Klass Pointer
 7754 instruct storeNKlass(iRegN src, memory4 mem)
 7755 %{
 7756   predicate(!needs_releasing_store(n));
 7757   match(Set mem (StoreNKlass mem src));
 7758 
 7759   ins_cost(INSN_COST);
 7760   format %{ "strw  $src, $mem\t# compressed klass ptr" %}
 7761 
 7762   ins_encode(aarch64_enc_strw(src, mem));
 7763 
 7764   ins_pipe(istore_reg_mem);
 7765 %}
 7766 
 7767 // TODO
 7768 // implement storeImmD0 and storeDImmPacked
 7769 
 7770 // prefetch instructions
 7771 // Must be safe to execute with invalid address (cannot fault).
 7772 
// Prefetch-for-write hint used on the allocation path; prfm never faults,
// so it is safe even if the address is invalid.
instruct prefetchalloc( memory8 mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
 7783 
 7784 //  ---------------- volatile loads and stores ----------------
 7785 
 7786 // Load Byte (8 bit signed)
// Volatile byte load: ldarsb gives load-acquire with sign extension.
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}
 7798 
 7799 // Load Byte (8 bit signed) into long
// Volatile byte load with implicit widening to long (ConvI2L folded in).
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}
 7811 
 7812 // Load Byte (8 bit unsigned)
// Volatile unsigned byte load: ldarb zero-extends with acquire semantics.
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}
 7824 
 7825 // Load Byte (8 bit unsigned) into long
// Volatile unsigned byte load with implicit widening to long.
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}
 7837 
 7838 // Load Short (16 bit signed)
// Volatile signed short load: ldarshw is acquire + sign-extend to 32 bits.
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}
 7850 
// Volatile unsigned short/char load: ldarhw zero-extends with acquire.
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}
 7862 
 7863 // Load Short/Char (16 bit unsigned) into long
// Volatile unsigned short load with implicit widening to long.
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
 7875 
 7876 // Load Short/Char (16 bit signed) into long
 7877 instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7878 %{
 7879   match(Set dst (ConvI2L (LoadS mem)));
 7880 
 7881   ins_cost(VOLATILE_REF_COST);
 7882   format %{ "ldarh  $dst, $mem\t# short" %}
 7883 
 7884   ins_encode(aarch64_enc_ldarsh(dst, mem));
 7885 
 7886   ins_pipe(pipe_serial);
 7887 %}
 7888 
 7889 // Load Integer (32 bit signed)
// Volatile 32-bit int load with acquire semantics.
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
 7901 
 7902 // Load Integer (32 bit unsigned) into long
// Volatile unsigned-int-to-long load: ldarw already zero-extends, so the
// explicit AndL with the 32-bit mask is folded away.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
 7914 
 7915 // Load Long (64 bit signed)
 7916 instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7917 %{
 7918   match(Set dst (LoadL mem));
 7919 
 7920   ins_cost(VOLATILE_REF_COST);
 7921   format %{ "ldar  $dst, $mem\t# int" %}
 7922 
 7923   ins_encode(aarch64_enc_ldar(dst, mem));
 7924 
 7925   ins_pipe(pipe_serial);
 7926 %}
 7927 
 7928 // Load Pointer
// Volatile pointer load; barrier_data() == 0 restricts this to loads with
// no GC barrier attached (barrier variants are matched elsewhere).
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));
  predicate(n->as_Load()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
 7941 
 7942 // Load Compressed Pointer
// Volatile compressed-oop load with acquire semantics.
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
 7954 
 7955 // Load Float
// Volatile float load into an FP register (acquire via ldar + fmov in the
// encoding helper).
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}
 7967 
 7968 // Load Double
// Volatile double load into an FP register.
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
 7980 
 7981 // Store Byte
// Volatile byte store: stlrb gives store-release semantics.
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}
 7993 
// Volatile store of a zero byte via the zero register.
instruct storeimmB0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb0(mem));

  ins_pipe(pipe_class_memory);
%}
 8005 
 8006 // Store Char/Short
// Volatile 16-bit store with release semantics.
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}
 8018 
// Volatile store of zero to a 16-bit slot via the zero register.
instruct storeimmC0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh0(mem));

  ins_pipe(pipe_class_memory);
%}
 8030 
 8031 // Store Integer
 8032 
// Volatile 32-bit int store with release semantics.
instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
 8044 
// Volatile store of int zero via the zero register.
instruct storeimmI0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw0(mem));

  ins_pipe(pipe_class_memory);
%}
 8056 
 8057 // Store Long (64 bit signed)
 8058 instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
 8059 %{
 8060   match(Set mem (StoreL mem src));
 8061 
 8062   ins_cost(VOLATILE_REF_COST);
 8063   format %{ "stlr  $src, $mem\t# int" %}
 8064 
 8065   ins_encode(aarch64_enc_stlr(src, mem));
 8066 
 8067   ins_pipe(pipe_class_memory);
 8068 %}
 8069 
 8070 instruct storeimmL0_volatile(immL0 zero, /* sync_memory*/indirect mem)
 8071 %{
 8072   match(Set mem (StoreL mem zero));
 8073 
 8074   ins_cost(VOLATILE_REF_COST);
 8075   format %{ "stlr  zr, $mem\t# int" %}
 8076 
 8077   ins_encode(aarch64_enc_stlr0(mem));
 8078 
 8079   ins_pipe(pipe_class_memory);
 8080 %}
 8081 
 8082 // Store Pointer
// Volatile pointer store with release semantics.
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
 8094 
// Volatile store of null pointer via the zero register.
instruct storeimmP0_volatile(immP0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr0(mem));

  ins_pipe(pipe_class_memory);
%}
 8106 
 8107 // Store Compressed Pointer
// Volatile compressed-oop store with release semantics.
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
 8119 
// Volatile store of compressed null via the zero register.
instruct storeimmN0_volatile(immN0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  zr, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw0(mem));

  ins_pipe(pipe_class_memory);
%}
 8131 
 8132 // Store Float
// Volatile float store (release semantics via the fstlrs encoding helper).
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 8144 
 8145 // TODO
 8146 // implement storeImmF0 and storeFImmPacked
 8147 
 8148 // Store Double
// Volatile double store (release semantics via the fstlrd encoding helper).
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 8160 
 8161 //  ---------------- end of volatile loads and stores ----------------
 8162 
// Cache-line writeback for a single address. The asserts document the
// contract: base-register addressing only (no index, zero displacement).
instruct cacheWB(indirect addr)
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWB addr);

  ins_cost(100);
  format %{"cache wb $addr" %}
  ins_encode %{
    assert($addr->index_position() < 0, "should be");
    assert($addr$$disp == 0, "should be");
    __ cache_wb(Address($addr$$base$$Register, 0));
  %}
  ins_pipe(pipe_slow); // XXX
%}
 8177 
// Ordering barrier emitted before a sequence of cache writebacks.
instruct cacheWBPreSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPreSync);

  ins_cost(100);
  format %{"cache wb presync" %}
  ins_encode %{
    __ cache_wbsync(true);
  %}
  ins_pipe(pipe_slow); // XXX
%}
 8190 
// Ordering barrier emitted after a sequence of cache writebacks.
instruct cacheWBPostSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPostSync);

  ins_cost(100);
  format %{"cache wb postsync" %}
  ins_encode %{
    __ cache_wbsync(false);
  %}
  ins_pipe(pipe_slow); // XXX
%}
 8203 
 8204 // ============================================================================
 8205 // BSWAP Instructions
 8206 
// Byte-swap a 32-bit value (Integer.reverseBytes) with a single revw.
instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
 8219 
// Byte-swap a 64-bit value (Long.reverseBytes) with a single rev.
instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
 8232 
// Byte-swap an unsigned 16-bit value (Character.reverseBytes): rev16w swaps
// bytes within each halfword; no extension needed for the unsigned case.
instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
 8245 
// Byte-swap a signed 16-bit value (Short.reverseBytes): rev16w swaps the
// bytes, then sbfmw sign-extends bits 0..15 into the full word.
instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}
 8260 
 8261 // ============================================================================
 8262 // Zero Count Instructions
 8263 
// Integer.numberOfLeadingZeros via the clzw instruction.
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountLeadingZerosI src));

  ins_cost(INSN_COST);
  format %{ "clzw  $dst, $src" %}
  ins_encode %{
    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
 8275 
// Long.numberOfLeadingZeros via the 64-bit clz instruction.
instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountLeadingZerosL src));

  ins_cost(INSN_COST);
  format %{ "clz   $dst, $src" %}
  ins_encode %{
    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
 8287 
// Integer.numberOfTrailingZeros: AArch64 has no ctz, so reverse the bits
// (rbitw) and count leading zeros of the result.
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountTrailingZerosI src));

  ins_cost(INSN_COST * 2);
  format %{ "rbitw  $dst, $src\n\t"
            "clzw   $dst, $dst" %}
  ins_encode %{
    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
 8301 
// Long.numberOfTrailingZeros: rbit + clz, the 64-bit analogue of
// countTrailingZerosI above.
instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountTrailingZerosL src));

  ins_cost(INSN_COST * 2);
  format %{ "rbit   $dst, $src\n\t"
            "clz    $dst, $dst" %}
  ins_encode %{
    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
 8315 
 8316 //---------- Population Count Instructions -------------------------------------
 8317 //
 8318 
// Integer.bitCount via SIMD: move to an FP register, per-byte cnt, then a
// horizontal addv to sum the byte counts.
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // NOTE(review): this writes $src in place (movw zero-extends the low
    // word) without a KILL effect on src — presumably safe for iRegIorL2I
    // whose upper bits carry no value; confirm against register semantics.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
 8340 
// Integer.bitCount of a value loaded from memory: ldrs straight into the
// FP temp, then the same cnt/addv reduction as popCountI.
instruct popCountI_mem(iRegINoSp dst, memory4 mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
 8362 
 8363 // Note: Long.bitCount(long) returns an int.
// Note: Long.bitCount(long) returns an int.
// Same SIMD cnt/addv reduction as popCountI, on the full 64-bit value.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
 8383 
// Long.bitCount of a value loaded from memory: ldrd into the FP temp, then
// the cnt/addv reduction.
instruct popCountL_mem(iRegINoSp dst, memory8 mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
 8405 
 8406 // ============================================================================
 8407 // MemBar Instruction
 8408 
// LoadFence: orders prior loads against subsequent loads and stores.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}
 8420 
// MemBarAcquire proven redundant (e.g. dominated by a ldar); emits only a
// block comment, at zero cost.
instruct unnecessary_membar_acquire() %{
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}
 8434 
// MemBarAcquire that must be emitted: dmb ish covering LoadLoad|LoadStore.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire\n\t"
            "dmb ish" %}

  ins_encode %{
    __ block_comment("membar_acquire");
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}
 8449 
 8450 
// MemBarAcquireLock: no barrier emitted — the lock acquisition itself
// provides the ordering; only a block comment is generated.
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}
 8463 
// StoreFence: orders prior loads and stores against subsequent stores.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
 8475 
// MemBarRelease proven redundant (e.g. followed by a stlr); comment only.
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}
 8488 
// MemBarRelease that must be emitted: dmb ish covering LoadStore|StoreStore.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release\n\t"
            "dmb ish" %}

  ins_encode %{
    __ block_comment("membar_release");
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
 8502 
// StoreStore-only barrier; also matches the StoreStoreFence ideal node.
instruct membar_storestore() %{
  match(MemBarStoreStore);
  match(StoreStoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
 8515 
// MemBarReleaseLock: no barrier emitted — the lock release provides the
// ordering; only a block comment is generated.
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}
 8528 
// MemBarVolatile proven redundant; comment only, zero cost.
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}
 8542 
// Full MemBarVolatile: StoreLoad barrier, the most expensive ordering,
// hence the inflated cost to discourage matching when avoidable.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile\n\t"
             "dmb ish"%}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
 8557 
 8558 // ============================================================================
 8559 // Cast/Convert Instructions
 8560 
// Reinterpret a long as a pointer; a register move, elided when source and
// destination were allocated the same register.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
 8575 
// Reinterpret a pointer as a long.  Mirror of castX2P above: a plain
// register move, elided when dst and src coincide.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
 8590 
 8591 // Convert oop into int for vectors alignment masking
// ConvL2I(CastP2X src): only the low 32 bits survive the narrowing, so
// a 32-bit movw suffices.
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
 8603 
 8604 // Convert compressed oop into int for vectors alignment masking
 8605 // in case of 32bit oops (heap < 4Gb).
 8606 instruct convN2I(iRegINoSp dst, iRegN src)
 8607 %{
 8608   predicate(CompressedOops::shift() == 0);
 8609   match(Set dst (ConvL2I (CastP2X (DecodeN src))));
 8610 
 8611   ins_cost(INSN_COST);
 8612   format %{ "mov dst, $src\t# compressed ptr -> int" %}
 8613   ins_encode %{
 8614     __ movw($dst$$Register, $src$$Register);
 8615   %}
 8616 
 8617   ins_pipe(ialu_reg);
 8618 %}
 8619 
 8620 
 8621 // Convert oop pointer into compressed form
// May-be-null variant (ptr() != NotNull): uses the checking
// encode_heap_oop() and declares KILL cr since flags may be clobbered.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}
 8635 
// Known-non-null variant: the null check can be skipped.
// NOTE(review): cr is declared as an operand but there is no
// effect(KILL cr) here, unlike encodeHeapOop above — confirm whether
// encode_heap_oop_not_null leaves flags intact or cr is vestigial.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
 8646 
// Decode a narrow oop that may be null (neither NotNull nor Constant):
// uses the checking decode_heap_oop().
// NOTE(review): cr is declared but not listed in any effect() — confirm
// whether decode_heap_oop clobbers flags.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}
 8660 
// Decode a narrow oop known to be non-null (NotNull, or a Constant,
// which cannot be the null narrow oop here): skips the null check.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
 8674 
 8675 // n.b. AArch64 implementations of encode_klass_not_null and
 8676 // decode_klass_not_null do not modify the flags register so, unlike
 8677 // Intel, we don't kill CR as a side effect here
 8678 
// Compress a klass pointer.  Per the note above, the AArch64
// implementation does not touch the flags register, so no KILL cr.
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}
 8693 
// Decompress a narrow klass pointer.  When dst and src share a
// register the single-argument (in-place) MacroAssembler form is used;
// otherwise the two-register form.  No flags clobber (see note above).
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
 8712 
// The following cast rules are compile-time-only type assertions: each
// matches an Ideal Cast/CheckCast node whose input and output are
// constrained to the same register (Set dst (...) dst) and emits no
// code at all (size(0), empty encoding).
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castLL(iRegL dst)
%{
  match(Set dst (CastLL dst));

  size(0);
  format %{ "# castLL of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castFF(vRegF dst)
%{
  match(Set dst (CastFF dst));

  size(0);
  format %{ "# castFF of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castDD(vRegD dst)
%{
  match(Set dst (CastDD dst));

  size(0);
  format %{ "# castDD of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// CastVV comes in three register-class flavours: 64-bit NEON (vecD),
// 128-bit NEON (vecX) and SVE (vReg).
instruct castVVD(vecD dst)
%{
  match(Set dst (CastVV dst));

  size(0);
  format %{ "# castVV of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castVVX(vecX dst)
%{
  match(Set dst (CastVV dst));

  size(0);
  format %{ "# castVV of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castVV(vReg dst)
%{
  match(Set dst (CastVV dst));

  size(0);
  format %{ "# castVV of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
 8809 
 8810 // ============================================================================
 8811 // Atomic operation instructions
 8812 //
 8813 // Intel and SPARC both implement Ideal Node LoadPLocked and
 8814 // Store{PIL}Conditional instructions using a normal load for the
 8815 // LoadPLocked and a CAS for the Store{PIL}Conditional.
 8816 //
 8817 // The ideal code appears only to use LoadPLocked/StorePLocked as a
 8818 // pair to lock object allocations from Eden space when not using
 8819 // TLABs.
 8820 //
 8821 // There does not appear to be a Load{IL}Locked Ideal Node and the
 8822 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
 8823 // and to use StoreIConditional only for 32-bit and StoreLConditional
 8824 // only for 64-bit.
 8825 //
 8826 // We implement LoadPLocked and StorePLocked instructions using,
 8827 // respectively the AArch64 hw load-exclusive and store-conditional
 8828 // instructions. Whereas we must implement each of
 8829 // Store{IL}Conditional using a CAS which employs a pair of
 8830 // instructions comprising a load-exclusive followed by a
 8831 // store-conditional.
 8832 
 8833 
 8834 // Locked-load (linked load) of the current heap-top
 8835 // used when updating the eden heap top
 8836 // implemented using ldaxr on AArch64
 8837 
// Load-exclusive with acquire (ldaxr); pairs with the stlxr in
// storePConditional below to form the lock/conditional-store sequence.
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
 8850 
 8851 // Conditional-store of the updated heap-top.
 8852 // Used during allocation of the shared heap.
 8853 // Sets flag (EQ) on success.
 8854 // implemented using stlxr on AArch64.
 8855 
// Store-conditional paired with loadPLocked above.  Note oldval is
// matched but unused by the encoding: success depends solely on the
// exclusive monitor armed by the earlier ldaxr.
// NOTE(review): the format lists both stlxr and cmpw; both are assumed
// to be emitted by aarch64_enc_stlxr — confirm in its enc_class.
instruct storePConditional(memory8 heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
 8875 
 8876 
 8877 // storeLConditional is used by PhaseMacroExpand::expand_lock_node
 8878 // when attempting to rebias a lock towards the current thread.  We
 8879 // must use the acquire form of cmpxchg in order to guarantee acquire
 8880 // semantics in this case.
// Flag-producing 64-bit conditional store: the acquire-form cmpxchg
// (see comment above) leaves EQ set on success; no result register.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
 8896 
 8897 // storeIConditional also has acquire semantics, for no better reason
 8898 // than matching storeLConditional.  At the time of writing this
 8899 // comment storeIConditional was not used anywhere by AArch64.
// 32-bit counterpart of storeLConditional: acquire-form cmpxchgw,
// EQ on success (see comment above about why acquire is used).
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
 8915 
 8916 // standard CompareAndSwapX when we are using barriers
 8917 // these have higher priority than the rules selected by a predicate
 8918 
 8919 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
 8920 // can't match them
 8921 
// CompareAndSwapX rules used when explicit barriers surround the CAS
// (no acquire on the exclusive load).  Each emits a cmpxchg followed
// by cset so $res holds 1 on success and 0 on failure; flags are
// clobbered (KILL cr).  compareAndSwapP additionally requires
// barrier_data() == 0 so GC-barrier-aware rules take precedence.
instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  predicate(n->as_LoadStore()->barrier_data() == 0);
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 9030 
 9031 // alternative CompareAndSwapX when we are eliding barriers
 9032 
// Acquire variants, selected when needs_acquiring_load_exclusive(n)
// holds: the acquire-form CAS subsumes the barrier that would
// otherwise be needed, so ins_cost drops to a single VOLATILE_REF_COST
// (giving these rules priority over the plain forms above).
instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 9146 
 9147 
 9148 // ---------------------------------------------------------------------
 9149 
 9150 // BEGIN This section of the file is automatically generated. Do not edit --------------
 9151 
 9152 // Sundry CAS operations.  Note that release is always true,
 9153 // regardless of the memory ordering of the CAS.  This is because we
 9154 // need the volatile case to be sequentially consistent but there is
 9155 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
 9156 // can't check the type of memory ordering here, so we always emit a
 9157 // STLXR.
 9158 
 9159 // This section is generated from aarch64_ad_cas.m4
 9160 
 9161 
 9162 
 9163 // This pattern is generated automatically from cas.m4.
 9164 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): this section is generated from cas.m4 — any fix belongs
// in the generator, not here.  The "(..., weak)" text in these formats
// looks misleading: every rule below passes /*weak*/ false (a strong
// CAS); confirm against upstream cas.m4 before changing.  Each rule
// returns the value previously in memory ($res, TEMP_DEF so it cannot
// alias an input); sub-word results are sign-extended to int.
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{

  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{

  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{

  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{

  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{

  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 9272 
 9273 // This pattern is generated automatically from cas.m4.
 9274 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated from cas.m4 — fix wording there, not here.
// Acquire variants of CompareAndExchangeX: identical to the rules
// above except /*acquire*/ true and ins_cost halved, selected by
// needs_acquiring_load_exclusive(n).
instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 9382 
 9383 // This pattern is generated automatically from cas.m4.
 9384 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated from cas.m4 — fix wording there, not here.
// Weak CAS variants (/*weak*/ true, no acquire): cmpxchg is called with
// noreg so it does not return the old value; success is materialized
// from the flags with csetw ($res <- 1 on success, 0 on failure).
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{

  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{

  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{

  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{

  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9462 
 9463 // This pattern is generated automatically from cas.m4.
 9464 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 9465 instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
 9466 
 9467   match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
 9468   ins_cost(2 * VOLATILE_REF_COST);
 9469   effect(KILL cr);
 9470   format %{
 9471     "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
 9472     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 9473   %}
 9474   ins_encode %{
 9475     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 9476                Assembler::word, /*acquire*/ false, /*release*/ true,
 9477                /*weak*/ true, noreg);
 9478     __ csetw($res$$Register, Assembler::EQ);
 9479   %}
 9480   ins_pipe(pipe_slow);
 9481 %}
 9482 
 9483 // This pattern is generated automatically from cas.m4.
 9484 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 9485 instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
 9486   predicate(n->as_LoadStore()->barrier_data() == 0);
 9487   match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
 9488   ins_cost(2 * VOLATILE_REF_COST);
 9489   effect(KILL cr);
 9490   format %{
 9491     "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
 9492     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 9493   %}
 9494   ins_encode %{
 9495     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 9496                Assembler::xword, /*acquire*/ false, /*release*/ true,
 9497                /*weak*/ true, noreg);
 9498     __ csetw($res$$Register, Assembler::EQ);
 9499   %}
 9500   ins_pipe(pipe_slow);
 9501 %}
 9502 
 9503 // This pattern is generated automatically from cas.m4.
 9504 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 9505 instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
 9506   predicate(needs_acquiring_load_exclusive(n));
 9507   match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
 9508   ins_cost(VOLATILE_REF_COST);
 9509   effect(KILL cr);
 9510   format %{
 9511     "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
 9512     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 9513   %}
 9514   ins_encode %{
 9515     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 9516                Assembler::byte, /*acquire*/ true, /*release*/ true,
 9517                /*weak*/ true, noreg);
 9518     __ csetw($res$$Register, Assembler::EQ);
 9519   %}
 9520   ins_pipe(pipe_slow);
 9521 %}
 9522 
 9523 // This pattern is generated automatically from cas.m4.
 9524 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 9525 instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
 9526   predicate(needs_acquiring_load_exclusive(n));
 9527   match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
 9528   ins_cost(VOLATILE_REF_COST);
 9529   effect(KILL cr);
 9530   format %{
 9531     "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
 9532     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 9533   %}
 9534   ins_encode %{
 9535     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 9536                Assembler::halfword, /*acquire*/ true, /*release*/ true,
 9537                /*weak*/ true, noreg);
 9538     __ csetw($res$$Register, Assembler::EQ);
 9539   %}
 9540   ins_pipe(pipe_slow);
 9541 %}
 9542 
 9543 // This pattern is generated automatically from cas.m4.
 9544 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 9545 instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
 9546   predicate(needs_acquiring_load_exclusive(n));
 9547   match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
 9548   ins_cost(VOLATILE_REF_COST);
 9549   effect(KILL cr);
 9550   format %{
 9551     "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
 9552     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 9553   %}
 9554   ins_encode %{
 9555     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 9556                Assembler::word, /*acquire*/ true, /*release*/ true,
 9557                /*weak*/ true, noreg);
 9558     __ csetw($res$$Register, Assembler::EQ);
 9559   %}
 9560   ins_pipe(pipe_slow);
 9561 %}
 9562 
 9563 // This pattern is generated automatically from cas.m4.
 9564 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 9565 instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
 9566   predicate(needs_acquiring_load_exclusive(n));
 9567   match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
 9568   ins_cost(VOLATILE_REF_COST);
 9569   effect(KILL cr);
 9570   format %{
 9571     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
 9572     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 9573   %}
 9574   ins_encode %{
 9575     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 9576                Assembler::xword, /*acquire*/ true, /*release*/ true,
 9577                /*weak*/ true, noreg);
 9578     __ csetw($res$$Register, Assembler::EQ);
 9579   %}
 9580   ins_pipe(pipe_slow);
 9581 %}
 9582 
 9583 // This pattern is generated automatically from cas.m4.
 9584 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 9585 instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
 9586   predicate(needs_acquiring_load_exclusive(n));
 9587   match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
 9588   ins_cost(VOLATILE_REF_COST);
 9589   effect(KILL cr);
 9590   format %{
 9591     "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
 9592     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 9593   %}
 9594   ins_encode %{
 9595     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 9596                Assembler::word, /*acquire*/ true, /*release*/ true,
 9597                /*weak*/ true, noreg);
 9598     __ csetw($res$$Register, Assembler::EQ);
 9599   %}
 9600   ins_pipe(pipe_slow);
 9601 %}
 9602 
 9603 // This pattern is generated automatically from cas.m4.
 9604 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 9605 instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
 9606   predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
 9607   match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
 9608   ins_cost(VOLATILE_REF_COST);
 9609   effect(KILL cr);
 9610   format %{
 9611     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
 9612     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 9613   %}
 9614   ins_encode %{
 9615     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 9616                Assembler::xword, /*acquire*/ true, /*release*/ true,
 9617                /*weak*/ true, noreg);
 9618     __ csetw($res$$Register, Assembler::EQ);
 9619   %}
 9620   ins_pipe(pipe_slow);
 9621 %}
 9622 
 9623 // END This section of the file is automatically generated. Do not edit --------------
 9624 // ---------------------------------------------------------------------
 9625 
// ============================================================================
// GetAndSet (atomic exchange) rules.
//
// The plain rules emit atomic_xchg*/atomic_xchgw*; the *Acq variants,
// selected when needs_acquiring_load_exclusive(n) holds, emit the "al"
// forms and carry a lower cost so the matcher prefers them for nodes
// that need acquire semantics.
// NOTE(review): the precise memory-ordering guarantees of atomic_xchg
// vs atomic_xchgal are defined by the macro assembler — confirm there.

// Atomically exchange an int in memory; prev receives the old value.
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomically exchange a long in memory; prev receives the old value.
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomically exchange a narrow oop (32-bit) in memory.
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomically exchange a pointer in memory.  Only matches when the node
// carries no GC barrier data; barriered accesses are presumably handled
// by GC-specific rules elsewhere.
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring variant of get_and_setI.
instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetI mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring variant of get_and_setL.
instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetL mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring variant of get_and_setN.
instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetN mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring variant of get_and_setP (again only without barrier data).
instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9710 
 9711 
// ============================================================================
// GetAndAdd (atomic fetch-and-add) rules, relaxed-ordering forms.
//
// Four variants per size: register vs immediate increment, and a
// "_no_res" form matched when the fetched value is unused (the result
// register is noreg, saving the allocator a register; costed slightly
// cheaper so it wins when applicable).

// Atomically add a long held in a register; newval receives the old value.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// As above but the fetched value is not used.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomically add a long add/sub-encodable immediate.
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// As above but the fetched value is not used.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomically add an int held in a register; newval receives the old value.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// As above but the fetched value is not used.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomically add an int add/sub-encodable immediate.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// As above but the fetched value is not used.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9795 
// ============================================================================
// GetAndAdd rules, acquiring forms.  Mirror the relaxed family above but
// match only when needs_acquiring_load_exclusive(n) holds, emit the "al"
// macro-assembler variants, and carry lower costs so they are preferred
// for nodes that require acquire semantics.

// Acquiring fetch-and-add of a long held in a register.
instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// As above but the fetched value is not used.
instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring fetch-and-add of a long immediate.
instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// As above but the fetched value is not used.
instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring fetch-and-add of an int held in a register.
instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// As above but the fetched value is not used.
instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring fetch-and-add of an int immediate.
instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// As above but the fetched value is not used.
instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9883 
// Manifest a CmpL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    // csetw makes dst 0 (equal) or 1 (unequal); cnegw then negates the
    // 1 when the comparison was "less than", producing -1/0/+1.
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
 9906 
 9907 instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
 9908 %{
 9909   match(Set dst (CmpL3 src1 src2));
 9910   effect(KILL flags);
 9911 
 9912   ins_cost(INSN_COST * 6);
 9913   format %{
 9914       "cmp $src1, $src2"
 9915       "csetw $dst, ne"
 9916       "cnegw $dst, lt"
 9917   %}
 9918   ins_encode %{
 9919     int32_t con = (int32_t)$src2$$constant;
 9920      if (con < 0) {
 9921       __ adds(zr, $src1$$Register, -con);
 9922     } else {
 9923       __ subs(zr, $src1$$Register, con);
 9924     }
 9925     __ csetw($dst$$Register, Assembler::NE);
 9926     __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
 9927   %}
 9928 
 9929   ins_pipe(pipe_class_default);
 9930 %}
 9931 
 9932 // ============================================================================
 9933 // Conditional Move Instructions
 9934 
// n.b. we have identical rules for both a signed compare op (cmpOp)
// and an unsigned compare op (cmpOpU). it would be nice if we could
// define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
// of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
 9944 
// Conditional select of an int between two registers, signed condition.
// n.b. the csel operand order picks src2 when the condition holds.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// As above, unsigned condition flavour (see comment block above for why
// both flavours are required).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9976 
// special cases where one arg is zero

// n.b. this is selected in preference to the rule above because it
// avoids loading constant 0 into a source register

// TODO
// we ought only to be able to cull one of these variants as the ideal
// transforms ought always to order the zero consistently (to left/right?)

// Select between the zero register and src: dst = cmp ? src : 0.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the rule above.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Mirror case with the zero on the right: dst = cmp ? 0 : src.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the rule above.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10049 
// special case for creating a boolean 0 or 1

// n.b. this is selected in preference to the rule above because it
// avoids loading constants 0 and 1 into a source register

// csincw zr, zr, cond yields 0 when cond holds, else zr+1 = 1 — i.e.
// dst = cmp ? 1 : 0 once the matcher's operand ordering is accounted for.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// Unsigned flavour of the rule above.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
10092 
// Conditional select of a long between two registers, signed condition.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned flavour of the rule above.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10124 
// special cases where one arg is zero

// dst = cmp ? 0 : src, using the zero register instead of loading 0.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the rule above.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Mirror case: dst = cmp ? src : 0.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the rule above.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10190 
// Conditional select of a pointer between two registers, signed condition.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned flavour of the rule above.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10222 
// special cases where one arg is zero

// dst = cmp ? null : src, using the zero register instead of loading 0.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the rule above.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Mirror case: dst = cmp ? src : null.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the rule above.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10288 
// Conditional move of a compressed (narrow) oop, signed compare:
// dst = $cmp ? src2 : src1, using the 32-bit CSELW form.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10304 
// Conditional move of a compressed (narrow) oop, unsigned compare:
// dst = $cmp ? src2 : src1, using the 32-bit CSELW form.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  // This is the unsigned-compare variant (cmpOpU/rFlagsRegU); the format
  // previously said "signed", which mislabelled the disassembly output.
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10320 
// special cases where one arg is zero: use zr instead of loading an
// immediate 0 into a scratch register

// dst = $cmp ? 0 : src (signed compare)
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// dst = $cmp ? 0 : src (unsigned compare)
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// dst = $cmp ? src : 0 (signed compare)
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// dst = $cmp ? src : 0 (unsigned compare)
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10386 
// Conditional move of a float, signed compare: dst = $cmp ? src2 : src1
// via FCSEL (single precision); src2 is passed as FCSEL's first source,
// which is selected when the condition holds.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10404 
// Conditional move of a float, unsigned compare: dst = $cmp ? src2 : src1
// via FCSEL (single precision).
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10422 
// Conditional move of a double, signed compare: dst = $cmp ? src2 : src1
// via FCSEL (double precision).
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // This matches CMoveD and emits fcseld; the format previously said
  // "float", which mislabelled the disassembly output.
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10440 
// Conditional move of a double, unsigned compare: dst = $cmp ? src2 : src1
// via FCSEL (double precision).
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // This matches CMoveD and emits fcseld; the format previously said
  // "float", which mislabelled the disassembly output.
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10458 
10459 // ============================================================================
10460 // Arithmetic Instructions
10461 //
10462 
10463 // Integer Addition
10464 
10465 // TODO
10466 // these currently employ operations which do not set CR and hence are
10467 // not flagged as killing CR but we would like to isolate the cases
10468 // where we want to set flags from those where we don't. need to work
10469 // out how to do that.
10470 
// 32-bit integer add, register + register.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit integer add, register + add/sub-encodable immediate.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// As above, but the register source is the low word of a long
// (ConvL2I is free on AArch64 -- addw only reads the low 32 bits).
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10513 
// Pointer Addition

// ptr + long offset, both in registers.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// ptr + sign-extended int offset, folding the ConvI2L into the add's
// sxtw extend operand.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}

// ptr + (long << scale), folding the shift into the address mode via lea.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// ptr + (sign-extended int << scale), folding both the extension and the
// shift into the address mode (sxtw with scale).
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10574 
// (long)(int src) << scale in one instruction: sbfiz inserts the
// sign-extended low bits of src at bit position (scale & 63).  The width is
// capped at 32 (MIN2) because only 32 source bits are significant after the
// implicit int->long sign extension.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN2(32, (int)((-$scale$$constant) & 63)));
  %}

  ins_pipe(ialu_reg_shift);
%}
10589 
// Pointer Immediate Addition
// n.b. this needs to be more expensive than using an indirect memory
// operand
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10606 
// Long Addition

// 64-bit add, register + register.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10623 
// Long Immediate Addition. No constant pool entries are required.
// 64-bit add, register + add/sub-encodable immediate.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10638 
// Integer Subtraction

// 32-bit subtract, register - register.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10669 
// Long Subtraction

// 64-bit subtract, register - register.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10686 
// Long Immediate Subtraction. No constant pool entries are required.
// 64-bit subtract, register - add/sub-encodable immediate.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // Fixed format string: was "sub$dst, ..." (missing separator after the
  // mnemonic), now consistent with subL_reg_reg's "sub  $dst, ...".
  format %{ "sub  $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10701 
// Integer Negation (special case for sub)

// 32-bit negate: matches 0 - src and emits negw.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10717 
// Long Negation

// 64-bit negate: matches 0 - src and emits neg.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10733 
// Integer Multiply

// 32-bit multiply.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// 32x32 -> 64-bit signed widening multiply: folds the two ConvI2L nodes
// into a single smull.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10765 
// Long Multiply

// 64-bit multiply (low 64 bits of the product).
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10782 
// High 64 bits of the signed 64x64 -> 128-bit product (smulh).
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  // Fixed format string: removed the stray trailing comma after $src2.
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10798 
// Combined Integer Multiply & Add/Sub
// NOTE(review): the format strings below show "madd"/"msub"/"mneg" while
// the encodings emit the 32-bit forms maddw/msubw/mnegw -- consider
// renaming the mnemonics for accurate disassembly listings.

// dst = src3 + src1 * src2 (32-bit)
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}

// dst = src3 - src1 * src2 (32-bit)
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}

// Combined Integer Multiply & Neg

// dst = -(src1 * src2) (32-bit); either operand may carry the negation.
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
  match(Set dst (MulI (SubI zero src1) src2));
  match(Set dst (MulI src1 (SubI zero src2)));

  ins_cost(INSN_COST * 3);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mnegw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10850 
// Combined Long Multiply & Add/Sub

// dst = src3 + src1 * src2 (64-bit)
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// dst = src3 - src1 * src2 (64-bit)
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// Combined Long Multiply & Neg

// dst = -(src1 * src2) (64-bit); either operand may carry the negation.
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));
  match(Set dst (MulL src1 (SubL zero src2)));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mneg(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10902 
// Combined Integer Signed Multiply & Add/Sub/Neg Long
10904 
// dst = src3 + (long)src1 * (long)src2: signed widening multiply-add,
// folding both ConvI2L nodes into smaddl.
instruct smaddL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (AddL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smaddl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smaddl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}

// dst = src3 - (long)src1 * (long)src2: signed widening multiply-subtract.
instruct smsubL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (SubL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smsubl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smsubl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}

// dst = -((long)src1 * (long)src2): signed widening multiply-negate;
// either operand may carry the negation.
instruct smnegL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero (ConvI2L src1)) (ConvI2L src2)));
  match(Set dst (MulL (ConvI2L src1) (SubL zero (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smnegl  $dst, $src1, $src2" %}

  ins_encode %{
    __ smnegl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10952 
// Combined Multiply-Add Shorts into Integer (dst = src1 * src2 + src3 * src4)

// Two-instruction sequence: the first product goes through rscratch1,
// then maddw accumulates the second product on top of it.
instruct muladdS2I(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3, iRegIorL2I src4) %{
  match(Set dst (MulAddS2I (Binary src1 src2) (Binary src3 src4)));

  ins_cost(INSN_COST * 5);
  format %{ "mulw  rscratch1, $src1, $src2\n\t"
            "maddw $dst, $src3, $src4, rscratch1" %}

  ins_encode %{
    __ mulw(rscratch1, as_Register($src1$$reg), as_Register($src2$$reg));
    __ maddw(as_Register($dst$$reg), as_Register($src3$$reg), as_Register($src4$$reg), rscratch1); %}

  ins_pipe(imac_reg_reg);
%}
10968 
// Integer Divide

// 32-bit signed divide (sdivw via the shared encoding).
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10980 
// Long Divide

// 64-bit signed divide (sdiv via the shared encoding).
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10992 
10993 // Integer Remainder
10994 
// 32-bit remainder: src1 % src2 computed as src1 - (src1/src2)*src2
// via an sdivw/msubw pair (the shared modw encoding).
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  // Fixed malformed format string: was "msubw($dst, rscratch1, $src2, $src1"
  // (stray '(' and no proper operand separation).
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
11005 
11006 // Long Remainder
11007 
// 64-bit remainder: src1 % src2 computed as src1 - (src1/src2)*src2
// via an sdiv/msub pair (the shared mod encoding).
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Fixed malformed format string: was "msub($dst, ..." with a stray '(',
  // and "\n" without the continuation tab used by the modI format.
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
11018 
// Integer Shifts
// Register-count forms use the variable-shift instructions (lslvw etc.),
// which take the count modulo 32 in hardware; immediate forms mask the
// constant with 0x1f explicitly to match Java shift semantics.

// Shift Left Register
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Arithmetic Register
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11116 
// Combined Int Mask and Right Shift (using UBFM)
// TODO

// Long Shifts
// 64-bit analogues of the integer shifts above; immediate counts are
// masked with 0x3f to match Java long shift semantics.

// Shift Left Register
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// A special-case pattern for card table stores: shifts the raw pointer
// bits (CastP2X) right without a separate move.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Arithmetic Register
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11233 
11234 // BEGIN This section of the file is automatically generated. Do not edit --------------
11235 // This section is generated from aarch64_ad.m4
11236 
11237 // This pattern is automatically generated from aarch64_ad.m4
11238 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit bitwise NOT: XorL with -1 is matched as eon with zr
// (eon dst, src, zr == dst = src ^ ~0 == ~src).
// NOTE(review): generated section -- change aarch64_ad.m4 and regenerate
// rather than editing here.
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
11255 
11256 // This pattern is automatically generated from aarch64_ad.m4
11257 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  // Bitwise NOT of an int: src1 ^ -1 is matched to "eonw dst, src1, zr"
  // (32-bit dst = ~src1).
  // NOTE(review): cr is declared but not referenced by match/encode —
  // presumably an artifact of the m4 template; confirm against aarch64_ad.m4.
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
11274 
11275 // This pattern is automatically generated from aarch64_ad.m4
11276 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct NegI_reg_URShift_reg(iRegINoSp dst,
                              immI0 zero, iRegIorL2I src1, immI src2) %{
  match(Set dst (SubI zero (URShiftI src1 src2)));
  // dst = 0 - (src1 >>> src2): folded into negw with an LSR-shifted operand;
  // shift count masked to 5 bits (int shifts use 0..31).

  ins_cost(1.9 * INSN_COST);
  format %{ "negw  $dst, $src1, LSR $src2" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSR, $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11291 
11292 // This pattern is automatically generated from aarch64_ad.m4
11293 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct NegI_reg_RShift_reg(iRegINoSp dst,
                              immI0 zero, iRegIorL2I src1, immI src2) %{
  match(Set dst (SubI zero (RShiftI src1 src2)));
  // dst = 0 - (src1 >> src2): folded into negw with an ASR-shifted operand;
  // shift count masked to 5 bits.

  ins_cost(1.9 * INSN_COST);
  format %{ "negw  $dst, $src1, ASR $src2" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::ASR, $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11308 
11309 // This pattern is automatically generated from aarch64_ad.m4
11310 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct NegI_reg_LShift_reg(iRegINoSp dst,
                              immI0 zero, iRegIorL2I src1, immI src2) %{
  match(Set dst (SubI zero (LShiftI src1 src2)));
  // dst = 0 - (src1 << src2): folded into negw with an LSL-shifted operand;
  // shift count masked to 5 bits.

  ins_cost(1.9 * INSN_COST);
  format %{ "negw  $dst, $src1, LSL $src2" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSL, $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11325 
11326 // This pattern is automatically generated from aarch64_ad.m4
11327 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct NegL_reg_URShift_reg(iRegLNoSp dst,
                              immL0 zero, iRegL src1, immI src2) %{
  match(Set dst (SubL zero (URShiftL src1 src2)));
  // dst = 0 - (src1 >>> src2) (long): folded into neg with an LSR-shifted
  // operand; shift count masked to 6 bits (long shifts use 0..63).

  ins_cost(1.9 * INSN_COST);
  format %{ "neg  $dst, $src1, LSR $src2" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSR, $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11342 
11343 // This pattern is automatically generated from aarch64_ad.m4
11344 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct NegL_reg_RShift_reg(iRegLNoSp dst,
                              immL0 zero, iRegL src1, immI src2) %{
  match(Set dst (SubL zero (RShiftL src1 src2)));
  // dst = 0 - (src1 >> src2) (long): folded into neg with an ASR-shifted
  // operand; shift count masked to 6 bits.

  ins_cost(1.9 * INSN_COST);
  format %{ "neg  $dst, $src1, ASR $src2" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::ASR, $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11359 
11360 // This pattern is automatically generated from aarch64_ad.m4
11361 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct NegL_reg_LShift_reg(iRegLNoSp dst,
                              immL0 zero, iRegL src1, immI src2) %{
  match(Set dst (SubL zero (LShiftL src1 src2)));
  // dst = 0 - (src1 << src2) (long): folded into neg with an LSL-shifted
  // operand; shift count masked to 6 bits.

  ins_cost(1.9 * INSN_COST);
  format %{ "neg  $dst, $src1, LSL $src2" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSL, $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11376 
11377 // This pattern is automatically generated from aarch64_ad.m4
11378 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  // dst = src1 & ~src2: src2 ^ -1 (bitwise NOT) folded into a single bicw.
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11394 
11395 // This pattern is automatically generated from aarch64_ad.m4
11396 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  // dst = src1 & ~src2 (long): src2 ^ -1 folded into a single bic.
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11412 
11413 // This pattern is automatically generated from aarch64_ad.m4
11414 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  // dst = src1 | ~src2: src2 ^ -1 folded into a single ornw.
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11430 
11431 // This pattern is automatically generated from aarch64_ad.m4
11432 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  // dst = src1 | ~src2 (long): src2 ^ -1 folded into a single orn.
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11448 
11449 // This pattern is automatically generated from aarch64_ad.m4
11450 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  // dst = ~(src1 ^ src2): matched as -1 ^ (src2 ^ src1), folded into eonw.
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11466 
11467 // This pattern is automatically generated from aarch64_ad.m4
11468 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  // dst = ~(src1 ^ src2) (long): matched as -1 ^ (src2 ^ src1), folded into eon.
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11484 
11485 // This pattern is automatically generated from aarch64_ad.m4
11486 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11487 // val & (-1 ^ (val >>> shift)) ==> bicw
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  // dst = src1 & ~(src2 >>> src3): NOT and shift both folded into one
  // bicw with LSR-shifted operand; shift count masked to 5 bits.
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11505 
11506 // This pattern is automatically generated from aarch64_ad.m4
11507 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11508 // val & (-1 ^ (val >>> shift)) ==> bic
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  // dst = src1 & ~(src2 >>> src3) (long): folded into one bic with
  // LSR-shifted operand; shift count masked to 6 bits.
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11526 
11527 // This pattern is automatically generated from aarch64_ad.m4
11528 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11529 // val & (-1 ^ (val >> shift)) ==> bicw
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  // dst = src1 & ~(src2 >> src3): folded into one bicw with ASR-shifted
  // operand; shift count masked to 5 bits.
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11547 
11548 // This pattern is automatically generated from aarch64_ad.m4
11549 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11550 // val & (-1 ^ (val >> shift)) ==> bic
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  // dst = src1 & ~(src2 >> src3) (long): folded into one bic with
  // ASR-shifted operand; shift count masked to 6 bits.
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11568 
11569 // This pattern is automatically generated from aarch64_ad.m4
11570 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11571 // val & (-1 ^ (val ror shift)) ==> bicw
instruct AndI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(RotateRight src2 src3) src4)));
  // dst = src1 & ~(src2 ror src3): folded into one bicw with ROR-shifted
  // operand; rotate amount masked to 5 bits.
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11589 
11590 // This pattern is automatically generated from aarch64_ad.m4
11591 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11592 // val & (-1 ^ (val ror shift)) ==> bic
instruct AndL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(RotateRight src2 src3) src4)));
  // dst = src1 & ~(src2 ror src3) (long): folded into one bic with
  // ROR-shifted operand; rotate amount masked to 6 bits.
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11610 
11611 // This pattern is automatically generated from aarch64_ad.m4
11612 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11613 // val & (-1 ^ (val << shift)) ==> bicw
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  // dst = src1 & ~(src2 << src3): folded into one bicw with LSL-shifted
  // operand; shift count masked to 5 bits.
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11631 
11632 // This pattern is automatically generated from aarch64_ad.m4
11633 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11634 // val & (-1 ^ (val << shift)) ==> bic
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  // dst = src1 & ~(src2 << src3) (long): folded into one bic with
  // LSL-shifted operand; shift count masked to 6 bits.
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11652 
11653 // This pattern is automatically generated from aarch64_ad.m4
11654 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11655 // val ^ (-1 ^ (val >>> shift)) ==> eonw
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  // dst = src1 ^ ~(src2 >>> src3): matched as -1 ^ ((src2 >>> src3) ^ src1)
  // and folded into one eonw with LSR-shifted operand; count masked to 5 bits.
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11673 
11674 // This pattern is automatically generated from aarch64_ad.m4
11675 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11676 // val ^ (-1 ^ (val >>> shift)) ==> eon
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  // dst = src1 ^ ~(src2 >>> src3) (long): matched as -1 ^ ((src2 >>> src3) ^ src1)
  // and folded into one eon with LSR-shifted operand; count masked to 6 bits.
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11694 
11695 // This pattern is automatically generated from aarch64_ad.m4
11696 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11697 // val ^ (-1 ^ (val >> shift)) ==> eonw
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  // dst = src1 ^ ~(src2 >> src3): matched as -1 ^ ((src2 >> src3) ^ src1)
  // and folded into one eonw with ASR-shifted operand; count masked to 5 bits.
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11715 
11716 // This pattern is automatically generated from aarch64_ad.m4
11717 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11718 // val ^ (-1 ^ (val >> shift)) ==> eon
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  // dst = src1 ^ ~(src2 >> src3) (long): matched as -1 ^ ((src2 >> src3) ^ src1)
  // and folded into one eon with ASR-shifted operand; count masked to 6 bits.
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11736 
11737 // This pattern is automatically generated from aarch64_ad.m4
11738 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11739 // val ^ (-1 ^ (val ror shift)) ==> eonw
instruct XorI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (XorI src4 (XorI(RotateRight src2 src3) src1)));
  // dst = src1 ^ ~(src2 ror src3): matched as -1 ^ ((src2 ror src3) ^ src1)
  // and folded into one eonw with ROR-shifted operand; amount masked to 5 bits.
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11757 
11758 // This pattern is automatically generated from aarch64_ad.m4
11759 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11760 // val ^ (-1 ^ (val ror shift)) ==> eon
instruct XorL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (XorL src4 (XorL(RotateRight src2 src3) src1)));
  // dst = src1 ^ ~(src2 ror src3) (long): matched as -1 ^ ((src2 ror src3) ^ src1)
  // and folded into one eon with ROR-shifted operand; amount masked to 6 bits.
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11778 
11779 // This pattern is automatically generated from aarch64_ad.m4
11780 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11781 // val ^ (-1 ^ (val << shift)) ==> eonw
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  // dst = src1 ^ ~(src2 << src3): matched as -1 ^ ((src2 << src3) ^ src1)
  // and folded into one eonw with LSL-shifted operand; count masked to 5 bits.
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11799 
11800 // This pattern is automatically generated from aarch64_ad.m4
11801 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11802 // val ^ (-1 ^ (val << shift)) ==> eon
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  // dst = src1 ^ ~(src2 << src3) (long): matched as -1 ^ ((src2 << src3) ^ src1)
  // and folded into one eon with LSL-shifted operand; count masked to 6 bits.
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11820 
11821 // This pattern is automatically generated from aarch64_ad.m4
11822 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11823 // val | (-1 ^ (val >>> shift)) ==> ornw
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  // dst = src1 | ~(src2 >>> src3): NOT and shift folded into one ornw with
  // LSR-shifted operand; shift count masked to 5 bits.
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11841 
11842 // This pattern is automatically generated from aarch64_ad.m4
11843 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11844 // val | (-1 ^ (val >>> shift)) ==> orn
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  // dst = src1 | ~(src2 >>> src3) (long): folded into one orn with
  // LSR-shifted operand; shift count masked to 6 bits.
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11862 
11863 // This pattern is automatically generated from aarch64_ad.m4
11864 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11865 // val | (-1 ^ (val >> shift)) ==> ornw
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  // dst = src1 | ~(src2 >> src3): folded into one ornw with ASR-shifted
  // operand; shift count masked to 5 bits.
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11883 
11884 // This pattern is automatically generated from aarch64_ad.m4
11885 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11886 // val | (-1 ^ (val >> shift)) ==> orn
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  // dst = src1 | ~(src2 >> src3) (long): folded into one orn with
  // ASR-shifted operand; shift count masked to 6 bits.
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11904 
11905 // This pattern is automatically generated from aarch64_ad.m4
11906 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11907 // val | (-1 ^ (val ror shift)) ==> ornw
instruct OrI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(RotateRight src2 src3) src4)));
  // dst = src1 | ~(src2 ror src3): folded into one ornw with ROR-shifted
  // operand; rotate amount masked to 5 bits.
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11925 
11926 // This pattern is automatically generated from aarch64_ad.m4
11927 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11928 // val | (-1 ^ (val ror shift)) ==> orn
instruct OrL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(RotateRight src2 src3) src4)));
  // dst = src1 | ~(src2 ror src3) (long): folded into one orn with
  // ROR-shifted operand; rotate amount masked to 6 bits.
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11946 
11947 // This pattern is automatically generated from aarch64_ad.m4
11948 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11949 // val | (-1 ^ (val << shift)) ==> ornw
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  // dst = src1 | ~(src2 << src3): folded into one ornw with LSL-shifted
  // operand; shift count masked to 5 bits.
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11967 
11968 // This pattern is automatically generated from aarch64_ad.m4
11969 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11970 // val | (-1 ^ (val << shift)) ==> orn
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  // dst = src1 | ~(src2 << src3) (long): folded into one orn with
  // LSL-shifted operand; shift count masked to 6 bits.
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11988 
11989 // This pattern is automatically generated from aarch64_ad.m4
11990 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));
  // dst = src1 & (src2 >>> src3): shift folded into the andw operand
  // (LSR form); shift count masked to 5 bits.

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12009 
12010 // This pattern is automatically generated from aarch64_ad.m4
12011 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));
  // dst = src1 & (src2 >>> src3) (long): shift folded into the andr operand
  // (LSR form); shift count masked to 6 bits.

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12030 
12031 // This pattern is automatically generated from aarch64_ad.m4
12032 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));
  // dst = src1 & (src2 >> src3): shift folded into the andw operand
  // (ASR form); shift count masked to 5 bits.

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12051 
12052 // This pattern is automatically generated from aarch64_ad.m4
12053 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));
  // dst = src1 & (src2 >> src3) (long): shift folded into the andr operand
  // (ASR form); shift count masked to 6 bits.

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12072 
12073 // This pattern is automatically generated from aarch64_ad.m4
12074 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));
  // dst = src1 & (src2 << src3): shift folded into the andw operand
  // (LSL form); shift count masked to 5 bits.

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12093 
12094 // This pattern is automatically generated from aarch64_ad.m4
12095 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));
  // dst = src1 & (src2 << src3) (long): shift folded into the andr operand
  // (LSL form); shift count masked to 6 bits.

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12114 
12115 // This pattern is automatically generated from aarch64_ad.m4
12116 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12117 instruct AndI_reg_RotateRight_reg(iRegINoSp dst,
12118                          iRegIorL2I src1, iRegIorL2I src2,
12119                          immI src3) %{
12120   match(Set dst (AndI src1 (RotateRight src2 src3)));
12121 
12122   ins_cost(1.9 * INSN_COST);
12123   format %{ "andw  $dst, $src1, $src2, ROR $src3" %}
12124 
12125   ins_encode %{
12126     __ andw(as_Register($dst$$reg),
12127               as_Register($src1$$reg),
12128               as_Register($src2$$reg),
12129               Assembler::ROR,
12130               $src3$$constant & 0x1f);
12131   %}
12132 
12133   ins_pipe(ialu_reg_reg_shift);
12134 %}
12135 
12136 // This pattern is automatically generated from aarch64_ad.m4
12137 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12138 instruct AndL_reg_RotateRight_reg(iRegLNoSp dst,
12139                          iRegL src1, iRegL src2,
12140                          immI src3) %{
12141   match(Set dst (AndL src1 (RotateRight src2 src3)));
12142 
12143   ins_cost(1.9 * INSN_COST);
12144   format %{ "andr  $dst, $src1, $src2, ROR $src3" %}
12145 
12146   ins_encode %{
12147     __ andr(as_Register($dst$$reg),
12148               as_Register($src1$$reg),
12149               as_Register($src2$$reg),
12150               Assembler::ROR,
12151               $src3$$constant & 0x3f);
12152   %}
12153 
12154   ins_pipe(ialu_reg_reg_shift);
12155 %}
12156 
// ============================================================================
// NOTE(review): machine-generated from aarch64_ad.m4 — edit the .m4 source,
// not this file.  Comments added below are review annotations only.
//
// XOR with a shifted/rotated second operand, fused into one EORW/EOR
// shifted-register instruction.  Shift amounts are masked to the operand
// width (0x1f / 0x3f), matching Java shift semantics.
// ============================================================================

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: dst = src1 ^ (src2 >>> src3)  (logical shift right)
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: dst = src1 ^ (src2 >>> src3)
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: dst = src1 ^ (src2 >> src3)  (arithmetic shift right)
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: dst = src1 ^ (src2 >> src3)
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: dst = src1 ^ (src2 << src3)
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: dst = src1 ^ (src2 << src3)
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: dst = src1 ^ rotate_right(src2, src3)
instruct XorI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: dst = src1 ^ rotate_right(src2, src3)
instruct XorL_reg_RotateRight_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12324 
// ============================================================================
// NOTE(review): machine-generated from aarch64_ad.m4 — edit the .m4 source,
// not this file.  Comments added below are review annotations only.
//
// OR with a shifted/rotated second operand, fused into one ORRW/ORR
// shifted-register instruction.  Shift amounts are masked to the operand
// width (0x1f / 0x3f), matching Java shift semantics.
// ============================================================================

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: dst = src1 | (src2 >>> src3)  (logical shift right)
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: dst = src1 | (src2 >>> src3)
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: dst = src1 | (src2 >> src3)  (arithmetic shift right)
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: dst = src1 | (src2 >> src3)
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: dst = src1 | (src2 << src3)
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: dst = src1 | (src2 << src3)
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: dst = src1 | rotate_right(src2, src3)
instruct OrI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: dst = src1 | rotate_right(src2, src3)
instruct OrL_reg_RotateRight_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12492 
// ============================================================================
// NOTE(review): machine-generated from aarch64_ad.m4 — edit the .m4 source,
// not this file.  Comments added below are review annotations only.
//
// ADD with a shifted second operand, fused into one ADDW/ADD shifted-register
// instruction.  No RotateRight forms here: the ADD (shifted register)
// encoding has no ROR shift type.  Shift amounts are masked to the operand
// width (0x1f / 0x3f), matching Java shift semantics.
// ============================================================================

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: dst = src1 + (src2 >>> src3)  (logical shift right)
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: dst = src1 + (src2 >>> src3)
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: dst = src1 + (src2 >> src3)  (arithmetic shift right)
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: dst = src1 + (src2 >> src3)
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: dst = src1 + (src2 << src3)
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: dst = src1 + (src2 << src3)
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12618 
// ============================================================================
// NOTE(review): machine-generated from aarch64_ad.m4 — edit the .m4 source,
// not this file.  Comments added below are review annotations only.
//
// SUB with a shifted second operand, fused into one SUBW/SUB shifted-register
// instruction.  As with ADD, there are no RotateRight forms (the SUB
// (shifted register) encoding has no ROR shift type).  Shift amounts are
// masked to the operand width (0x1f / 0x3f), matching Java shift semantics.
// ============================================================================

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: dst = src1 - (src2 >>> src3)  (logical shift right)
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: dst = src1 - (src2 >>> src3)
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: dst = src1 - (src2 >> src3)  (arithmetic shift right)
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: dst = src1 - (src2 >> src3)
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: dst = src1 - (src2 << src3)
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: dst = src1 - (src2 << src3)
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12744 
// ============================================================================
// NOTE(review): machine-generated from aarch64_ad.m4 — edit the .m4 source,
// not this file.  Comments added below are review annotations only.
//
// A left shift followed by a right shift of the same value collapses into a
// single SBFM (signed) or UBFM (unsigned) bitfield-move.  The immediates
// follow the standard SBFM/UBFM encoding: for width W (32 or 64),
//   r = (rshift - lshift) mod W   — the effective rotate amount
//   s = (W - 1) - lshift          — the index of the top bit of the field
// This subsumes i2b/i2s-style sign extension (lshift == rshift) as well as
// the general lshift != rshift case.
// ============================================================================

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 64-bit signed variant: (src << lshift) >> rshift (arithmetic).
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit signed variant: (src << lshift) >> rshift (arithmetic).
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 64-bit unsigned variant: (src << lshift) >>> rshift (logical).
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit unsigned variant: (src << lshift) >>> rshift (logical).
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12836 
12837 // Bitfield extract with shift & mask
12838 
// ============================================================================
// NOTE(review): machine-generated from aarch64_ad.m4 — edit the .m4 source,
// not this file.  Comments added below are review annotations only.
//
// (src >>> rshift) & mask, where mask is 2^k - 1 (guaranteed by
// immI_bitmask / immL_bitmask), becomes a single UBFX(W) extract of a
// k-bit field starting at bit `rshift`.  The predicates inspect the ideal
// graph (n->in(...)) to verify rshift + field-width does not exceed the
// operand width, since UBFX cannot read past the top bit.
// ============================================================================

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));
  // Make sure we are not going to exceed what ubfx can do.
  predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The 64-bit ubfx zeroes the upper bits, so it also implements the
// ConvI2L zero/sign extension here (the extracted field is non-negative).
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12902 
12903 
// ============================================================================
// NOTE(review): machine-generated from aarch64_ad.m4 — edit the .m4 source,
// not this file.  Comments added below are review annotations only.
//
// (src & mask) << lshift, where mask is 2^k - 1, becomes a single
// UBFIZ(W): insert a k-bit field at bit `lshift`, zeroing the rest.
// The predicates verify lshift + field-width fits the operand; the
// ConvI2L / ConvL2I variants use the stricter bound `<= 31` — presumably
// so the 32-bit result's sign bit stays clear and zero-insertion agrees
// with the conversion's (sign-)extension/truncation semantics — TODO
// confirm against aarch64_ad.m4.
// ============================================================================

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  predicate((exact_log2_long(n->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// NOTE(review): bound is `<= 31` (not 31 + 1) — see section note above.
instruct ubfizwIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (LShiftI (AndI src mask) lshift)));
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// NOTE(review): the operand is actually immL_positive_bitmaskI (a long mask
// whose value fits a positive int), not immL_bitmask as the generated
// comment above says — comment/operand mismatch originates in the .m4.
instruct ubfizLConvL2I(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
%{
  match(Set dst (ConvL2I (LShiftL (AndL src mask) lshift)));
  predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12991 
12992 
12993 // This pattern is automatically generated from aarch64_ad.m4
12994 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12995 
12996 // If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
12997 instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
12998 %{
12999   match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
13000   predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));
13001 
13002   ins_cost(INSN_COST);
13003   format %{ "ubfiz $dst, $src, $lshift, $mask" %}
13004   ins_encode %{
13005     int lshift = $lshift$$constant & 63;
13006     intptr_t mask = $mask$$constant;
13007     int width = exact_log2(mask+1);
13008     __ ubfiz(as_Register($dst$$reg),
13009              as_Register($src$$reg), lshift, width);
13010   %}
13011   ins_pipe(ialu_reg_shift);
13012 %}
13013 
13014 // This pattern is automatically generated from aarch64_ad.m4
13015 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13016 
13017 // If there is a convert L to I block between and AndL and a LShiftI, we can also match ubfiz
13018 instruct ubfizLConvL2Ix(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
13019 %{
13020   match(Set dst (LShiftI (ConvL2I (AndL src mask)) lshift));
13021   predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 31)) <= 31);
13022 
13023   ins_cost(INSN_COST);
13024   format %{ "ubfiz $dst, $src, $lshift, $mask" %}
13025   ins_encode %{
13026     int lshift = $lshift$$constant & 31;
13027     intptr_t mask = $mask$$constant;
13028     int width = exact_log2(mask+1);
13029     __ ubfiz(as_Register($dst$$reg),
13030              as_Register($src$$reg), lshift, width);
13031   %}
13032   ins_pipe(ialu_reg_shift);
13033 %}
13034 
13035 // This pattern is automatically generated from aarch64_ad.m4
13036 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13037 
13038 // Can skip int2long conversions after AND with small bitmask
13039 instruct ubfizIConvI2LAndI(iRegLNoSp dst, iRegI src, immI_bitmask msk)
13040 %{
13041   match(Set dst (ConvI2L (AndI src msk)));
13042   ins_cost(INSN_COST);
13043   format %{ "ubfiz $dst, $src, 0, exact_log2($msk + 1) " %}
13044   ins_encode %{
13045     __ ubfiz(as_Register($dst$$reg), as_Register($src$$reg), 0, exact_log2($msk$$constant + 1));
13046   %}
13047   ins_pipe(ialu_reg_shift);
13048 %}
13049 
13050 
13051 // Rotations
13052 
13053 // This pattern is automatically generated from aarch64_ad.m4
13054 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  // Shift counts must be complementary (lshift + rshift == 0 mod 64), so the
  // OR of the two shifted values is one EXTR of the src1:src2 concatenation.
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  // 32-bit variant: lshift + rshift == 0 mod 32.
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  // With complementary shifts the two operands have no overlapping set bits,
  // so ADD equals OR and the same EXTR applies.
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  // 32-bit variant of extrAddL: complementary shifts, ADD == OR.
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
13123 
13124 // This pattern is automatically generated from aarch64_ad.m4
13125 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct rorI_imm(iRegINoSp dst, iRegI src, immI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     // EXTR with both sources equal to src is a rotate-right by the immediate.
     __ extrw(as_Register($dst$$reg), as_Register($src$$reg), as_Register($src$$reg),
               $shift$$constant & 0x1f);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct rorL_imm(iRegLNoSp dst, iRegL src, immI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     // 64-bit rotate-right by immediate via EXTR with identical sources.
     __ extr(as_Register($dst$$reg), as_Register($src$$reg), as_Register($src$$reg),
               $shift$$constant & 0x3f);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct rorI_reg(iRegINoSp dst, iRegI src, iRegI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     // Variable rotate-right: RORV takes the count from a register.
     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg), as_Register($shift$$reg));
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct rorL_reg(iRegLNoSp dst, iRegL src, iRegI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     // 64-bit variable rotate-right.
     __ rorv(as_Register($dst$$reg), as_Register($src$$reg), as_Register($shift$$reg));
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct rolI_reg(iRegINoSp dst, iRegI src, iRegI shift)
%{
  match(Set dst (RotateLeft src shift));

  ins_cost(INSN_COST);
  format %{ "rol    $dst, $src, $shift" %}

  ins_encode %{
     // rol(x, s) == ror(x, -s): negate the count into rscratch1, then RORV.
     __ subw(rscratch1, zr, as_Register($shift$$reg));
     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg), rscratch1);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct rolL_reg(iRegLNoSp dst, iRegL src, iRegI shift)
%{
  match(Set dst (RotateLeft src shift));

  ins_cost(INSN_COST);
  format %{ "rol    $dst, $src, $shift" %}

  ins_encode %{
     // 64-bit rotate-left as rotate-right by the negated count.
     __ subw(rscratch1, zr, as_Register($shift$$reg));
     __ rorv(as_Register($dst$$reg), as_Register($src$$reg), rscratch1);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
13217 
13218 
13219 // Add/subtract (extended)
13220 
13221 // This pattern is automatically generated from aarch64_ad.m4
13222 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  // Fold the i2l conversion into the add's sxtw extended-register form.
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  // Fold the i2l conversion into the sub's sxtw extended-register form.
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  // (src2 << 16) >> 16 sign-extends the low half-word: use add ... sxth.
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  // (src2 << 24) >> 24 sign-extends the low byte: use add ... sxtb.
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  // (src2 << 24) >>> 24 zero-extends the low byte: use add ... uxtb.
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  // 64-bit: (src2 << 48) >> 48 sign-extends the low half-word.
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  // 64-bit: (src2 << 32) >> 32 sign-extends the low word.
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  // 64-bit: (src2 << 56) >> 56 sign-extends the low byte.
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  // 64-bit: (src2 << 56) >>> 56 zero-extends the low byte.
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
13355 
13356 // This pattern is automatically generated from aarch64_ad.m4
13357 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  // src2 & 0xFF is a zero-extend of the low byte: fold into addw ... uxtb.
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  // src2 & 0xFFFF is a zero-extend of the low half-word: fold into addw ... uxth.
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  // 64-bit: src2 & 0xFF folds into add ... uxtb.
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  // 64-bit: src2 & 0xFFFF folds into add ... uxth.
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  // 64-bit: src2 & 0xFFFFFFFF folds into add ... uxtw.
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  // src2 & 0xFF folds into subw ... uxtb.
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  // src2 & 0xFFFF folds into subw ... uxth.
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  // 64-bit: src2 & 0xFF folds into sub ... uxtb.
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  // 64-bit: src2 & 0xFFFF folds into sub ... uxth.
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  // 64-bit: src2 & 0xFFFFFFFF folds into sub ... uxtw.
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
13505 
13506 
13507 // This pattern is automatically generated from aarch64_ad.m4
13508 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  // Sign-extend low byte of src2 ((x << 56) >> 56), then shift left by lshift2
  // and add: the extended-register add form does all of it in one instruction.
  // immIExt limits lshift2 to the shift range that form can encode.
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  // As above, but sign-extending the low half-word (shift pair of 48).
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  // As above, but sign-extending the low word (shift pair of 32).
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  // SUB counterpart of AddExtL_sxtb_shift.
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  // SUB counterpart of AddExtL_sxth_shift.
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  // SUB counterpart of AddExtL_sxtw_shift.
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  // 32-bit variant: sign-extend low byte ((x << 24) >> 24), shift, then addw.
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  // 32-bit variant: sign-extend low half-word (shift pair of 16), shift, addw.
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  // SUB counterpart of AddExtI_sxtb_shift.
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  // SUB counterpart of AddExtI_sxth_shift.
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13656 
13657 // This pattern is automatically generated from aarch64_ad.m4
13658 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  // Fold (i2l src2) << lshift into the add's sxtw-with-shift form.
  // immIExt limits lshift to the shift range that form can encode.
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  // SUB counterpart of AddExtI_shift.
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  // (src2 & 0xFF) << lshift folds into add ... uxtb #lshift.
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  // (src2 & 0xFFFF) << lshift folds into add ... uxth #lshift.
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  // (src2 & 0xFFFFFFFF) << lshift folds into add ... uxtw #lshift.
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  // SUB counterpart of AddExtL_uxtb_and_shift.
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  // SUB counterpart of AddExtL_uxth_and_shift.
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13761 
13762 // This pattern is automatically generated from aarch64_ad.m4
13763 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13764 instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
13765 %{
13766   match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
13767   ins_cost(1.9 * INSN_COST);
13768   format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}
13769 
13770    ins_encode %{
13771      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
13772             as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
13773    %}
13774   ins_pipe(ialu_reg_reg_shift);
13775 %}
13776 
13777 // This pattern is automatically generated from aarch64_ad.m4
13778 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13779 instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
13780 %{
13781   match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
13782   ins_cost(1.9 * INSN_COST);
13783   format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}
13784 
13785    ins_encode %{
13786      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
13787             as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
13788    %}
13789   ins_pipe(ialu_reg_reg_shift);
13790 %}
13791 
13792 // This pattern is automatically generated from aarch64_ad.m4
13793 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13794 instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
13795 %{
13796   match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
13797   ins_cost(1.9 * INSN_COST);
13798   format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}
13799 
13800    ins_encode %{
13801      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
13802             as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
13803    %}
13804   ins_pipe(ialu_reg_reg_shift);
13805 %}
13806 
13807 // This pattern is automatically generated from aarch64_ad.m4
13808 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13809 instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
13810 %{
13811   match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
13812   ins_cost(1.9 * INSN_COST);
13813   format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}
13814 
13815    ins_encode %{
13816      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
13817             as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
13818    %}
13819   ins_pipe(ialu_reg_reg_shift);
13820 %}
13821 
13822 // This pattern is automatically generated from aarch64_ad.m4
13823 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13824 instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
13825 %{
13826   match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
13827   ins_cost(1.9 * INSN_COST);
13828   format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}
13829 
13830    ins_encode %{
13831      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
13832             as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
13833    %}
13834   ins_pipe(ialu_reg_reg_shift);
13835 %}
13836 
13837 // This pattern is automatically generated from aarch64_ad.m4
13838 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13839 instruct cmovI_reg_reg_lt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
13840 %{
13841   effect(DEF dst, USE src1, USE src2, USE cr);
13842   ins_cost(INSN_COST * 2);
13843   format %{ "cselw $dst, $src1, $src2 lt\t"  %}
13844 
13845   ins_encode %{
13846     __ cselw($dst$$Register,
13847              $src1$$Register,
13848              $src2$$Register,
13849              Assembler::LT);
13850   %}
13851   ins_pipe(icond_reg_reg);
13852 %}
13853 
13854 // This pattern is automatically generated from aarch64_ad.m4
13855 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13856 instruct cmovI_reg_reg_gt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
13857 %{
13858   effect(DEF dst, USE src1, USE src2, USE cr);
13859   ins_cost(INSN_COST * 2);
13860   format %{ "cselw $dst, $src1, $src2 gt\t"  %}
13861 
13862   ins_encode %{
13863     __ cselw($dst$$Register,
13864              $src1$$Register,
13865              $src2$$Register,
13866              Assembler::GT);
13867   %}
13868   ins_pipe(icond_reg_reg);
13869 %}
13870 
13871 // This pattern is automatically generated from aarch64_ad.m4
13872 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13873 instruct cmovI_reg_imm0_lt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
13874 %{
13875   effect(DEF dst, USE src1, USE cr);
13876   ins_cost(INSN_COST * 2);
13877   format %{ "cselw $dst, $src1, zr lt\t"  %}
13878 
13879   ins_encode %{
13880     __ cselw($dst$$Register,
13881              $src1$$Register,
13882              zr,
13883              Assembler::LT);
13884   %}
13885   ins_pipe(icond_reg);
13886 %}
13887 
13888 // This pattern is automatically generated from aarch64_ad.m4
13889 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13890 instruct cmovI_reg_imm0_gt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
13891 %{
13892   effect(DEF dst, USE src1, USE cr);
13893   ins_cost(INSN_COST * 2);
13894   format %{ "cselw $dst, $src1, zr gt\t"  %}
13895 
13896   ins_encode %{
13897     __ cselw($dst$$Register,
13898              $src1$$Register,
13899              zr,
13900              Assembler::GT);
13901   %}
13902   ins_pipe(icond_reg);
13903 %}
13904 
13905 // This pattern is automatically generated from aarch64_ad.m4
13906 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13907 instruct cmovI_reg_imm1_le(iRegINoSp dst, iRegI src1, rFlagsReg cr)
13908 %{
13909   effect(DEF dst, USE src1, USE cr);
13910   ins_cost(INSN_COST * 2);
13911   format %{ "csincw $dst, $src1, zr le\t"  %}
13912 
13913   ins_encode %{
13914     __ csincw($dst$$Register,
13915              $src1$$Register,
13916              zr,
13917              Assembler::LE);
13918   %}
13919   ins_pipe(icond_reg);
13920 %}
13921 
13922 // This pattern is automatically generated from aarch64_ad.m4
13923 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13924 instruct cmovI_reg_imm1_gt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
13925 %{
13926   effect(DEF dst, USE src1, USE cr);
13927   ins_cost(INSN_COST * 2);
13928   format %{ "csincw $dst, $src1, zr gt\t"  %}
13929 
13930   ins_encode %{
13931     __ csincw($dst$$Register,
13932              $src1$$Register,
13933              zr,
13934              Assembler::GT);
13935   %}
13936   ins_pipe(icond_reg);
13937 %}
13938 
13939 // This pattern is automatically generated from aarch64_ad.m4
13940 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13941 instruct cmovI_reg_immM1_lt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
13942 %{
13943   effect(DEF dst, USE src1, USE cr);
13944   ins_cost(INSN_COST * 2);
13945   format %{ "csinvw $dst, $src1, zr lt\t"  %}
13946 
13947   ins_encode %{
13948     __ csinvw($dst$$Register,
13949              $src1$$Register,
13950              zr,
13951              Assembler::LT);
13952   %}
13953   ins_pipe(icond_reg);
13954 %}
13955 
13956 // This pattern is automatically generated from aarch64_ad.m4
13957 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13958 instruct cmovI_reg_immM1_ge(iRegINoSp dst, iRegI src1, rFlagsReg cr)
13959 %{
13960   effect(DEF dst, USE src1, USE cr);
13961   ins_cost(INSN_COST * 2);
13962   format %{ "csinvw $dst, $src1, zr ge\t"  %}
13963 
13964   ins_encode %{
13965     __ csinvw($dst$$Register,
13966              $src1$$Register,
13967              zr,
13968              Assembler::GE);
13969   %}
13970   ins_pipe(icond_reg);
13971 %}
13972 
13973 // This pattern is automatically generated from aarch64_ad.m4
13974 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13975 instruct minI_reg_imm0(iRegINoSp dst, iRegIorL2I src, immI0 imm)
13976 %{
13977   match(Set dst (MinI src imm));
13978   ins_cost(INSN_COST * 3);
13979   expand %{
13980     rFlagsReg cr;
13981     compI_reg_imm0(cr, src);
13982     cmovI_reg_imm0_lt(dst, src, cr);
13983   %}
13984 %}
13985 
13986 // This pattern is automatically generated from aarch64_ad.m4
13987 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
13988 instruct minI_imm0_reg(iRegINoSp dst, immI0 imm, iRegIorL2I src)
13989 %{
13990   match(Set dst (MinI imm src));
13991   ins_cost(INSN_COST * 3);
13992   expand %{
13993     rFlagsReg cr;
13994     compI_reg_imm0(cr, src);
13995     cmovI_reg_imm0_lt(dst, src, cr);
13996   %}
13997 %}
13998 
13999 // This pattern is automatically generated from aarch64_ad.m4
14000 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
14001 instruct minI_reg_imm1(iRegINoSp dst, iRegIorL2I src, immI_1 imm)
14002 %{
14003   match(Set dst (MinI src imm));
14004   ins_cost(INSN_COST * 3);
14005   expand %{
14006     rFlagsReg cr;
14007     compI_reg_imm0(cr, src);
14008     cmovI_reg_imm1_le(dst, src, cr);
14009   %}
14010 %}
14011 
14012 // This pattern is automatically generated from aarch64_ad.m4
14013 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
14014 instruct minI_imm1_reg(iRegINoSp dst, immI_1 imm, iRegIorL2I src)
14015 %{
14016   match(Set dst (MinI imm src));
14017   ins_cost(INSN_COST * 3);
14018   expand %{
14019     rFlagsReg cr;
14020     compI_reg_imm0(cr, src);
14021     cmovI_reg_imm1_le(dst, src, cr);
14022   %}
14023 %}
14024 
14025 // This pattern is automatically generated from aarch64_ad.m4
14026 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
14027 instruct minI_reg_immM1(iRegINoSp dst, iRegIorL2I src, immI_M1 imm)
14028 %{
14029   match(Set dst (MinI src imm));
14030   ins_cost(INSN_COST * 3);
14031   expand %{
14032     rFlagsReg cr;
14033     compI_reg_imm0(cr, src);
14034     cmovI_reg_immM1_lt(dst, src, cr);
14035   %}
14036 %}
14037 
14038 // This pattern is automatically generated from aarch64_ad.m4
14039 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
14040 instruct minI_immM1_reg(iRegINoSp dst, immI_M1 imm, iRegIorL2I src)
14041 %{
14042   match(Set dst (MinI imm src));
14043   ins_cost(INSN_COST * 3);
14044   expand %{
14045     rFlagsReg cr;
14046     compI_reg_imm0(cr, src);
14047     cmovI_reg_immM1_lt(dst, src, cr);
14048   %}
14049 %}
14050 
14051 // This pattern is automatically generated from aarch64_ad.m4
14052 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
14053 instruct maxI_reg_imm0(iRegINoSp dst, iRegIorL2I src, immI0 imm)
14054 %{
14055   match(Set dst (MaxI src imm));
14056   ins_cost(INSN_COST * 3);
14057   expand %{
14058     rFlagsReg cr;
14059     compI_reg_imm0(cr, src);
14060     cmovI_reg_imm0_gt(dst, src, cr);
14061   %}
14062 %}
14063 
14064 // This pattern is automatically generated from aarch64_ad.m4
14065 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
14066 instruct maxI_imm0_reg(iRegINoSp dst, immI0 imm, iRegIorL2I src)
14067 %{
14068   match(Set dst (MaxI imm src));
14069   ins_cost(INSN_COST * 3);
14070   expand %{
14071     rFlagsReg cr;
14072     compI_reg_imm0(cr, src);
14073     cmovI_reg_imm0_gt(dst, src, cr);
14074   %}
14075 %}
14076 
14077 // This pattern is automatically generated from aarch64_ad.m4
14078 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
14079 instruct maxI_reg_imm1(iRegINoSp dst, iRegIorL2I src, immI_1 imm)
14080 %{
14081   match(Set dst (MaxI src imm));
14082   ins_cost(INSN_COST * 3);
14083   expand %{
14084     rFlagsReg cr;
14085     compI_reg_imm0(cr, src);
14086     cmovI_reg_imm1_gt(dst, src, cr);
14087   %}
14088 %}
14089 
14090 // This pattern is automatically generated from aarch64_ad.m4
14091 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
14092 instruct maxI_imm1_reg(iRegINoSp dst, immI_1 imm, iRegIorL2I src)
14093 %{
14094   match(Set dst (MaxI imm src));
14095   ins_cost(INSN_COST * 3);
14096   expand %{
14097     rFlagsReg cr;
14098     compI_reg_imm0(cr, src);
14099     cmovI_reg_imm1_gt(dst, src, cr);
14100   %}
14101 %}
14102 
14103 // This pattern is automatically generated from aarch64_ad.m4
14104 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
14105 instruct maxI_reg_immM1(iRegINoSp dst, iRegIorL2I src, immI_M1 imm)
14106 %{
14107   match(Set dst (MaxI src imm));
14108   ins_cost(INSN_COST * 3);
14109   expand %{
14110     rFlagsReg cr;
14111     compI_reg_imm0(cr, src);
14112     cmovI_reg_immM1_ge(dst, src, cr);
14113   %}
14114 %}
14115 
14116 // This pattern is automatically generated from aarch64_ad.m4
14117 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
14118 instruct maxI_immM1_reg(iRegINoSp dst, immI_M1 imm, iRegIorL2I src)
14119 %{
14120   match(Set dst (MaxI imm src));
14121   ins_cost(INSN_COST * 3);
14122   expand %{
14123     rFlagsReg cr;
14124     compI_reg_imm0(cr, src);
14125     cmovI_reg_immM1_ge(dst, src, cr);
14126   %}
14127 %}
14128 
14129 
14130 
14131 // END This section of the file is automatically generated. Do not edit --------------
14132 
14133 
14134 // ============================================================================
14135 // Floating Point Arithmetic Instructions
14136 
// Scalar single-precision FP add: fadds Sd, Sn, Sm.
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  // _s suffix selects the single-precision FP dual-operand pipe class.
  ins_pipe(fp_dop_reg_reg_s);
%}

// Scalar double-precision FP add: faddd Dd, Dn, Dm.
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Scalar single-precision FP subtract: fsubs Sd, Sn, Sm.
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Scalar double-precision FP subtract: fsubd Dd, Dn, Dm.
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Scalar single-precision FP multiply: fmuls Sd, Sn, Sm.
// Slightly higher cost (6x) than add/sub (5x).
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Scalar double-precision FP multiply: fmuld Dd, Dn, Dm.
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
14226 
// Fused multiply-add family. All rules are guarded by the UseFMA flag
// because a fused operation has different rounding than mul-then-add and
// may only be selected when FMA intrinsics are enabled.

// src1 * src2 + src3
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 + src3
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
// Two match rules: the negation may appear on either multiplicand.
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
// NOTE(review): the immF0 'zero' operand is never referenced by the match
// rule or the encoding — presumably a leftover from an earlier rule shape;
// confirm against the m4 source before removing.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
// NOTE(review): the immD0 'zero' operand is never referenced by the match
// rule or the encoding — see the matching note on mnsubF_reg_reg above.
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
  // n.b. insn name should be fnmsubd
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
14367 
14368 
// Math.max(FF)F
// fmaxs follows the IEEE/Java rules for NaN and signed-zero handling.
instruct maxF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxF src1 src2));

  format %{ "fmaxs   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.min(FF)F
instruct minF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinF src1 src2));

  format %{ "fmins   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.max(DD)D
instruct maxD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MaxD src1 src2));

  format %{ "fmaxd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Math.min(DD)D
instruct minD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MinD src1 src2));

  format %{ "fmind   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
14424 
14425 
// Scalar single-precision FP divide: fdivs. High cost (18x) reflects the
// long latency of the divide unit; the double variant below is costlier
// still (32x).
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// Scalar double-precision FP divide: fdivd.
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
14455 
// Scalar single-precision FP negate: fnegs (flips the sign bit only).
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  format %{ "fneg   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Scalar double-precision FP negate: fnegd.
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
14483 
// Integer absolute value: compare against zero then conditionally negate.
// KILL cr because the cmpw clobbers the condition flags.
instruct absI_reg(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (AbsI src));

  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $src, zr\n\t"
            "cnegw $dst, $src, Assembler::LT\t# int abs"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cnegw(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}

// Long absolute value: same cmp/cneg sequence with 64-bit forms.
instruct absL_reg(iRegLNoSp dst, iRegL src, rFlagsReg cr)
%{
  match(Set dst (AbsL src));

  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmp  $src, zr\n\t"
            "cneg $dst, $src, Assembler::LT\t# long abs"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cneg(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
14517 
// Scalar single-precision FP absolute value: fabss (clears the sign bit).
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Scalar double-precision FP absolute value: fabsd.
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}

// Fused |src1 - src2|: matches AbsF-of-SubF and emits a single fabds.
instruct absdF_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AbsF (SubF src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "fabds   $dst, $src1, $src2" %}
  ins_encode %{
    __ fabds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Fused |src1 - src2| for doubles: single fabdd.
instruct absdD_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AbsD (SubD src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "fabdd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fabdd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
14571 
// Scalar double-precision square root: fsqrtd. Shares the divide/sqrt unit,
// hence the high cost (50x).
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Was fp_div_s: the single/double pipe classes were swapped between the
  // two sqrt rules. Use the double-precision divide/sqrt pipe here, matching
  // the fdivd rule above. Scheduling attribute only — no encoding change.
  ins_pipe(fp_div_d);
%}

// Scalar single-precision square root: fsqrts.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (SqrtF src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Was fp_div_d — see note on sqrtD_reg; single-precision pipe belongs here.
  ins_pipe(fp_div_s);
%}
14597 
// Math.rint, floor, ceil
// Selects the frint* variant at code-emission time from the rmode constant.
instruct roundD_reg(vRegD dst, vRegD src, immI rmode) %{
  match(Set dst (RoundDoubleMode src rmode));
  format %{ "frint  $dst, $src, $rmode" %}
  ins_encode %{
    // NOTE(review): no default case — an rmode outside the three handled
    // constants would silently emit nothing. Presumably RoundDoubleModeNode
    // only produces these three values; confirm in the ideal-node definition.
    switch ($rmode$$constant) {
      case RoundDoubleModeNode::rmode_rint:
        __ frintnd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_floor:
        __ frintmd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_ceil:
        __ frintpd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
    }
  %}
  ins_pipe(fp_uop_d);
%}
14620 
// Math.copySign(double): build a sign-bit mask by negating 'zero'
// (fnegd of +0.0 yields 0x8000000000000000), then bit-select: dst takes the
// sign bit from src2 and all other bits from src1. TEMP_DEF because dst is
// written before all inputs are consumed.
instruct copySignD_reg(vRegD dst, vRegD src1, vRegD src2, vRegD zero) %{
  match(Set dst (CopySignD src1 (Binary src2 zero)));
  effect(TEMP_DEF dst, USE src1, USE src2, USE zero);
  format %{ "CopySignD  $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg),
                  zero = as_FloatRegister($zero$$reg);
    __ fnegd(dst, zero);
    __ bsl(dst, __ T8B, src2, src1);
  %}
  ins_pipe(fp_uop_d);
%}

// Math.copySign(float): movi materializes 0x80000000 (0x80 shifted left 24)
// as the sign mask, then bit-select as in the double variant.
instruct copySignF_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (CopySignF src1 src2));
  effect(TEMP_DEF dst, USE src1, USE src2);
  format %{ "CopySignF  $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg);
    __ movi(dst, __ T2S, 0x80, 24);
    __ bsl(dst, __ T8B, src2, src1);
    // NOTE(review): fp_uop_d (double pipe class) on the float rule — mirrors
    // the double variant; confirm whether fp_uop_s was intended.
  %}
  ins_pipe(fp_uop_d);
%}
14649 
// Math.signum(double): returns +-1.0 with the sign of src, or src itself for
// +-0.0 and NaN. Implemented branch-free with a mask + bit-select.
instruct signumD_reg(vRegD dst, vRegD src, vRegD zero, vRegD one) %{
  match(Set dst (SignumD src (Binary zero one)));
  effect(TEMP_DEF dst, USE src, USE zero, USE one);
  format %{ "signumD  $dst, $src" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg),
                  dst = as_FloatRegister($dst$$reg),
                  zero = as_FloatRegister($zero$$reg),
                  one = as_FloatRegister($one$$reg);
    __ facgtd(dst, src, zero); // dst=0 for +-0.0 and NaN. 0xFFF..F otherwise
    __ ushrd(dst, dst, 1);     // dst=0 for +-0.0 and NaN. 0x7FF..F otherwise
    // Bit selection instruction gets bit from "one" for each enabled bit in
    // "dst", otherwise gets a bit from "src". For "src" that contains +-0.0 or
    // NaN the whole "src" will be copied because "dst" is zero. For all other
    // "src" values dst is 0x7FF..F, which means only the sign bit is copied
    // from "src", and all other bits are copied from 1.0.
    __ bsl(dst, __ T8B, one, src);
  %}
  ins_pipe(fp_uop_d);
%}

// Math.signum(float): same mask + bit-select scheme as signumD_reg, using
// the single-precision compare and a vector shift on the 2S arrangement.
instruct signumF_reg(vRegF dst, vRegF src, vRegF zero, vRegF one) %{
  match(Set dst (SignumF src (Binary zero one)));
  effect(TEMP_DEF dst, USE src, USE zero, USE one);
  format %{ "signumF  $dst, $src" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg),
                  dst = as_FloatRegister($dst$$reg),
                  zero = as_FloatRegister($zero$$reg),
                  one = as_FloatRegister($one$$reg);
    __ facgts(dst, src, zero);    // dst=0 for +-0.0 and NaN. 0xFFF..F otherwise
    __ ushr(dst, __ T2S, dst, 1); // dst=0 for +-0.0 and NaN. 0x7FF..F otherwise
    // Bit selection instruction gets bit from "one" for each enabled bit in
    // "dst", otherwise gets a bit from "src". For "src" that contains +-0.0 or
    // NaN the whole "src" will be copied because "dst" is zero. For all other
    // "src" values dst is 0x7FF..F, which means only the sign bit is copied
    // from "src", and all other bits are copied from 1.0.
    __ bsl(dst, __ T8B, one, src);
  %}
  ins_pipe(fp_uop_d);
%}
14691 
// Thread.onSpinWait() intrinsic: emits the platform spin-wait hint
// sequence chosen by the MacroAssembler (see spin_wait()).
instruct onspinwait() %{
  match(OnSpinWait);
  ins_cost(INSN_COST);

  format %{ "onspinwait" %}

  ins_encode %{
    __ spin_wait();
  %}
  ins_pipe(pipe_class_empty);
%}
14703 
14704 // ============================================================================
14705 // Logical Instructions
14706 
14707 // Integer Logical Instructions
14708 
14709 // And Instructions
14710 
14711 
// 32-bit bitwise AND, register-register.
// NOTE(review): "cr" is declared but has no effect() entry and andw does
// not set flags — looks like a leftover operand; confirm before removing.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14726 
// 32-bit bitwise AND with a logical immediate (immILog guarantees the
// constant is encodable as an AArch64 bitmask immediate).
// Fix: format previously said "andsw" (flag-setting) but the encoding
// emits the non-flag-setting andw — debug output now matches the code.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14741 
14742 // Or Instructions
14743 
// 32-bit bitwise OR, register-register.
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14758 
// 32-bit bitwise OR with a logical (bitmask-encodable) immediate.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14773 
14774 // Xor Instructions
14775 
// 32-bit bitwise XOR, register-register.
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14790 
// 32-bit bitwise XOR with a logical (bitmask-encodable) immediate.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14805 
14806 // Long Logical Instructions
14807 // TODO
14808 
// 64-bit bitwise AND, register-register.
// Fix: format comment said "# int" for a long operation; now "# long".
// NOTE(review): "cr" is declared with no effect() entry and andr does not
// set flags — looks like a leftover operand; confirm before removing.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14823 
// 64-bit bitwise AND with a logical (bitmask-encodable) immediate.
// Fix: format comment said "# int" for a long operation; now "# long".
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14838 
14839 // Or Instructions
14840 
// 64-bit bitwise OR, register-register.
// Fix: format comment said "# int" for a long operation; now "# long".
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14855 
// 64-bit bitwise OR with a logical (bitmask-encodable) immediate.
// Fix: format comment said "# int" for a long operation; now "# long".
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14870 
14871 // Xor Instructions
14872 
// 64-bit bitwise XOR, register-register.
// Fix: format comment said "# int" for a long operation; now "# long".
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14887 
// 64-bit bitwise XOR with a logical (bitmask-encodable) immediate.
// Fix: format comment said "# int" for a long operation; now "# long".
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14902 
// int -> long: sign-extend the low 32 bits. sbfm with immr=0, imms=31 is
// the canonical encoding of sxtw.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
14914 
14915 // this pattern occurs in bigmath arithmetic
// Unsigned int -> long: (long)i & 0xFFFFFFFF collapses to a zero-extend.
// ubfm with immr=0, imms=31 is the canonical encoding of uxtw.
// this pattern occurs in bigmath arithmetic
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14928 
// long -> int: a 32-bit register move keeps the low word and clears the
// high word, which is exactly Java's narrowing conversion.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
14941 
// Conv2B on an int: dst = (src != 0) ? 1 : 0, via compare against zr and
// conditional set. Clobbers the flags, hence KILL cr.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
14959 
// Conv2B on a pointer: dst = (src != NULL) ? 1 : 0, using the 64-bit
// compare. Clobbers the flags, hence KILL cr.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
14977 
// double -> float narrowing conversion (fcvt, double source form).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}
14990 
// float -> double widening conversion (fcvt, single source form).
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}
15003 
// float -> int: fcvtzs (signed, round toward zero) into a 32-bit GPR.
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}
15016 
// float -> long: fcvtzs (signed, round toward zero) into a 64-bit GPR.
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}
15029 
// int -> float: scvtf from a 32-bit GPR to a single-precision register.
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}
15042 
// long -> float: scvtf from a 64-bit GPR to a single-precision register.
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}
15055 
// double -> int: fcvtzs (signed, round toward zero) into a 32-bit GPR.
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}
15068 
// double -> long: fcvtzs (signed, round toward zero) into a 64-bit GPR.
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}
15081 
// int -> double: scvtf from a 32-bit GPR to a double-precision register.
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}
15094 
// long -> double: scvtf from a 64-bit GPR to a double-precision register.
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
15107 
15108 // stack <-> reg and reg <-> reg shuffles with no conversion
15109 
// Raw-bits move of a float stack slot into an int GPR (no conversion):
// 32-bit load from the slot's sp-relative displacement.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}
15127 
// Raw-bits move of an int stack slot into a float register (no
// conversion): single-precision load from the sp-relative slot.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
15145 
// Raw-bits move of a double stack slot into a long GPR (no conversion):
// 64-bit load from the slot's sp-relative displacement.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}
15163 
// Raw-bits move of a long stack slot into a double register (no
// conversion): double-precision load from the sp-relative slot.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
15181 
// Raw-bits move of a float register into an int stack slot (no
// conversion): single-precision store to the sp-relative slot.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
15199 
// Raw-bits move of an int GPR into a float stack slot (no conversion):
// 32-bit store to the sp-relative slot.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
15217 
// Raw-bits move of a double register into a long stack slot (no
// conversion): double-precision store to the sp-relative slot.
// Fix: format operands were swapped ("strd $dst, $src"); the encoding
// stores $src into the $dst slot, matching the sibling store rules.
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
15235 
// Raw-bits move of a long GPR into a double stack slot (no conversion):
// 64-bit store to the sp-relative slot.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
15253 
// Raw-bits register move, float -> int GPR (fmov, no conversion).
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}
15271 
// Raw-bits register move, int GPR -> float (fmov, no conversion).
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}
15289 
// Raw-bits register move, double -> long GPR (fmov, no conversion).
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}
15307 
// Raw-bits register move, long GPR -> double (fmov, no conversion).
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
15325 
15326 // ============================================================================
15327 // clearing of an array
15328 
// ClearArray with a variable word count: zero_words(base=r10, cnt=r11).
// zero_words may emit a call to a stub and returns NULL if the code
// cache is full, in which case compilation is bailed out.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base, KILL cr);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    address tpc = __ zero_words($base$$Register, $cnt$$Register);
    if (tpc == NULL) {
      // Stub emission failed: bail out of this compilation.
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
15347 
// ClearArray with a small constant word count, guarded by the predicate
// to counts below BlockZeroingLowLimit (in words). Uses the constant
// overload of zero_words; temp is available for the inline zeroing loop.
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, iRegL_R11 temp, Universe dummy, rFlagsReg cr)
%{
  predicate((uint64_t)n->in(2)->get_long()
            < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
  match(Set dummy (ClearArray cnt base));
  effect(TEMP temp, USE_KILL base, KILL cr);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    address tpc = __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
    if (tpc == NULL) {
      // Stub emission failed: bail out of this compilation.
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
15368 
15369 // ============================================================================
15370 // Overflow Math Instructions
15371 
// OverflowAddI: set flags as if op1+op2 were computed; cmnw is the
// flag-setting add-compare (adds wzr, op1, op2), leaving V on overflow.
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
15384 
// OverflowAddI with an add/sub-encodable immediate: cmnw sets V on
// signed overflow of op1+op2.
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
15397 
// OverflowAddL: 64-bit cmn sets V on signed overflow of op1+op2.
instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
15410 
// OverflowAddL with an add/sub-encodable immediate: adds to zr discards
// the sum but sets V on signed overflow (equivalent to cmn).
instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "adds  zr, $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ adds(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
15423 
// OverflowSubI: cmpw (subs wzr) sets V on signed overflow of op1-op2.
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
15436 
// OverflowSubI with an add/sub-encodable immediate.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
15449 
// OverflowSubL: 64-bit cmp sets V on signed overflow of op1-op2.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
15462 
// OverflowSubL with an add/sub-encodable immediate. subs to zr is the
// expansion of cmp, as shown in the format.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ subs(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
15475 
// Overflow check for int negation, matched as 0 - op1: cmpw against zr
// sets V when op1 == Integer.MIN_VALUE.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
15488 
// Overflow check for long negation, matched as 0 - op1: 64-bit cmp
// against zr sets V when op1 == Long.MIN_VALUE.
// NOTE(review): the zero operand is typed immI0 rather than immL0 even
// though OverflowSubL is a long op — verify this matches as intended.
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
15501 
// OverflowMulI producing a flags result: compute the full 64-bit product
// with smull, then compare it against its own 32-bit sign extension
// (mismatch => the product does not fit in an int). The tail converts
// that NE/EQ outcome into the V flag that OverflowMulI consumers expect.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
15522 
// Fused OverflowMulI + branch: when the If tests only overflow /
// no_overflow (see predicate), skip the V-flag materialization and
// branch directly on the NE/EQ outcome of the sign-extension compare.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    // VS (overflow requested) maps to NE, VC maps to EQ.
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
15544 
// OverflowMulL producing a flags result: mul gives the low 64 bits,
// smulh the high 64. The product fits in a long iff the high half equals
// the sign extension of the low half (ASR #63). The tail converts the
// NE/EQ outcome into the V flag, as in overflowMulI_reg.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
15567 
// Fused OverflowMulL + branch: as with the int variant, when the If
// tests only overflow/no_overflow we branch straight on the NE/EQ
// outcome of the high-half vs sign-extension compare.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    // VS (overflow requested) maps to NE, VC maps to EQ.
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
15591 
15592 // ============================================================================
15593 // Compare Instructions
15594 
// Signed int compare, register-register (cmpw via shared enc class).
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
15608 
// Signed int compare against the constant zero.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
15622 
// Signed int compare against an add/sub-encodable immediate (single insn).
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15636 
// Signed int compare against an arbitrary immediate; costs extra because
// the constant may need to be materialized first (hence 2x INSN_COST).
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15650 
15651 // Unsigned compare Instructions; really, same as signed compare
15652 // except it should only be used to feed an If or a CMovI which takes a
15653 // cmpOpU.
15654 
// Unsigned int compare: same cmpw encoding as the signed rule; only the
// flags register class (rFlagsRegU) differs so consumers use unsigned
// condition codes.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
15668 
// Unsigned int compare against the constant zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
15682 
// Unsigned int compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15696 
// Unsigned int compare against an arbitrary immediate (may need the
// constant materialized, hence 2x INSN_COST).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15710 
// Signed long compare, register-register (64-bit cmp via enc class).
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
15724 
// Signed long compare against the constant zero.
// Fix: format said "tst $op1", but the add/sub-immediate enc class emits
// a compare (subs zr, op1, #0), not ands — debug text now matches.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, #0" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
15738 
// Signed long compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15752 
// Signed long compare against an arbitrary immediate (may need the
// constant materialized, hence 2x INSN_COST).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15766 
// Unsigned long compare: same cmp encoding as the signed rule; the
// rFlagsRegU class steers consumers to unsigned condition codes.
instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
15780 
// Unsigned long compare against the constant zero.
// Fix: format said "tst $op1", but the add/sub-immediate enc class emits
// a compare (subs zr, op1, #0), not ands — debug text now matches.
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, #0" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
15794 
15795 instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
15796 %{
15797   match(Set cr (CmpUL op1 op2));
15798 
15799   effect(DEF cr, USE op1);
15800 
15801   ins_cost(INSN_COST);
15802   format %{ "cmp  $op1, $op2" %}
15803 
15804   ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));
15805 
15806   ins_pipe(icmp_reg_imm);
15807 %}
15808 
15809 instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
15810 %{
15811   match(Set cr (CmpUL op1 op2));
15812 
15813   effect(DEF cr, USE op1);
15814 
15815   ins_cost(INSN_COST * 2);
15816   format %{ "cmp  $op1, $op2" %}
15817 
15818   ins_encode(aarch64_enc_cmp_imm(op1, op2));
15819 
15820   ins_pipe(icmp_reg_imm);
15821 %}
15822 
// Pointer compare, register vs register.  Pointer comparisons are
// unsigned, hence rFlagsRegU.
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed-oop (narrow pointer) compare, register vs register.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Pointer null test: compare a pointer register against the constant
// null (immP0).
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Narrow-oop null test: compare a compressed pointer register against
// the narrow null constant (immN0).
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
15878 
15879 // FP comparisons
15880 //
15881 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
15882 // using normal cmpOp. See declaration of rFlagsReg for details.
15883 
// Float compare, register vs register (fcmps sets the FP condition
// flags, transferred to the integer flags per the note above).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Float compare against the constant 0.0 (fcmps has a compare-with-zero
// form, avoiding materializing the constant).
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
// FROM HERE

// Double compare, register vs register.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Double compare against the constant 0.0.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
15940 
// Three-way float compare (CmpF3): dst = -1 / 0 / +1 for less(or
// unordered) / equal / greater.  fcmps sets the flags, then two
// conditional-select instructions build the integer result.
// Fixes: the format string was missing the closing ')' after "eq", and
// the encoding declared and bound a 'done' label that nothing ever
// branched to (dead code) — both removed/corrected here.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
15968 
// Three-way double compare (CmpD3): dst = -1 / 0 / +1 for less(or
// unordered) / equal / greater, same csinv/csneg trick as compF3.
// Fixes: missing ')' in the format string and removal of the unused
// 'done' label from the encoding.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
15995 
// Three-way float compare against 0.0 (uses fcmps' compare-with-zero
// form).  Result convention matches compF3_reg_reg: -1 / 0 / +1.
// Fixes: missing ')' in the format string and removal of the unused
// 'done' label from the encoding.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
16022 
// Three-way double compare against 0.0.  Result convention matches
// compD3_reg_reg: -1 / 0 / +1.
// Fixes: missing ')' in the format string and removal of the unused
// 'done' label from the encoding.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
16048 
// CmpLTMask: dst = (p < q) ? -1 : 0 (all-ones mask when less-than).
// Implemented as cmp; cset(lt) giving 0/1; then 0 - dst giving 0/-1.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// CmpLTMask against zero: an arithmetic shift right by 31 replicates the
// sign bit, yielding -1 for negative src and 0 otherwise in a single
// instruction.  (Flags are still declared KILLed to match the generic
// rule, though asrw itself does not set them — note the effect clause.)
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
16085 
16086 // ============================================================================
16087 // Max and Min
16088 
16089 // Like compI_reg_reg or compI_reg_immI0 but without match rule and second zero parameter.
16090 
// Flag-setting compare of an int register against 0, with no match rule:
// only usable from expand blocks (see the Min/Max comment above).
instruct compI_reg_imm0(rFlagsReg cr, iRegI src)
%{
  effect(DEF cr, USE src);
  ins_cost(INSN_COST);
  format %{ "cmpw $src, 0" %}

  ins_encode %{
    __ cmpw($src$$Register, 0);
  %}
  ins_pipe(icmp_reg_imm);
%}

// MinI: expands to a signed compare followed by a conditional move that
// selects src1 when src1 < src2 (cmovI_reg_reg_lt defined elsewhere).
instruct minI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2)
%{
  match(Set dst (MinI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_lt(dst, src1, src2, cr);
  %}
%}

// MaxI: same shape as MinI but selects on the 'gt' condition.
instruct maxI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2)
%{
  match(Set dst (MaxI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_gt(dst, src1, src2, cr);
  %}
%}
16126 
16127 
16128 // ============================================================================
16129 // Branch Instructions
16130 
16131 // Direct Branch.
// Unconditional direct branch (Goto) to a label.
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}

// Conditional Near Branch
// Conditional branch on signed/flag conditions (cmpOp + rFlagsReg).
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}

// Conditional Near Branch Unsigned
// Same as branchCon but for unsigned conditions (cmpOpU + rFlagsRegU).
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
16187 
16188 // Make use of CBZ and CBNZ.  These instructions, as well as being
16189 // shorter than (cmp; branch), have the additional benefit of not
16190 // killing the flags.
16191 
// Int eq/ne-with-zero fused compare-and-branch via cbzw/cbnzw.
// (This family: the embedded C++ if/else bodies are now braced per
// house C++ style — behavior unchanged.)
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ) {
      __ cbzw($op1$$Register, *L);
    } else {
      __ cbnzw($op1$$Register, *L);
    }
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Long eq/ne-with-zero fused compare-and-branch via cbz/cbnz.
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ) {
      __ cbz($op1$$Register, *L);
    } else {
      __ cbnz($op1$$Register, *L);
    }
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Pointer eq/ne-with-null fused compare-and-branch.
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ) {
      __ cbz($op1$$Register, *L);
    } else {
      __ cbnz($op1$$Register, *L);
    }
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Narrow-oop eq/ne-with-null fused compare-and-branch (32-bit forms).
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ) {
      __ cbzw($op1$$Register, *L);
    } else {
      __ cbnzw($op1$$Register, *L);
    }
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Null test of a DecodeN'd narrow oop: test the compressed form
// directly (a narrow oop is null iff its decoded pointer is null),
// saving the decode.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ) {
      __ cbzw($oop$$Register, *L);
    } else {
      __ cbnzw($oop$$Register, *L);
    }
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned int compare-with-zero branch.  For unsigned x, "x <= 0"
// (LS) is equivalent to "x == 0", so EQ and LS both take the cbzw path.
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS) {
      __ cbzw($op1$$Register, *L);
    } else {
      __ cbnzw($op1$$Register, *L);
    }
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned long compare-with-zero branch; same EQ/LS reasoning as above.
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS) {
      __ cbz($op1$$Register, *L);
    } else {
      __ cbnz($op1$$Register, *L);
    }
  %}
  ins_pipe(pipe_cmp_branch);
%}
16310 
16311 // Test bit and Branch
16312 
16313 // Patterns for short (< 32KiB) variants
// Long sign test + branch: "x < 0" is just "sign bit set", so emit a
// test-bit-and-branch on bit 63.  LT maps to the NE (bit set, tbnz)
// form, GE to EQ (bit clear, tbz) — see MacroAssembler::tbr.
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Int sign test + branch: test-bit-and-branch on bit 31.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16345 
// Single-bit test + branch, long: matches (x & pow2) ==/!= 0 and emits
// one tbz/tbnz on the relevant bit (predicate guarantees op2 is a
// power of two).
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Single-bit test + branch, int variant of the rule above.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16379 
16380 // And far variants
// Far variant of cmpL_branch_sign: same pattern, but tbr is asked to
// emit a far-capable sequence (no ins_short_branch, target may exceed
// the tbz/tbnz +/-32KiB range).
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of cmpI_branch_sign.
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of cmpL_branch_bit.
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of cmpI_branch_bit.
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16442 
16443 // Test bits
16444 
// Flag-setting bit test, long: (x & imm) vs 0 folds into a single TST
// when imm is a valid 64-bit logical immediate (see predicate).
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
16457 
// Flag-setting bit test, int: (x & imm) vs 0 folds into a single TSTW
// when imm is a valid 32-bit logical immediate (see predicate).
// Fix: format string said "tst" while the encoder emits tstw; now
// matches the emitted instruction and the sibling cmpI_and_reg rule.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
16470 
// Flag-setting bit test, long, register form: (x & y) vs 0 as TST.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Flag-setting bit test, int, register form: (x & y) vs 0 as TSTW.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
16492 
16493 
16494 // Conditional Far Branch
16495 // Conditional Far Branch Unsigned
16496 // TODO: fixme
16497 
16498 // counted loop end branch near
// Counted-loop back-branch, signed conditions; same encoder as
// branchCon, distinguished only by the CountedLoopEnd match rule.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}

// counted loop end branch near Unsigned
// Counted-loop back-branch, unsigned conditions.
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
16531 
16532 // counted loop end branch far
16533 // counted loop end branch far unsigned
16534 // TODO: fixme
16535 
16536 // ============================================================================
16537 // inlined locking and unlocking
16538 
// Inlined monitor enter for stack/heavy locking modes (predicate
// excludes LM_LIGHTWEIGHT).  Success/failure is communicated via the
// flags in cr; tmp/tmp2/tmp3 are scratch.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
  predicate(LockingMode != LM_LIGHTWEIGHT);
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2, TEMP tmp3);

  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2,$tmp3" %}

  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_serial);
%}

// Inlined monitor exit, counterpart of cmpFastLock.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  predicate(LockingMode != LM_LIGHTWEIGHT);
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register);
  %}

  ins_pipe(pipe_serial);
%}

// Inlined monitor enter for the lightweight locking mode.
instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  predicate(LockingMode == LM_LIGHTWEIGHT);
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode %{
    __ fast_lock_lightweight($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register);
  %}

  ins_pipe(pipe_serial);
%}

// Inlined monitor exit for the lightweight locking mode.
instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  predicate(LockingMode == LM_LIGHTWEIGHT);
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode %{
    __ fast_unlock_lightweight($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register);
  %}

  ins_pipe(pipe_serial);
%}
16602 
16603 // ============================================================================
16604 // Safepoint Instructions
16605 
16606 // TODO
16607 // provide a near and far version of this code
16608 
// Safepoint poll: load from the polling page; the page is protected
// when a safepoint is pending, so the load traps into the VM.
instruct safePoint(rFlagsReg cr, iRegP poll)
%{
  match(SafePoint poll);
  effect(KILL cr);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
16622 
16623 
16624 // ============================================================================
16625 // Procedure Call/Return Instructions
16626 
16627 // Call Java Static Instruction
16628 
// Direct call to a statically-bound Java method; the epilog encoder
// runs after the call returns.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode(aarch64_enc_java_static_call(meth),
             aarch64_enc_call_epilog);

  ins_pipe(pipe_class_call);
%}

// TO HERE

// Call Java Dynamic Instruction
// Call through an inline cache (virtual/interface dispatch).
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode(aarch64_enc_java_dynamic_call(meth),
             aarch64_enc_call_epilog);

  ins_pipe(pipe_class_call);
%}
16663 
16664 // Call Runtime Instruction
16665 
// Call into the VM runtime (may include a VM state transition; see the
// java_to_runtime encoder).
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction

// Leaf runtime call: no safepoint/oopmap bookkeeping required by the
// callee; same encoder as CallRuntimeDirect on this port.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction

// Leaf runtime call that additionally does not touch FP state.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Native call (Panama/foreign downcall node); reuses the
// java_to_runtime encoder.
instruct CallNativeDirect(method meth)
%{
  match(CallNative);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, native $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16729 
16730 // Tail Call; Jump from runtime stub to Java code.
16731 // Also known as an 'interprocedural jump'.
16732 // Target of jump will eventually return to caller.
16733 // TailJump below removes the return address.
// Indirect tail call (interprocedural jump) to jump_target; the
// inline-cache register carries the Method*.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_ptr)
%{
  match(TailCall jump_target method_ptr);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_ptr holds method" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}

// Tail jump used in exception-forwarding: jumps to the handler with the
// exception oop pinned in r0 (see TailCalljmpInd comment above).
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
16759 
16760 // Create exception oop: created by stack-crawling runtime code.
16761 // Created exception is now available to this handler, and is setup
16762 // just prior to jumping to this handler. No code emitted.
16763 // TODO check
16764 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Materializes the incoming exception oop (already placed in r0 by the
// runtime before jumping here); zero-size, emits nothing.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}

// Rethrow exception: The exception oop will come in the first
// argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}


// Return Instruction
// epilog node loads ret address into lr as part of frame pop
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
16804 
16805 // Die now.
// Halt node: aborts the VM with the recorded reason.  Nothing is
// emitted if C2 proved the node unreachable.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    if (is_reachable()) {
      __ stop(_halt_reason);
    }
  %}

  ins_pipe(pipe_class_default);
%}
16820 
16821 // ============================================================================
16822 // Partial Subtype Check
16823 //
16824 // superklass array for an instance of the superklass.  Set a hidden
16825 // internal cache on a hit (cache is checked with exposed code in
16826 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
16827 // encoding ALSO sets flags.
16828 
// Slow-path subtype check: sub in r4, super in r0, result in r5.
// Clobbers r2 (scratch) and the flags, as advertised by the effect list.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
16843 
// Fused form: subtype check whose result is only compared against zero,
// so the flags (cr) are the real output and the result register need
// not be zeroed on a hit (opcode 0x0 below).
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
16858 
// String.compareTo intrinsic, UU encoding variant (selected by the
// predicate on StrCompNode::encoding).  Fixed registers match the
// MacroAssembler::string_compare calling convention; no vector temps
// are needed for this variant (fnoreg placeholders).
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16876 
// String.compareTo intrinsic, LL encoding variant.  Same register
// layout as string_compareU; only the encoding argument to
// string_compare differs.
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16893 
// String.compareTo intrinsic, UL (mixed-encoding) variant.  Unlike the
// UU/LL forms this one needs three vector scratch registers (v0-v2),
// which are passed to string_compare and killed by the effect list.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16913 
// String.compareTo intrinsic, LU (mixed-encoding) variant — mirror of
// string_compareUL with the operand encodings swapped.  Uses three
// vector scratch registers (v0-v2), killed via the effect list.
// (Fix: added the missing space after the comma before
// StrIntrinsicNode::LU, matching the UL variant's formatting.)
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
16933 
// String.indexOf intrinsic, UU encoding, variable needle length.
// The -1 icnt2 argument tells string_indexof the needle length is not
// a compile-time constant (contrast the *_con* variants below).
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                          iRegINoSp tmp3, iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
         TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16957 
// String.indexOf intrinsic, LL encoding, variable needle length.
// Identical shape to string_indexofUU; only the encoding differs.
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
         TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16981 
// String.indexOf intrinsic, UL (mixed-encoding) variant, variable
// needle length; -1 icnt2 means the needle length is not constant.
// (Fix: the format string said "cnt1" where the UU/LL siblings say
// "$cnt1", so the debug disassembly printed the literal text instead
// of the operand.  Also normalized the missing space in the parameter
// list to match the siblings.)
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5,
         TEMP tmp6, TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
17005 
// String.indexOf with a constant needle length (<= 4 elements, per the
// immI_le_4 operand), UU encoding.  The constant is passed to
// string_indexof as icnt2, and zr stands in for the unused count and
// scratch registers of the variable-length form.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    // Needle length is a compile-time constant.
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
17027 
// String.indexOf with a constant needle length (<= 4 elements), LL
// encoding.  Same shape as string_indexof_conUU.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    // Needle length is a compile-time constant.
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
17049 
// String.indexOf with a constant needle length, UL (mixed) encoding.
// Note the tighter immI_1 operand: only a single-element needle is
// matched for the mixed-encoding constant case.
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    // Needle length is a compile-time constant (always 1 here).
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
17071 
// StringUTF16.indexOf(char) intrinsic (encoding U).  Searches $cnt1
// chars of $str1 for $ch; result index (or miss value) lands in r0.
instruct string_indexof_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  predicate(((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U);
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "StringUTF16 IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
17090 
// StringLatin1.indexOf(char) intrinsic (encoding L) — byte-wide twin
// of string_indexof_char, dispatching to stringL_indexof_char.
instruct stringL_indexof_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  predicate(((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L);
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "StringLatin1 IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ stringL_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
17109 
// String.equals intrinsic, LL encoding.  The trailing 1 passed to
// string_equals is the element size in bytes for this variant.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
17125 
// String.equals intrinsic, UU encoding.  The trailing 2 passed to
// string_equals is the element size in bytes for this variant.
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
17141 
// Arrays.equals intrinsic for byte arrays (encoding LL; trailing 1 is
// the element size passed to arrays_equals).  arrays_equals may emit a
// stub; a NULL return means the code cache filled up, so compilation
// is bailed out rather than emitting truncated code.
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                       vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3,
         TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         TEMP vtmp6, TEMP vtmp7, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result # KILL $ary1 $ary2 $tmp $tmp1 $tmp2 $tmp3 V0-V7 cr" %}
  ins_encode %{
    address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register,
                                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                                   $result$$Register, $tmp$$Register, 1);
    if (tpc == NULL) {
      // Stub generation failed: code cache is full, abort this compile.
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
17166 
// Arrays.equals intrinsic for char arrays (encoding UU; trailing 2 is
// the element size passed to arrays_equals).  Same code-cache-full
// bailout handling as array_equalsB.
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                       vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3,
         TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         TEMP vtmp6, TEMP vtmp7, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result # KILL $ary1 $ary2 $tmp $tmp1 $tmp2 $tmp3 V0-V7 cr" %}
  ins_encode %{
    address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register,
                                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                                   $result$$Register, $tmp$$Register, 2);
    if (tpc == NULL) {
      // Stub generation failed: code cache is full, abort this compile.
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
17191 
// HasNegatives intrinsic: scan $len bytes of $ary1 for any byte with
// the sign bit set; result in r0.  has_negatives may emit a stub and
// returns NULL when the code cache is full (compile bails out).
instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (HasNegatives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "has negatives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    address tpc = __ has_negatives($ary1$$Register, $len$$Register, $result$$Register);
    if (tpc == NULL) {
      // Stub generation failed: code cache is full, abort this compile.
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe( pipe_slow );
%}
17206 
17207 // fast char[] to byte[] compression
// fast char[] to byte[] compression
// StrCompressedCopy intrinsic: compress $len chars from $src into
// $dst, result flag/count in r0.  Uses six vector temps (v0-v5); len
// is USE (read but preserved), src/dst are consumed.
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                         vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         USE_KILL src, USE_KILL dst, USE len, KILL cr);

  format %{ "String Compress $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $result$$Register, $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                           $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                           $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
17226 
17227 // fast byte[] to char[] inflation
// fast byte[] to char[] inflation
// StrInflatedCopy intrinsic (no value result — Universe dummy output).
// byte_array_inflate may emit a stub; NULL return means the code cache
// is full and the compile is abandoned.
// NOTE(review): the effect list reserves v0-v6 but the encode body only
// passes vtmp0-vtmp2 plus tmp to byte_array_inflate — presumably the
// helper clobbers the extras internally; confirm against
// MacroAssembler::byte_array_inflate before trimming the TEMP list.
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len, iRegP_R3 tmp,
                        vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                        vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3,
         TEMP vtmp4, TEMP vtmp5, TEMP vtmp6, TEMP tmp,
         USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst # KILL $tmp $src $dst $len V0-V6 cr" %}
  ins_encode %{
    address tpc = __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                                        $vtmp2$$FloatRegister, $tmp$$Register);
    if (tpc == NULL) {
      // Stub generation failed: code cache is full, abort this compile.
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
17249 
17250 // encode char[] to byte[] in ISO_8859_1
// encode char[] to byte[] in ISO_8859_1
// EncodeISOArray intrinsic, ISO-8859-1 flavor (predicate excludes the
// ASCII variant below; the `false` argument selects non-ASCII mode in
// encode_iso_array).
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                          vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                          iRegI_R0 result, rFlagsReg cr)
%{
  predicate(!((EncodeISOArrayNode*)n)->is_ascii());
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE len, KILL vtmp0, KILL vtmp1,
         KILL vtmp2, KILL vtmp3, KILL vtmp4, KILL vtmp5, KILL cr);

  format %{ "Encode ISO array $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
                        $result$$Register, false,
                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                        $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                        $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_class_memory);
%}
17271 
// EncodeISOArray intrinsic, ASCII flavor (predicate selects is_ascii
// nodes; the `true` argument puts encode_iso_array in ASCII mode).
// Mirror of encode_iso_array above.
instruct encode_ascii_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                            vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                            vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                            iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((EncodeISOArrayNode*)n)->is_ascii());
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE len, KILL vtmp0, KILL vtmp1,
         KILL vtmp2, KILL vtmp3, KILL vtmp4, KILL vtmp5, KILL cr);

  format %{ "Encode ASCII array $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
                        $result$$Register, true,
                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                        $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                        $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_class_memory);
%}
17292 
17293 // ============================================================================
17294 // This name is KNOWN by the ADLC and cannot be changed.
17295 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
17296 // for this guy.
// ThreadLocal pseudo-instruction: the current-thread pointer already
// lives in the dedicated thread register (thread_RegP), so no code is
// emitted — size 0, cost 0.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  // Zero-length: purely informs register allocation.
  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
17311 
17312 //----------PEEPHOLE RULES-----------------------------------------------------
17313 // These must follow all instruction definitions as they use the names
17314 // defined in the instructions definitions.
17315 //
17316 // peepmatch ( root_instr_name [preceding_instruction]* );
17317 //
17318 // peepconstraint %{
17319 // (instruction_number.operand_name relational_op instruction_number.operand_name
17320 //  [, ...] );
17321 // // instruction numbers are zero-based using left to right order in peepmatch
17322 //
17323 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
17324 // // provide an instruction_number.operand_name for each operand that appears
17325 // // in the replacement instruction's match rule
17326 //
17327 // ---------VM FLAGS---------------------------------------------------------
17328 //
17329 // All peephole optimizations can be turned off using -XX:-OptoPeephole
17330 //
17331 // Each peephole rule is given an identifying number starting with zero and
17332 // increasing by one in the order seen by the parser.  An individual peephole
17333 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
17334 // on the command-line.
17335 //
17336 // ---------CURRENT LIMITATIONS----------------------------------------------
17337 //
17338 // Only match adjacent instructions in same basic block
17339 // Only equality constraints
17340 // Only constraints between operands, not (0.dest_reg == RAX_enc)
17341 // Only one replacement instruction
17342 //
17343 // ---------EXAMPLE----------------------------------------------------------
17344 //
17345 // // pertinent parts of existing instructions in architecture description
17346 // instruct movI(iRegINoSp dst, iRegI src)
17347 // %{
17348 //   match(Set dst (CopyI src));
17349 // %}
17350 //
17351 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
17352 // %{
17353 //   match(Set dst (AddI dst src));
17354 //   effect(KILL cr);
17355 // %}
17356 //
17357 // // Change (inc mov) to lea
17358 // peephole %{
//   // increment preceded by register-register move
17360 //   peepmatch ( incI_iReg movI );
17361 //   // require that the destination register of the increment
17362 //   // match the destination register of the move
17363 //   peepconstraint ( 0.dst == 1.dst );
17364 //   // construct a replacement instruction that sets
17365 //   // the destination to ( move's source register + one )
17366 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
17367 // %}
17368 //
17369 
17370 // Implementation no longer uses movX instructions since
17371 // machine-independent system no longer uses CopyX nodes.
17372 //
17373 // peephole
17374 // %{
17375 //   peepmatch (incI_iReg movI);
17376 //   peepconstraint (0.dst == 1.dst);
17377 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17378 // %}
17379 
17380 // peephole
17381 // %{
17382 //   peepmatch (decI_iReg movI);
17383 //   peepconstraint (0.dst == 1.dst);
17384 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17385 // %}
17386 
17387 // peephole
17388 // %{
17389 //   peepmatch (addI_iReg_imm movI);
17390 //   peepconstraint (0.dst == 1.dst);
17391 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17392 // %}
17393 
17394 // peephole
17395 // %{
17396 //   peepmatch (incL_iReg movL);
17397 //   peepconstraint (0.dst == 1.dst);
17398 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17399 // %}
17400 
17401 // peephole
17402 // %{
17403 //   peepmatch (decL_iReg movL);
17404 //   peepconstraint (0.dst == 1.dst);
17405 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17406 // %}
17407 
17408 // peephole
17409 // %{
17410 //   peepmatch (addL_iReg_imm movL);
17411 //   peepconstraint (0.dst == 1.dst);
17412 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17413 // %}
17414 
17415 // peephole
17416 // %{
17417 //   peepmatch (addP_iReg_imm movP);
17418 //   peepconstraint (0.dst == 1.dst);
17419 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
17420 // %}
17421 
17422 // // Change load of spilled value to only a spill
17423 // instruct storeI(memory mem, iRegI src)
17424 // %{
17425 //   match(Set mem (StoreI mem src));
17426 // %}
17427 //
17428 // instruct loadI(iRegINoSp dst, memory mem)
17429 // %{
17430 //   match(Set dst (LoadI mem));
17431 // %}
17432 //
17433 
17434 //----------SMARTSPILL RULES---------------------------------------------------
17435 // These must follow all instruction definitions as they use the names
17436 // defined in the instructions definitions.
17437 
17438 // Local Variables:
17439 // mode: c++
17440 // End: