1 //
    2 // Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
    3 // Copyright (c) 2014, 2024, Red Hat, Inc. All rights reserved.
    4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    5 //
    6 // This code is free software; you can redistribute it and/or modify it
    7 // under the terms of the GNU General Public License version 2 only, as
    8 // published by the Free Software Foundation.
    9 //
   10 // This code is distributed in the hope that it will be useful, but WITHOUT
   11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   13 // version 2 for more details (a copy is included in the LICENSE file that
   14 // accompanied this code).
   15 //
   16 // You should have received a copy of the GNU General Public License version
   17 // 2 along with this work; if not, write to the Free Software Foundation,
   18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   19 //
   20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   21 // or visit www.oracle.com if you need additional information or have any
   22 // questions.
   23 //
   24 //
   25 
   26 // AArch64 Architecture Description File
   27 
   28 //----------REGISTER DEFINITION BLOCK------------------------------------------
   29 // This information is used by the matcher and the register allocator to
   30 // describe individual registers and classes of registers within the target
   31 // architecture.
   32 
   33 register %{
   34 //----------Architecture Description Register Definitions----------------------
   35 // General Registers
   36 // "reg_def"  name ( register save type, C convention save type,
   37 //                   ideal register type, encoding );
   38 // Register Save Types:
   39 //
   40 // NS  = No-Save:       The register allocator assumes that these registers
   41 //                      can be used without saving upon entry to the method, &
   42 //                      that they do not need to be saved at call sites.
   43 //
   44 // SOC = Save-On-Call:  The register allocator assumes that these registers
   45 //                      can be used without saving upon entry to the method,
   46 //                      but that they must be saved at call sites.
   47 //
   48 // SOE = Save-On-Entry: The register allocator assumes that these registers
   49 //                      must be saved before using them upon entry to the
   50 //                      method, but they do not need to be saved at call
   51 //                      sites.
   52 //
   53 // AS  = Always-Save:   The register allocator assumes that these registers
   54 //                      must be saved before using them upon entry to the
   55 //                      method, & that they must be saved at call sites.
   56 //
   57 // Ideal Register Type is used to determine how to save & restore a
   58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
   59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
   60 //
   61 // The encoding number is the actual bit-pattern placed into the opcodes.
   62 
   63 // We must define the 64 bit int registers in two 32 bit halves, the
   64 // real lower register and a virtual upper half register. upper halves
   65 // are used by the register allocator but are not actually supplied as
   66 // operands to memory ops.
   67 //
   68 // follow the C1 compiler in making registers
   69 //
   70 //   r0-r7,r10-r26 volatile (caller save)
   71 //   r27-r32 system (no save, no allocate)
   72 //   r8-r9 non-allocatable (so we can use them as scratch regs)
   73 //
// As regards Java usage, we don't use any callee-save registers
// because this makes it difficult to de-optimise a frame (see comment
// in x86 implementation of Deoptimization::unwind_callee_save_values).
   77 //
   78 
   79 // General Registers
   80 
// r0-r7: volatile; also used to pass Java arguments (SOC for both the
// Java and C conventions -- see alloc_class chunk0 below)
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
// r8/r9: reserved as scratch registers, never handed out by the allocator
reg_def R8      ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()         ); // rscratch1, non-allocatable
reg_def R8_H    ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()->next() );
reg_def R9      ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()         ); // rscratch2, non-allocatable
reg_def R9_H    ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()->next() );
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18_tls->as_VMReg()        ); // tls/platform register on some ABIs (see R18_RESERVED below)
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18_tls->as_VMReg()->next());
// r19-r26: SOC for Java code but callee-save (SOE) under the C convention
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
// r27-r31: fixed-role registers (see trailing comments)
reg_def R27     ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
  145 
  146 // ----------------------------
  147 // Float/Double/Vector Registers
  148 // ----------------------------
  149 
  150 // Double Registers
  151 
  152 // The rules of ADL require that double registers be defined in pairs.
  153 // Each pair must be two 32-bit values, but not necessarily a pair of
  154 // single float registers. In each pair, ADLC-assigned register numbers
  155 // must be adjacent, with the lower number even. Finally, when the
  156 // CPU stores such a register pair to memory, the word associated with
  157 // the lower ADLC-assigned number must be stored to the lower address.
  158 
  159 // AArch64 has 32 floating-point registers. Each can store a vector of
  160 // single or double precision floating-point values up to 8 * 32
  161 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
  162 // use the first float or double element of the vector.
  163 
// For Java use, float registers v0-v15 are always save-on-call, whereas
// the platform ABI treats v8-v15 as callee-save. Float registers
// v16-v31 are SOC as per the platform spec.
  167 
  168 // For SVE vector registers, we simply extend vector register size to 8
  169 // 'logical' slots. This is nominally 256 bits but it actually covers
  170 // all possible 'physical' SVE vector register lengths from 128 ~ 2048
  171 // bits. The 'physical' SVE vector register length is detected during
  172 // startup, so the register allocator is able to identify the correct
  173 // number of bytes needed for an SVE spill/unspill.
  174 // Note that a vector register with 4 slots denotes a 128-bit NEON
  175 // register allowing it to be distinguished from the corresponding SVE
  176 // vector register when the SVE vector length is 128 bits.
  177 
  // v0-v7: FP/SIMD argument registers, SOC under both conventions
  reg_def V0   ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(3) );

  // v8-v15: only the lower 64 bits (Vn, Vn_H slots) are callee-save (SOE)
  // for the C convention; the upper slots (Vn_J, Vn_K) remain SOC
  reg_def V8   ( SOC, SOE, Op_RegF, 8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOE, Op_RegF, 8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOE, Op_RegF, 9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOE, Op_RegF, 9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(3) );

  reg_def V10   ( SOC, SOE, Op_RegF, 10, v10->as_VMReg()          );
  reg_def V10_H ( SOC, SOE, Op_RegF, 10, v10->as_VMReg()->next()  );
  reg_def V10_J ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2) );
  reg_def V10_K ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3) );

  reg_def V11   ( SOC, SOE, Op_RegF, 11, v11->as_VMReg()          );
  reg_def V11_H ( SOC, SOE, Op_RegF, 11, v11->as_VMReg()->next()  );
  reg_def V11_J ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2) );
  reg_def V11_K ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3) );

  reg_def V12   ( SOC, SOE, Op_RegF, 12, v12->as_VMReg()          );
  reg_def V12_H ( SOC, SOE, Op_RegF, 12, v12->as_VMReg()->next()  );
  reg_def V12_J ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2) );
  reg_def V12_K ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3) );

  reg_def V13   ( SOC, SOE, Op_RegF, 13, v13->as_VMReg()          );
  reg_def V13_H ( SOC, SOE, Op_RegF, 13, v13->as_VMReg()->next()  );
  reg_def V13_J ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2) );
  reg_def V13_K ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3) );

  reg_def V14   ( SOC, SOE, Op_RegF, 14, v14->as_VMReg()          );
  reg_def V14_H ( SOC, SOE, Op_RegF, 14, v14->as_VMReg()->next()  );
  reg_def V14_J ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2) );
  reg_def V14_K ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3) );

  reg_def V15   ( SOC, SOE, Op_RegF, 15, v15->as_VMReg()          );
  reg_def V15_H ( SOC, SOE, Op_RegF, 15, v15->as_VMReg()->next()  );
  reg_def V15_J ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2) );
  reg_def V15_K ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3) );

  // v16-v31: SOC under both conventions
  reg_def V16   ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()          );
  reg_def V16_H ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next()  );
  reg_def V16_J ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2) );
  reg_def V16_K ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3) );

  reg_def V17   ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()          );
  reg_def V17_H ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next()  );
  reg_def V17_J ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2) );
  reg_def V17_K ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3) );

  reg_def V18   ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()          );
  reg_def V18_H ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next()  );
  reg_def V18_J ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2) );
  reg_def V18_K ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3) );

  reg_def V19   ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()          );
  reg_def V19_H ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next()  );
  reg_def V19_J ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2) );
  reg_def V19_K ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3) );

  reg_def V20   ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()          );
  reg_def V20_H ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next()  );
  reg_def V20_J ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2) );
  reg_def V20_K ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3) );

  reg_def V21   ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()          );
  reg_def V21_H ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next()  );
  reg_def V21_J ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2) );
  reg_def V21_K ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3) );

  reg_def V22   ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()          );
  reg_def V22_H ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next()  );
  reg_def V22_J ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2) );
  reg_def V22_K ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3) );

  reg_def V23   ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()          );
  reg_def V23_H ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next()  );
  reg_def V23_J ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2) );
  reg_def V23_K ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3) );

  reg_def V24   ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()          );
  reg_def V24_H ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next()  );
  reg_def V24_J ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2) );
  reg_def V24_K ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3) );

  reg_def V25   ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()          );
  reg_def V25_H ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next()  );
  reg_def V25_J ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2) );
  reg_def V25_K ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3) );

  reg_def V26   ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()          );
  reg_def V26_H ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next()  );
  reg_def V26_J ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2) );
  reg_def V26_K ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3) );

  reg_def V27   ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()          );
  reg_def V27_H ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next()  );
  reg_def V27_J ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2) );
  reg_def V27_K ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3) );

  reg_def V28   ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()          );
  reg_def V28_H ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next()  );
  reg_def V28_J ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2) );
  reg_def V28_K ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3) );

  reg_def V29   ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()          );
  reg_def V29_H ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next()  );
  reg_def V29_J ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2) );
  reg_def V29_K ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3) );

  reg_def V30   ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()          );
  reg_def V30_H ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next()  );
  reg_def V30_J ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2) );
  reg_def V30_K ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3) );

  reg_def V31   ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()          );
  reg_def V31_H ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next()  );
  reg_def V31_J ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2) );
  reg_def V31_K ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3) );
  337 
  338 // ----------------------------
  339 // SVE Predicate Registers
  340 // ----------------------------
  // p0-p15: SVE predicate registers, all SOC under both conventions
  reg_def P0 (SOC, SOC, Op_RegVectMask, 0, p0->as_VMReg());
  reg_def P1 (SOC, SOC, Op_RegVectMask, 1, p1->as_VMReg());
  reg_def P2 (SOC, SOC, Op_RegVectMask, 2, p2->as_VMReg());
  reg_def P3 (SOC, SOC, Op_RegVectMask, 3, p3->as_VMReg());
  reg_def P4 (SOC, SOC, Op_RegVectMask, 4, p4->as_VMReg());
  reg_def P5 (SOC, SOC, Op_RegVectMask, 5, p5->as_VMReg());
  reg_def P6 (SOC, SOC, Op_RegVectMask, 6, p6->as_VMReg());
  reg_def P7 (SOC, SOC, Op_RegVectMask, 7, p7->as_VMReg()); // allocated last; preserved for all-true predicate (see chunk2)
  reg_def P8 (SOC, SOC, Op_RegVectMask, 8, p8->as_VMReg());
  reg_def P9 (SOC, SOC, Op_RegVectMask, 9, p9->as_VMReg());
  reg_def P10 (SOC, SOC, Op_RegVectMask, 10, p10->as_VMReg());
  reg_def P11 (SOC, SOC, Op_RegVectMask, 11, p11->as_VMReg());
  reg_def P12 (SOC, SOC, Op_RegVectMask, 12, p12->as_VMReg());
  reg_def P13 (SOC, SOC, Op_RegVectMask, 13, p13->as_VMReg());
  reg_def P14 (SOC, SOC, Op_RegVectMask, 14, p14->as_VMReg());
  reg_def P15 (SOC, SOC, Op_RegVectMask, 15, p15->as_VMReg());
  357 
  358 // ----------------------------
  359 // Special Registers
  360 // ----------------------------
  361 
// The AArch64 CPSR status flag register is not directly accessible as an
// instruction operand. The FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
  367 
  368 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
  369 
  370 // Specify priority of register selection within phases of register
  371 // allocation.  Highest priority is first.  A useful heuristic is to
  372 // give registers a low priority when they are required by machine
  373 // instructions, like EAX and EDX on I486, and choose no-save registers
  374 // before save-on-call, & save-on-call before save-on-entry.  Registers
  375 // which participate in fixed calling sequences should come last.
  376 // Registers which are used as pairs must fall on an even boundary.
  377 
alloc_class chunk0(
    // Allocation priority (highest first): plain volatiles, then argument
    // registers, then callee-saved registers, with fixed-role registers
    // last -- see the priority comment above.

    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
    R8, R8_H,   // rscratch1
    R9, R9_H,   // rscratch2
);
  420 
alloc_class chunk1(

    // FP/SIMD allocation priority: SOC v16-v31 first, then the argument
    // registers v0-v7, then v8-v15 (callee-save low halves) last.

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);
  461 
alloc_class chunk2 (
    // Predicate allocation priority: p0-p6, then p8-p15, with p7 last.

    // Governing predicates for load/store and arithmetic
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,

    // Extra predicates
    P8,
    P9,
    P10,
    P11,
    P12,
    P13,
    P14,
    P15,

    // Preserved for all-true predicate
    P7,
);
  485 
  486 alloc_class chunk3(RFLAGS);
  487 
  488 //----------Architecture Description Register Classes--------------------------
  489 // Several register classes are automatically defined based upon information in
  490 // this architecture description.
  491 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
  492 // 2) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
  493 //
  494 
// Class for all 32 bit general purpose registers
reg_class all_reg32(
    // Note: R8/R9 (rscratch1/rscratch2) are deliberately omitted -- they
    // are reserved as scratch registers (see their reg_def entries above).
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30,
    R31
);
  528 
  529 
// Class for all 32 bit integer registers (excluding SP which
// will never be used as an integer register)
reg_class any_reg32 %{
  // Register mask is computed at runtime rather than listed statically
  // (mask presumably built in the C++ source sections of this file --
  // TODO confirm definition site of _ANY_REG32_mask).
  return _ANY_REG32_mask;
%}

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);

// Singleton class for R31 int register
reg_class int_r31_reg(R31);
  550 
// Class for all 64 bit general purpose registers
reg_class all_reg(
    // Note: R8/R9 (rscratch1/rscratch2) are deliberately omitted -- they
    // are reserved as scratch registers (see their reg_def entries above).
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
  584 
// Class for all long integer registers (including SP)
reg_class any_reg %{
  // Runtime-computed register mask (see the corresponding mask
  // definition elsewhere in this file).
  return _ANY_REG_mask;
%}

// Class for non-allocatable 32 bit registers
reg_class non_allocatable_reg32(
#ifdef R18_RESERVED
    // See comment in register_aarch64.hpp
    R18,                        // tls on Windows
#endif
    R28,                        // thread
    R30,                        // lr
    R31                         // sp
);

// Class for non-allocatable 64 bit registers
reg_class non_allocatable_reg(
#ifdef R18_RESERVED
    // See comment in register_aarch64.hpp
    R18, R18_H,                 // tls on Windows, platform register on macOS
#endif
    R28, R28_H,                 // thread
    R30, R30_H,                 // lr
    R31, R31_H                  // sp
);
  611 
// Class for all non-special integer registers
// ("special" presumably means registers with fixed roles such as thread,
// fp, lr and sp -- TODO confirm against the mask construction code)
reg_class no_special_reg32 %{
  return _NO_SPECIAL_REG32_mask;
%}

// Class for all non-special long integer registers
reg_class no_special_reg %{
  return _NO_SPECIAL_REG_mask;
%}

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register
reg_class method_reg(
    R12, R12_H
);

// Class for thread register
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register
reg_class fp_reg(
    R29, R29_H
);

// Class for link register
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register
reg_class sp_reg(
  R31, R31_H
);

// Class for all pointer registers
reg_class ptr_reg %{
  return _PTR_REG_mask;
%}

// Class for all non_special pointer registers
reg_class no_special_ptr_reg %{
  return _NO_SPECIAL_PTR_REG_mask;
%}

// Class for all non_special pointer registers (excluding rfp)
reg_class no_special_no_rfp_ptr_reg %{
  return _NO_SPECIAL_NO_RFP_PTR_REG_mask;
%}
  701 
// Class for all float registers
// (one 32-bit slot per vector register; only the first element of each
// vector is used for scalar floats -- see the note above on vector slots)
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);
  737 
// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers
// (each double occupies the two low 32-bit slots Vn, Vn_H)
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
  775 
// Class for all SVE vector registers.
// (all four 32-bit slots of each vector register; the actual SVE spill
// size is detected at startup -- see the SVE note above)
reg_class vectora_reg (
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,
);
  811 
  812 // Class for all 64bit vector registers
  813 reg_class vectord_reg(
  814     V0, V0_H,
  815     V1, V1_H,
  816     V2, V2_H,
  817     V3, V3_H,
  818     V4, V4_H,
  819     V5, V5_H,
  820     V6, V6_H,
  821     V7, V7_H,
  822     V8, V8_H,
  823     V9, V9_H,
  824     V10, V10_H,
  825     V11, V11_H,
  826     V12, V12_H,
  827     V13, V13_H,
  828     V14, V14_H,
  829     V15, V15_H,
  830     V16, V16_H,
  831     V17, V17_H,
  832     V18, V18_H,
  833     V19, V19_H,
  834     V20, V20_H,
  835     V21, V21_H,
  836     V22, V22_H,
  837     V23, V23_H,
  838     V24, V24_H,
  839     V25, V25_H,
  840     V26, V26_H,
  841     V27, V27_H,
  842     V28, V28_H,
  843     V29, V29_H,
  844     V30, V30_H,
  845     V31, V31_H
  846 );
  847 
  848 // Class for all 128bit vector registers
  849 reg_class vectorx_reg(
  850     V0, V0_H, V0_J, V0_K,
  851     V1, V1_H, V1_J, V1_K,
  852     V2, V2_H, V2_J, V2_K,
  853     V3, V3_H, V3_J, V3_K,
  854     V4, V4_H, V4_J, V4_K,
  855     V5, V5_H, V5_J, V5_K,
  856     V6, V6_H, V6_J, V6_K,
  857     V7, V7_H, V7_J, V7_K,
  858     V8, V8_H, V8_J, V8_K,
  859     V9, V9_H, V9_J, V9_K,
  860     V10, V10_H, V10_J, V10_K,
  861     V11, V11_H, V11_J, V11_K,
  862     V12, V12_H, V12_J, V12_K,
  863     V13, V13_H, V13_J, V13_K,
  864     V14, V14_H, V14_J, V14_K,
  865     V15, V15_H, V15_J, V15_K,
  866     V16, V16_H, V16_J, V16_K,
  867     V17, V17_H, V17_J, V17_K,
  868     V18, V18_H, V18_J, V18_K,
  869     V19, V19_H, V19_J, V19_K,
  870     V20, V20_H, V20_J, V20_K,
  871     V21, V21_H, V21_J, V21_K,
  872     V22, V22_H, V22_J, V22_K,
  873     V23, V23_H, V23_J, V23_K,
  874     V24, V24_H, V24_J, V24_K,
  875     V25, V25_H, V25_J, V25_K,
  876     V26, V26_H, V26_J, V26_K,
  877     V27, V27_H, V27_J, V27_K,
  878     V28, V28_H, V28_J, V28_K,
  879     V29, V29_H, V29_J, V29_K,
  880     V30, V30_H, V30_J, V30_K,
  881     V31, V31_H, V31_J, V31_K
  882 );
  883 
  884 // Class for 128 bit register v0
  885 reg_class v0_reg(
  886     V0, V0_H
  887 );
  888 
  889 // Class for 128 bit register v1
  890 reg_class v1_reg(
  891     V1, V1_H
  892 );
  893 
  894 // Class for 128 bit register v2
  895 reg_class v2_reg(
  896     V2, V2_H
  897 );
  898 
  899 // Class for 128 bit register v3
  900 reg_class v3_reg(
  901     V3, V3_H
  902 );
  903 
  904 // Class for 128 bit register v4
  905 reg_class v4_reg(
  906     V4, V4_H
  907 );
  908 
  909 // Class for 128 bit register v5
  910 reg_class v5_reg(
  911     V5, V5_H
  912 );
  913 
  914 // Class for 128 bit register v6
  915 reg_class v6_reg(
  916     V6, V6_H
  917 );
  918 
  919 // Class for 128 bit register v7
  920 reg_class v7_reg(
  921     V7, V7_H
  922 );
  923 
  924 // Class for 128 bit register v8
  925 reg_class v8_reg(
  926     V8, V8_H
  927 );
  928 
  929 // Class for 128 bit register v9
  930 reg_class v9_reg(
  931     V9, V9_H
  932 );
  933 
  934 // Class for 128 bit register v10
  935 reg_class v10_reg(
  936     V10, V10_H
  937 );
  938 
  939 // Class for 128 bit register v11
  940 reg_class v11_reg(
  941     V11, V11_H
  942 );
  943 
  944 // Class for 128 bit register v12
  945 reg_class v12_reg(
  946     V12, V12_H
  947 );
  948 
  949 // Class for 128 bit register v13
  950 reg_class v13_reg(
  951     V13, V13_H
  952 );
  953 
  954 // Class for 128 bit register v14
  955 reg_class v14_reg(
  956     V14, V14_H
  957 );
  958 
  959 // Class for 128 bit register v15
  960 reg_class v15_reg(
  961     V15, V15_H
  962 );
  963 
  964 // Class for 128 bit register v16
  965 reg_class v16_reg(
  966     V16, V16_H
  967 );
  968 
  969 // Class for 128 bit register v17
  970 reg_class v17_reg(
  971     V17, V17_H
  972 );
  973 
  974 // Class for 128 bit register v18
  975 reg_class v18_reg(
  976     V18, V18_H
  977 );
  978 
  979 // Class for 128 bit register v19
  980 reg_class v19_reg(
  981     V19, V19_H
  982 );
  983 
  984 // Class for 128 bit register v20
  985 reg_class v20_reg(
  986     V20, V20_H
  987 );
  988 
  989 // Class for 128 bit register v21
  990 reg_class v21_reg(
  991     V21, V21_H
  992 );
  993 
  994 // Class for 128 bit register v22
  995 reg_class v22_reg(
  996     V22, V22_H
  997 );
  998 
  999 // Class for 128 bit register v23
 1000 reg_class v23_reg(
 1001     V23, V23_H
 1002 );
 1003 
 1004 // Class for 128 bit register v24
 1005 reg_class v24_reg(
 1006     V24, V24_H
 1007 );
 1008 
 1009 // Class for 128 bit register v25
 1010 reg_class v25_reg(
 1011     V25, V25_H
 1012 );
 1013 
 1014 // Class for 128 bit register v26
 1015 reg_class v26_reg(
 1016     V26, V26_H
 1017 );
 1018 
 1019 // Class for 128 bit register v27
 1020 reg_class v27_reg(
 1021     V27, V27_H
 1022 );
 1023 
 1024 // Class for 128 bit register v28
 1025 reg_class v28_reg(
 1026     V28, V28_H
 1027 );
 1028 
 1029 // Class for 128 bit register v29
 1030 reg_class v29_reg(
 1031     V29, V29_H
 1032 );
 1033 
 1034 // Class for 128 bit register v30
 1035 reg_class v30_reg(
 1036     V30, V30_H
 1037 );
 1038 
 1039 // Class for 128 bit register v31
 1040 reg_class v31_reg(
 1041     V31, V31_H
 1042 );
 1043 
// Class for all SVE predicate registers.
// All sixteen predicate registers P0-P15 except P7, which is kept out of
// the allocator's hands (see the in-line note below).
reg_class pr_reg (
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    // P7, non-allocatable, preserved with all elements preset to TRUE.
    P8,
    P9,
    P10,
    P11,
    P12,
    P13,
    P14,
    P15
);
 1063 
// Class for SVE governing predicate registers, which are used
// to determine the active elements of a predicated instruction.
// Only the low predicates P0-P6 appear here; P7 is reserved (below).
// NOTE(review): presumably restricted to the low half because many SVE
// predicated instruction encodings can only name P0-P7 as the governing
// predicate — confirm against the Arm SVE specification.
reg_class gov_pr (
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    // P7, non-allocatable, preserved with all elements preset to TRUE.
);
 1076 
// Singleton classes pinning an operand to one specific predicate register.
reg_class p0_reg(P0);
reg_class p1_reg(P1);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);
 1082 
 1083 %}
 1084 
 1085 //----------DEFINITION BLOCK---------------------------------------------------
 1086 // Define name --> value mappings to inform the ADLC of an integer valued name
 1087 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 1088 // Format:
 1089 //        int_def  <name>         ( <int_value>, <expression>);
 1090 // Generated Code in ad_<arch>.hpp
 1091 //        #define  <name>   (<expression>)
 1092 //        // value == <int_value>
 1093 // Generated code in ad_<arch>.cpp adlc_verification()
 1094 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 1095 //
 1096 
 1097 // we follow the ppc-aix port in using a simple cost model which ranks
 1098 // register operations as cheap, memory ops as more expensive and
 1099 // branches as most expensive. the first two have a low as well as a
 1100 // normal cost. huge cost appears to be a way of saying don't do
 1101 // something
 1102 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls rank twice as expensive as a plain ALU op.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile memory references are costed as most expensive (see the
  // "Optimization of volatile gets and puts" discussion below).
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 1110 
 1111 
 1112 //----------SOURCE BLOCK-------------------------------------------------------
 1113 // This is a block of C++ code which provides values, functions, and
 1114 // definitions necessary in the rest of the architecture description
 1115 
 1116 source_hpp %{
 1117 
 1118 #include "asm/macroAssembler.hpp"
 1119 #include "gc/shared/barrierSetAssembler.hpp"
 1120 #include "gc/shared/cardTable.hpp"
 1121 #include "gc/shared/cardTableBarrierSet.hpp"
 1122 #include "gc/shared/collectedHeap.hpp"
 1123 #include "opto/addnode.hpp"
 1124 #include "opto/convertnode.hpp"
 1125 #include "runtime/objectMonitor.hpp"
 1126 
 1127 extern RegMask _ANY_REG32_mask;
 1128 extern RegMask _ANY_REG_mask;
 1129 extern RegMask _PTR_REG_mask;
 1130 extern RegMask _NO_SPECIAL_REG32_mask;
 1131 extern RegMask _NO_SPECIAL_REG_mask;
 1132 extern RegMask _NO_SPECIAL_PTR_REG_mask;
 1133 extern RegMask _NO_SPECIAL_NO_RFP_PTR_REG_mask;
 1134 
// Platform hooks consulted by Compile::shorten_branches.
// AArch64 emits no call trampoline stubs, so both queries return zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
 1152 
// Platform hooks for exception and deopt handler stubs: size queries are
// answered inline here; the emit_* routines are defined elsewhere in the
// generated ad_aarch64 code.
class HandlerImpl {

 public:

  static int emit_exception_handler(C2_MacroAssembler *masm);
  static int emit_deopt_handler(C2_MacroAssembler* masm);

  // Exception handler is a single (possibly far) branch to the stub.
  static uint size_exception_handler() {
    return MacroAssembler::far_codestub_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    return NativeInstruction::instruction_size + MacroAssembler::far_codestub_branch_size();
  }
};
 1169 
// Platform-dependent extension point for per-node flags.
// AArch64 defines no extra flags beyond the shared set, so the local
// _last_flag simply aliases Node::_last_flag.
class Node::PD {
public:
  enum NodeFlags {
    _last_flag = Node::_last_flag
  };
};
 1176 
 1177   bool is_CAS(int opcode, bool maybe_volatile);
 1178 
 1179   // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
 1180 
 1181   bool unnecessary_acquire(const Node *barrier);
 1182   bool needs_acquiring_load(const Node *load);
 1183 
 1184   // predicates controlling emit of str<x>/stlr<x> and associated dmbs
 1185 
 1186   bool unnecessary_release(const Node *barrier);
 1187   bool unnecessary_volatile(const Node *barrier);
 1188   bool needs_releasing_store(const Node *store);
 1189 
 1190   // predicate controlling translation of CompareAndSwapX
 1191   bool needs_acquiring_load_exclusive(const Node *load);
 1192 
 1193   // predicate controlling addressing modes
 1194   bool size_fits_all_mem_uses(AddPNode* addp, int shift);
 1195 
  // Convert BoolTest condition to Assembler condition.
 1197   // Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
 1198   Assembler::Condition to_assembler_cond(BoolTest::mask cond);
 1199 %}
 1200 
 1201 source %{
 1202 
  // Derived RegMask with conditionally allocatable registers

  // Hook for platform-specific analysis over the mach-node graph before
  // code emission; nothing to do on AArch64.
  void PhaseOutput::pd_perform_mach_node_analysis() {
  }

  // Required alignment (in bytes) for mach nodes; 1 == no constraint.
  int MachNode::pd_alignment_required() const {
    return 1;
  }

  // Padding (in bytes) to insert before this node; none on AArch64.
  int MachNode::compute_padding(int current_offset) const {
    return 0;
  }

  // Definitions of the dynamic register masks declared extern in the
  // source_hpp block above; populated once at startup by reg_mask_init().
  RegMask _ANY_REG32_mask;
  RegMask _ANY_REG_mask;
  RegMask _PTR_REG_mask;
  RegMask _NO_SPECIAL_REG32_mask;
  RegMask _NO_SPECIAL_REG_mask;
  RegMask _NO_SPECIAL_PTR_REG_mask;
  RegMask _NO_SPECIAL_NO_RFP_PTR_REG_mask;
 1223 
  void reg_mask_init() {
    // We derive below RegMask(s) from the ones which are auto-generated from
    // adlc register classes to make AArch64 rheapbase (r27) and rfp (r29)
    // registers conditionally reserved.

    // All 32-bit registers minus r31 in its stack-pointer role.
    _ANY_REG32_mask = _ALL_REG32_mask;
    _ANY_REG32_mask.Remove(OptoReg::as_OptoReg(r31_sp->as_VMReg()));

    _ANY_REG_mask = _ALL_REG_mask;

    _PTR_REG_mask = _ALL_REG_mask;

    // Start each "no special" mask from the full set and strip the
    // registers that are never allocatable.
    _NO_SPECIAL_REG32_mask = _ALL_REG32_mask;
    _NO_SPECIAL_REG32_mask.SUBTRACT(_NON_ALLOCATABLE_REG32_mask);

    _NO_SPECIAL_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    _NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    // r27 is not allocatable when compressed oops is on and heapbase is not
    // zero, compressed klass pointers doesn't use r27 after JDK-8234794
    if (UseCompressedOops && (CompressedOops::base() != nullptr)) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
      _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
      _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
    }

    // r29 is not allocatable when PreserveFramePointer is on
    if (PreserveFramePointer) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
      _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
      _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
    }

    // The no-rfp variant additionally drops r29 unconditionally; it must
    // be derived after the conditional removals above.
    _NO_SPECIAL_NO_RFP_PTR_REG_mask = _NO_SPECIAL_PTR_REG_mask;
    _NO_SPECIAL_NO_RFP_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
  }
 1263 
  // Optimization of volatile gets and puts
 1265   // -------------------------------------
 1266   //
 1267   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
 1268   // use to implement volatile reads and writes. For a volatile read
 1269   // we simply need
 1270   //
 1271   //   ldar<x>
 1272   //
 1273   // and for a volatile write we need
 1274   //
 1275   //   stlr<x>
 1276   //
 1277   // Alternatively, we can implement them by pairing a normal
 1278   // load/store with a memory barrier. For a volatile read we need
 1279   //
 1280   //   ldr<x>
 1281   //   dmb ishld
 1282   //
 1283   // for a volatile write
 1284   //
 1285   //   dmb ish
 1286   //   str<x>
 1287   //   dmb ish
 1288   //
 1289   // We can also use ldaxr and stlxr to implement compare and swap CAS
 1290   // sequences. These are normally translated to an instruction
 1291   // sequence like the following
 1292   //
 1293   //   dmb      ish
 1294   // retry:
 1295   //   ldxr<x>   rval raddr
 1296   //   cmp       rval rold
 1297   //   b.ne done
 1298   //   stlxr<x>  rval, rnew, rold
 1299   //   cbnz      rval retry
 1300   // done:
 1301   //   cset      r0, eq
 1302   //   dmb ishld
 1303   //
 1304   // Note that the exclusive store is already using an stlxr
 1305   // instruction. That is required to ensure visibility to other
 1306   // threads of the exclusive write (assuming it succeeds) before that
 1307   // of any subsequent writes.
 1308   //
 1309   // The following instruction sequence is an improvement on the above
 1310   //
 1311   // retry:
 1312   //   ldaxr<x>  rval raddr
 1313   //   cmp       rval rold
 1314   //   b.ne done
 1315   //   stlxr<x>  rval, rnew, rold
 1316   //   cbnz      rval retry
 1317   // done:
 1318   //   cset      r0, eq
 1319   //
 1320   // We don't need the leading dmb ish since the stlxr guarantees
 1321   // visibility of prior writes in the case that the swap is
 1322   // successful. Crucially we don't have to worry about the case where
 1323   // the swap is not successful since no valid program should be
 1324   // relying on visibility of prior changes by the attempting thread
 1325   // in the case where the CAS fails.
 1326   //
 1327   // Similarly, we don't need the trailing dmb ishld if we substitute
 1328   // an ldaxr instruction since that will provide all the guarantees we
 1329   // require regarding observation of changes made by other threads
 1330   // before any change to the CAS address observed by the load.
 1331   //
 1332   // In order to generate the desired instruction sequence we need to
 1333   // be able to identify specific 'signature' ideal graph node
 1334   // sequences which i) occur as a translation of a volatile reads or
 1335   // writes or CAS operations and ii) do not occur through any other
 1336   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
 1338   // sequences to the desired machine code sequences. Selection of the
 1339   // alternative rules can be implemented by predicates which identify
 1340   // the relevant node sequences.
 1341   //
 1342   // The ideal graph generator translates a volatile read to the node
 1343   // sequence
 1344   //
 1345   //   LoadX[mo_acquire]
 1346   //   MemBarAcquire
 1347   //
 1348   // As a special case when using the compressed oops optimization we
 1349   // may also see this variant
 1350   //
 1351   //   LoadN[mo_acquire]
 1352   //   DecodeN
 1353   //   MemBarAcquire
 1354   //
 1355   // A volatile write is translated to the node sequence
 1356   //
 1357   //   MemBarRelease
 1358   //   StoreX[mo_release] {CardMark}-optional
 1359   //   MemBarVolatile
 1360   //
 1361   // n.b. the above node patterns are generated with a strict
 1362   // 'signature' configuration of input and output dependencies (see
 1363   // the predicates below for exact details). The card mark may be as
 1364   // simple as a few extra nodes or, in a few GC configurations, may
 1365   // include more complex control flow between the leading and
 1366   // trailing memory barriers. However, whatever the card mark
 1367   // configuration these signatures are unique to translated volatile
 1368   // reads/stores -- they will not appear as a result of any other
 1369   // bytecode translation or inlining nor as a consequence of
 1370   // optimizing transforms.
 1371   //
 1372   // We also want to catch inlined unsafe volatile gets and puts and
 1373   // be able to implement them using either ldar<x>/stlr<x> or some
 1374   // combination of ldr<x>/stlr<x> and dmb instructions.
 1375   //
 1376   // Inlined unsafe volatiles puts manifest as a minor variant of the
 1377   // normal volatile put node sequence containing an extra cpuorder
 1378   // membar
 1379   //
 1380   //   MemBarRelease
 1381   //   MemBarCPUOrder
 1382   //   StoreX[mo_release] {CardMark}-optional
 1383   //   MemBarCPUOrder
 1384   //   MemBarVolatile
 1385   //
 1386   // n.b. as an aside, a cpuorder membar is not itself subject to
 1387   // matching and translation by adlc rules.  However, the rule
 1388   // predicates need to detect its presence in order to correctly
 1389   // select the desired adlc rules.
 1390   //
 1391   // Inlined unsafe volatile gets manifest as a slightly different
 1392   // node sequence to a normal volatile get because of the
 1393   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
 1395   // MemBarAcquire, possibly through an optional DecodeN, is still
 1396   // present
 1397   //
 1398   //   MemBarCPUOrder
 1399   //        ||       \\
 1400   //   MemBarCPUOrder LoadX[mo_acquire]
 1401   //        ||            |
 1402   //        ||       {DecodeN} optional
 1403   //        ||       /
 1404   //     MemBarAcquire
 1405   //
 1406   // In this case the acquire membar does not directly depend on the
 1407   // load. However, we can be sure that the load is generated from an
 1408   // inlined unsafe volatile get if we see it dependent on this unique
 1409   // sequence of membar nodes. Similarly, given an acquire membar we
 1410   // can know that it was added because of an inlined unsafe volatile
 1411   // get if it is fed and feeds a cpuorder membar and if its feed
 1412   // membar also feeds an acquiring load.
 1413   //
 1414   // Finally an inlined (Unsafe) CAS operation is translated to the
 1415   // following ideal graph
 1416   //
 1417   //   MemBarRelease
 1418   //   MemBarCPUOrder
 1419   //   CompareAndSwapX {CardMark}-optional
 1420   //   MemBarCPUOrder
 1421   //   MemBarAcquire
 1422   //
 1423   // So, where we can identify these volatile read and write
 1424   // signatures we can choose to plant either of the above two code
 1425   // sequences. For a volatile read we can simply plant a normal
 1426   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
 1427   // also choose to inhibit translation of the MemBarAcquire and
 1428   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
 1429   //
 1430   // When we recognise a volatile store signature we can choose to
  // plant a dmb ish as a translation for the MemBarRelease, a
 1432   // normal str<x> and then a dmb ish for the MemBarVolatile.
 1433   // Alternatively, we can inhibit translation of the MemBarRelease
 1434   // and MemBarVolatile and instead plant a simple stlr<x>
 1435   // instruction.
 1436   //
 1437   // when we recognise a CAS signature we can choose to plant a dmb
 1438   // ish as a translation for the MemBarRelease, the conventional
 1439   // macro-instruction sequence for the CompareAndSwap node (which
 1440   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
 1441   // Alternatively, we can elide generation of the dmb instructions
 1442   // and plant the alternative CompareAndSwap macro-instruction
 1443   // sequence (which uses ldaxr<x>).
 1444   //
 1445   // Of course, the above only applies when we see these signature
 1446   // configurations. We still want to plant dmb instructions in any
 1447   // other cases where we may see a MemBarAcquire, MemBarRelease or
 1448   // MemBarVolatile. For example, at the end of a constructor which
 1449   // writes final/volatile fields we will see a MemBarRelease
 1450   // instruction and this needs a 'dmb ish' lest we risk the
 1451   // constructed object being visible without making the
 1452   // final/volatile field writes visible.
 1453   //
 1454   // n.b. the translation rules below which rely on detection of the
 1455   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
 1456   // If we see anything other than the signature configurations we
 1457   // always just translate the loads and stores to ldr<x> and str<x>
 1458   // and translate acquire, release and volatile membars to the
 1459   // relevant dmb instructions.
 1460   //
 1461 
 1462   // is_CAS(int opcode, bool maybe_volatile)
 1463   //
 1464   // return true if opcode is one of the possible CompareAndSwapX
 1465   // values otherwise false.
 1466 
  // Returns whether 'opcode' should be treated as a CAS-style atomic for
  // the volatile-signature predicates in this file.  The first group of
  // opcodes always qualifies; the second group (compare-and-exchange and
  // weak CAS variants) qualifies only when the caller passes
  // maybe_volatile == true, i.e. when volatile semantics are acceptable.
  bool is_CAS(int opcode, bool maybe_volatile)
  {
    switch(opcode) {
      // We handle these: always treated as CAS operations.
    case Op_CompareAndSwapI:
    case Op_CompareAndSwapL:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_CompareAndSwapB:
    case Op_CompareAndSwapS:
    case Op_GetAndSetI:
    case Op_GetAndSetL:
    case Op_GetAndSetP:
    case Op_GetAndSetN:
    case Op_GetAndAddI:
    case Op_GetAndAddL:
      return true;
      // These only count as CAS when the caller tolerates possibly
      // volatile operations.
    case Op_CompareAndExchangeI:
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeB:
    case Op_CompareAndExchangeS:
    case Op_CompareAndExchangeL:
    case Op_CompareAndExchangeP:
    case Op_WeakCompareAndSwapB:
    case Op_WeakCompareAndSwapS:
    case Op_WeakCompareAndSwapI:
    case Op_WeakCompareAndSwapL:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      return maybe_volatile;
    default:
      return false;
    }
  }
 1507 
 1508   // helper to determine the maximum number of Phi nodes we may need to
 1509   // traverse when searching from a card mark membar for the merge mem
 1510   // feeding a trailing membar or vice versa
 1511 
 1512 // predicates controlling emit of ldr<x>/ldar<x>
 1513 
// Returns true when the acquire membar is part of a recognised volatile
// load or CAS signature, in which case no dmb needs to be emitted for it:
// the load/CAS itself will be emitted in an acquiring form.
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  MemBarNode* mb = barrier->as_MemBar();

  // Trailing membar of a volatile load: the load will be an ldar<x>.
  if (mb->trailing_load()) {
    return true;
  }

  // Trailing membar of a load-store: elide only for recognised CAS
  // opcodes, which are emitted with an acquiring ldaxr<x>.
  if (mb->trailing_load_store()) {
    Node* load_store = mb->in(MemBarNode::Precedent);
    assert(load_store->is_LoadStore(), "unexpected graph shape");
    return is_CAS(load_store->Opcode(), true);
  }

  return false;
}
 1532 
 1533 bool needs_acquiring_load(const Node *n)
 1534 {
 1535   assert(n->is_Load(), "expecting a load");
 1536   LoadNode *ld = n->as_Load();
 1537   return ld->is_acquire();
 1538 }
 1539 
 1540 bool unnecessary_release(const Node *n)
 1541 {
 1542   assert((n->is_MemBar() &&
 1543           n->Opcode() == Op_MemBarRelease),
 1544          "expecting a release membar");
 1545 
 1546   MemBarNode *barrier = n->as_MemBar();
 1547   if (!barrier->leading()) {
 1548     return false;
 1549   } else {
 1550     Node* trailing = barrier->trailing_membar();
 1551     MemBarNode* trailing_mb = trailing->as_MemBar();
 1552     assert(trailing_mb->trailing(), "Not a trailing membar?");
 1553     assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
 1554 
 1555     Node* mem = trailing_mb->in(MemBarNode::Precedent);
 1556     if (mem->is_Store()) {
 1557       assert(mem->as_Store()->is_release(), "");
 1558       assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
 1559       return true;
 1560     } else {
 1561       assert(mem->is_LoadStore(), "");
 1562       assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
 1563       return is_CAS(mem->Opcode(), true);
 1564     }
 1565   }
 1566   return false;
 1567 }
 1568 
// Returns true when this MemBarVolatile is the trailing membar of a
// recognised volatile-store signature; the store will be emitted as
// stlr<x>, making the explicit dmb unnecessary.
bool unnecessary_volatile(const Node *n)
{
  // assert n->is_MemBar();
  MemBarNode *mbvol = n->as_MemBar();

  bool release = mbvol->trailing_store();
  assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
#ifdef ASSERT
  // Cross-check the leading/trailing membar linkage of the signature.
  if (release) {
    Node* leading = mbvol->leading_membar();
    assert(leading->Opcode() == Op_MemBarRelease, "");
    assert(leading->as_MemBar()->leading_store(), "");
    assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
  }
#endif

  return release;
}
 1587 
 1588 // predicates controlling emit of str<x>/stlr<x>
 1589 
 1590 bool needs_releasing_store(const Node *n)
 1591 {
 1592   // assert n->is_Store();
 1593   StoreNode *st = n->as_Store();
 1594   return st->trailing_membar() != nullptr;
 1595 }
 1596 
 1597 // predicate controlling translation of CAS
 1598 //
 1599 // returns true if CAS needs to use an acquiring load otherwise false
 1600 
 1601 bool needs_acquiring_load_exclusive(const Node *n)
 1602 {
 1603   assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
 1604   LoadStoreNode* ldst = n->as_LoadStore();
 1605   if (is_CAS(n->Opcode(), false)) {
 1606     assert(ldst->trailing_membar() != nullptr, "expected trailing membar");
 1607   } else {
 1608     return ldst->trailing_membar() != nullptr;
 1609   }
 1610 
 1611   // so we can just return true here
 1612   return true;
 1613 }
 1614 
 1615 #define __ masm->
 1616 
 1617 // advance declarations for helper functions to convert register
 1618 // indices to register objects
 1619 
 1620 // the ad file has to provide implementations of certain methods
 1621 // expected by the generic code
 1622 //
 1623 // REQUIRED FUNCTIONALITY
 1624 
 1625 //=============================================================================
 1626 
 1627 // !!!!! Special hack to get all types of calls to specify the byte offset
 1628 //       from the start of the call to the point where the return address
 1629 //       will point.
 1630 
 1631 int MachCallStaticJavaNode::ret_addr_offset()
 1632 {
 1633   // call should be a simple bl
 1634   int off = 4;
 1635   return off;
 1636 }
 1637 
// Byte offset from the start of a dynamic Java call to its return address.
int MachCallDynamicJavaNode::ret_addr_offset()
{
  return 16; // movz, movk, movk, bl -- four 4-byte instructions
}
 1642 
 1643 int MachCallRuntimeNode::ret_addr_offset() {
 1644   // for generated stubs the call will be
 1645   //   bl(addr)
 1646   // or with far branches
 1647   //   bl(trampoline_stub)
 1648   // for real runtime callouts it will be six instructions
 1649   // see aarch64_enc_java_to_runtime
 1650   //   adr(rscratch2, retaddr)
 1651   //   str(rscratch2, Address(rthread, JavaThread::last_Java_pc_offset()));
 1652   //   lea(rscratch1, RuntimeAddress(addr)
 1653   //   blr(rscratch1)
 1654   CodeBlob *cb = CodeCache::find_blob(_entry_point);
 1655   if (cb) {
 1656     return 1 * NativeInstruction::instruction_size;
 1657   } else {
 1658     return 6 * NativeInstruction::instruction_size;
 1659   }
 1660 }
 1661 
 1662 //=============================================================================
 1663 
#ifndef PRODUCT
// Debug-only textual form of the breakpoint node.
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif

// Emit a brk #0 instruction, trapping to the debugger.
void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  __ brk(0);
}

uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 1677 
 1678 //=============================================================================
 1679 
#ifndef PRODUCT
  // Debug-only textual form: how many bytes of nop padding this emits.
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif

  // Emit _count nop instructions as alignment/padding.
  void MachNopNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc*) const {
    for (int i = 0; i < _count; i++) {
      __ nop();
    }
  }

  // Size in bytes: one instruction per requested nop.
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
 1695 
 1696 //=============================================================================
// Constant table base: this platform uses absolute addressing for the
// constant table (offset 0), so the base node needs no output register
// and emits no code.
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;

int ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}

// No post-register-allocation expansion is needed for the base node.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}

void MachConstantBaseNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {
  // Empty encoding
}

uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
 1721 
#ifndef PRODUCT
// Pretty-print the method prolog: optional stack bang, optional ROP
// protection (PAC), the frame push (small-frame and large-frame variants),
// and the nmethod entry barrier sequence for non-stub compilations.
// This mirrors the code produced by MachPrologNode::emit below.
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  if (C->output()->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // Return-oriented-programming protection: probe lr, then sign it.
  if (VM_Version::use_rop_protection()) {
    st->print("ldr  zr, [lr]\n\t");
    st->print("paciaz\n\t");
  }
  // Small frames fit the signed-9-bit stp offset; large frames need rscratch1.
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
  // Non-stub compilations carry an nmethod entry barrier.
  if (C->stub_function() == nullptr) {
    st->print("\n\t");
    st->print("ldr  rscratch1, [guard]\n\t");
    st->print("dmb ishld\n\t");
    st->print("ldr  rscratch2, [rthread, #thread_disarmed_guard_value_offset]\n\t");
    st->print("cmp  rscratch1, rscratch2\n\t");
    st->print("b.eq skip");
    st->print("\n\t");
    st->print("blr #nmethod_entry_barrier_stub\n\t");
    st->print("b skip\n\t");
    st->print("guard: int\n\t");
    st->print("\n\t");
    st->print("skip:\n\t");
  }
}
#endif
 1761 
// Emit the method prolog: patchable nop, optional class-initialization
// barrier, SVE predicate reinitialization, stack-overflow bang, frame
// build, and the nmethod entry barrier for non-stub compilations.
void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;

  // n.b. frame size includes space for return pc and rfp
  const int framesize = C->output()->frame_size_in_bytes();

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  if (C->clinit_barrier_on_entry()) {
    assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");

    Label L_skip_barrier;

    // If the holder class is not (yet) fully initialized for this thread,
    // divert to the handle-wrong-method stub; otherwise fall through.
    __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
    __ bind(L_skip_barrier);
  }

  if (C->max_vector_size() > 0) {
    // NOTE(review): presumably re-establishes the all-true SVE predicate
    // register that vector code depends on — confirm in reinitialize_ptrue().
    __ reinitialize_ptrue();
  }

  int bangsize = C->output()->bang_size_in_bytes();
  if (C->output()->need_stack_bang(bangsize))
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (C->stub_function() == nullptr) {
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
    // Dummy labels for just measuring the code size
    Label dummy_slow_path;
    Label dummy_continuation;
    Label dummy_guard;
    Label* slow_path = &dummy_slow_path;
    Label* continuation = &dummy_continuation;
    Label* guard = &dummy_guard;
    if (!Compile::current()->output()->in_scratch_emit_size()) {
      // Use real labels from actual stub when not emitting code for the purpose of measuring its size
      C2EntryBarrierStub* stub = new (Compile::current()->comp_arena()) C2EntryBarrierStub();
      Compile::current()->output()->add_stub(stub);
      slow_path = &stub->entry();
      continuation = &stub->continuation();
      guard = &stub->guard();
    }
    // In the C2 code, we move the non-hot part of nmethod entry barriers out-of-line to a stub.
    bs->nmethod_entry_barrier(masm, slow_path, continuation, guard);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->output()->set_frame_complete(__ offset());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    ConstantTable& constant_table = C->output()->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
 1827 
// Prolog size depends on too many flags and sizes to compute statically;
// fall back to scratch-buffer emission.
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

// Number of relocatable values in the prolog encoding.
int MachPrologNode::reloc() const
{
  return 0;
}
 1838 
 1839 //=============================================================================
 1840 
#ifndef PRODUCT
// Pretty-print the method epilog: frame pop (three size variants),
// optional ROP authentication, and the return safepoint poll.
// This mirrors the code produced by MachEpilogNode::emit below.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    // Frame too large for an immediate offset; go through rscratch1.
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }
  // Authenticate lr (signed in the prolog) before returning through it.
  if (VM_Version::use_rop_protection()) {
    st->print("autiaz\n\t");
    st->print("ldr  zr, [lr]\n\t");
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# test polling word\n\t");
    st->print("ldr  rscratch1, [rthread],#%d\n\t", in_bytes(JavaThread::polling_word_offset()));
    st->print("cmp  sp, rscratch1\n\t");
    st->print("bhi #slow_path");
  }
}
#endif
 1871 
// Emit the method epilog: tear down the frame, check reserved stack pages
// if used, and perform the return safepoint poll for method compilations.
void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    // Out-of-line slow path for the poll; a dummy label is used while
    // emitting into a scratch buffer purely to measure code size.
    Label dummy_label;
    Label* code_stub = &dummy_label;
    if (!C->output()->in_scratch_emit_size()) {
      C2SafepointPollStub* stub = new (C->comp_arena()) C2SafepointPollStub(__ offset());
      C->output()->add_stub(stub);
      code_stub = &stub->entry();
    }
    __ relocate(relocInfo::poll_return_type);
    __ safepoint_poll(*code_stub, true /* at_return */, false /* acquire */, true /* in_nmethod */);
  }
}
 1894 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

// Use the generic pipeline description for the epilog.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
 1908 
 1909 //=============================================================================
 1910 
 1911 static enum RC rc_class(OptoReg::Name reg) {
 1912 
 1913   if (reg == OptoReg::Bad) {
 1914     return rc_bad;
 1915   }
 1916 
 1917   // we have 32 int registers * 2 halves
 1918   int slots_of_int_registers = Register::number_of_registers * Register::max_slots_per_register;
 1919 
 1920   if (reg < slots_of_int_registers) {
 1921     return rc_int;
 1922   }
 1923 
 1924   // we have 32 float register * 8 halves
 1925   int slots_of_float_registers = FloatRegister::number_of_registers * FloatRegister::max_slots_per_register;
 1926   if (reg < slots_of_int_registers + slots_of_float_registers) {
 1927     return rc_float;
 1928   }
 1929 
 1930   int slots_of_predicate_registers = PRegister::number_of_registers * PRegister::max_slots_per_register;
 1931   if (reg < slots_of_int_registers + slots_of_float_registers + slots_of_predicate_registers) {
 1932     return rc_predicate;
 1933   }
 1934 
 1935   // Between predicate regs & stack is the flags.
 1936   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
 1937 
 1938   return rc_stack;
 1939 }
 1940 
// Emit or pretty-print a register-allocator spill copy. Handles moves
// between general purpose registers, float/NEON/SVE vector registers,
// SVE predicate registers and stack slots, including stack->stack copies.
// When masm is non-null, code is emitted; when st is non-null, a textual
// description is printed. Always returns 0 — the node's encoded size is
// computed separately via MachSpillCopyNode::size().
uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
  Compile* C = ra_->C;

  // Get registers to move.
  OptoReg::Name src_hi = ra_->get_reg_second(in(1));
  OptoReg::Name src_lo = ra_->get_reg_first(in(1));
  OptoReg::Name dst_hi = ra_->get_reg_second(this);
  OptoReg::Name dst_lo = ra_->get_reg_first(this);

  enum RC src_hi_rc = rc_class(src_hi);
  enum RC src_lo_rc = rc_class(src_lo);
  enum RC dst_hi_rc = rc_class(dst_hi);
  enum RC dst_lo_rc = rc_class(dst_lo);

  assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");

  if (src_hi != OptoReg::Bad && !bottom_type()->isa_vectmask()) {
    assert((src_lo&1)==0 && src_lo+1==src_hi &&
           (dst_lo&1)==0 && dst_lo+1==dst_hi,
           "expected aligned-adjacent pairs");
  }

  if (src_lo == dst_lo && src_hi == dst_hi) {
    return 0;            // Self copy, no move.
  }

  // A 64-bit move occupies an aligned-adjacent pair of slots on both sides.
  bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
              (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
  int src_offset = ra_->reg2offset(src_lo);
  int dst_offset = ra_->reg2offset(dst_lo);

  // Vector copies (mask/predicate vectors are handled in the scalar
  // switch further below).
  if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
    uint ireg = ideal_reg();
    if (ireg == Op_VecA && masm) {
      // Scalable (SVE) vector of runtime-determined length.
      int sve_vector_reg_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
      if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
        // stack->stack
        __ spill_copy_sve_vector_stack_to_stack(src_offset, dst_offset,
                                                sve_vector_reg_size_in_bytes);
      } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
        __ spill_sve_vector(as_FloatRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo),
                            sve_vector_reg_size_in_bytes);
      } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
        __ unspill_sve_vector(as_FloatRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo),
                              sve_vector_reg_size_in_bytes);
      } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
        // Register-to-register move via a self-ORR.
        __ sve_orr(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                   as_FloatRegister(Matcher::_regEncode[src_lo]),
                   as_FloatRegister(Matcher::_regEncode[src_lo]));
      } else {
        ShouldNotReachHere();
      }
    } else if (masm) {
      // Fixed-length NEON vector: 64-bit (VecD) or 128-bit (VecX).
      assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
      assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
      if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
        // stack->stack
        assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
        if (ireg == Op_VecD) {
          // 64-bit copy can bounce through an integer scratch register.
          __ unspill(rscratch1, true, src_offset);
          __ spill(rscratch1, true, dst_offset);
        } else {
          __ spill_copy128(src_offset, dst_offset);
        }
      } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
        __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
               ireg == Op_VecD ? __ T8B : __ T16B,
               as_FloatRegister(Matcher::_regEncode[src_lo]));
      } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
        __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
                 ireg == Op_VecD ? __ D : __ Q,
                 ra_->reg2offset(dst_lo));
      } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
        __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                   ireg == Op_VecD ? __ D : __ Q,
                   ra_->reg2offset(src_lo));
      } else {
        ShouldNotReachHere();
      }
    }
  } else if (masm) {
    // Scalar (and predicate/mask) copies, dispatched on source class.
    switch (src_lo_rc) {
    case rc_int:
      if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
        if (is64) {
            __ mov(as_Register(Matcher::_regEncode[dst_lo]),
                   as_Register(Matcher::_regEncode[src_lo]));
        } else {
            __ movw(as_Register(Matcher::_regEncode[dst_lo]),
                    as_Register(Matcher::_regEncode[src_lo]));
        }
      } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
        if (is64) {
            __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_Register(Matcher::_regEncode[src_lo]));
        } else {
            __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_Register(Matcher::_regEncode[src_lo]));
        }
      } else {                    // gpr --> stack spill
        assert(dst_lo_rc == rc_stack, "spill to bad register class");
        __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
      }
      break;
    case rc_float:
      if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
        if (is64) {
            __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        } else {
            __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        }
      } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
        if (is64) {
            __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        } else {
            __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        }
      } else {                    // fpr --> stack spill
        assert(dst_lo_rc == rc_stack, "spill to bad register class");
        __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
                 is64 ? __ D : __ S, dst_offset);
      }
      break;
    case rc_stack:
      if (dst_lo_rc == rc_int) {  // stack --> gpr load
        __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
      } else if (dst_lo_rc == rc_float) { // stack --> fpr load
        __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                   is64 ? __ D : __ S, src_offset);
      } else if (dst_lo_rc == rc_predicate) {
        // stack --> SVE predicate register load
        __ unspill_sve_predicate(as_PRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo),
                                 Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
      } else {                    // stack --> stack copy
        assert(dst_lo_rc == rc_stack, "spill to bad register class");
        if (ideal_reg() == Op_RegVectMask) {
          __ spill_copy_sve_predicate_stack_to_stack(src_offset, dst_offset,
                                                     Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
        } else {
          // Bounce through the integer scratch register.
          __ unspill(rscratch1, is64, src_offset);
          __ spill(rscratch1, is64, dst_offset);
        }
      }
      break;
    case rc_predicate:
      if (dst_lo_rc == rc_predicate) {
        __ sve_mov(as_PRegister(Matcher::_regEncode[dst_lo]), as_PRegister(Matcher::_regEncode[src_lo]));
      } else if (dst_lo_rc == rc_stack) {
        __ spill_sve_predicate(as_PRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo),
                               Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
      } else {
        assert(false, "bad src and dst rc_class combination.");
        ShouldNotReachHere();
      }
      break;
    default:
      assert(false, "bad rc_class for spill");
      ShouldNotReachHere();
    }
  }

  // Textual form for -XX:+PrintOptoAssembly.
  if (st) {
    st->print("spill ");
    if (src_lo_rc == rc_stack) {
      st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
    } else {
      st->print("%s -> ", Matcher::regName[src_lo]);
    }
    if (dst_lo_rc == rc_stack) {
      st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
    } else {
      st->print("%s", Matcher::regName[dst_lo]);
    }
    if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
      int vsize = 0;
      switch (ideal_reg()) {
      case Op_VecD:
        vsize = 64;
        break;
      case Op_VecX:
        vsize = 128;
        break;
      case Op_VecA:
        vsize = Matcher::scalable_vector_reg_size(T_BYTE) * 8;
        break;
      default:
        assert(false, "bad register type for spill");
        ShouldNotReachHere();
      }
      st->print("\t# vector spill size = %d", vsize);
    } else if (ideal_reg() == Op_RegVectMask) {
      assert(Matcher::supports_scalable_vector(), "bad register type for spill");
      int vsize = Matcher::scalable_predicate_reg_slots() * 32;
      st->print("\t# predicate spill size = %d", vsize);
    } else {
      st->print("\t# spill size = %d", is64 ? 64 : 32);
    }
  }

  return 0;

}
 2146 
#ifndef PRODUCT
// Pretty-print the spill copy; before register allocation only the node
// numbers are known, afterwards the shared implementation prints details.
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    implementation(nullptr, ra_, false, st);
}
#endif

// Emit the spill copy via the shared implementation.
void MachSpillCopyNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  implementation(masm, ra_, false, nullptr);
}

// Size determined generically by emitting into a scratch buffer.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 2163 
 2164 //=============================================================================
 2165 
 2166 #ifndef PRODUCT
 2167 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 2168   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 2169   int reg = ra_->get_reg_first(this);
 2170   st->print("add %s, rsp, #%d]\t# box lock",
 2171             Matcher::regName[reg], offset);
 2172 }
 2173 #endif
 2174 
// Compute the stack address of the monitor box slot into the output register.
void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  // This add will handle any 24-bit signed offset. 24 bits allows an
  // 8 megabyte stack frame.
  __ add(as_Register(reg), sp, offset);
}

// One instruction if the offset fits an add/sub immediate, otherwise two.
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());

  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    return NativeInstruction::instruction_size;
  } else {
    return 2 * NativeInstruction::instruction_size;
  }
}
 2194 
 2195 //=============================================================================
 2196 
 2197 #ifndef PRODUCT
 2198 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 2199 {
 2200   st->print_cr("# MachUEPNode");
 2201   if (UseCompressedClassPointers) {
 2202     st->print_cr("\tldrw rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 2203     st->print_cr("\tldrw r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
 2204     st->print_cr("\tcmpw rscratch1, r10");
 2205   } else {
 2206     st->print_cr("\tldr rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 2207     st->print_cr("\tldr r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
 2208     st->print_cr("\tcmp rscratch1, r10");
 2209   }
 2210   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
 2211 }
 2212 #endif
 2213 
// Emit the inline-cache check at the unverified entry point.
void MachUEPNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
{
  __ ic_check(InteriorEntryAlignment);
}

// Size determined generically by emitting into a scratch buffer.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
 2223 
 2224 // REQUIRED EMIT CODE
 2225 
 2226 //=============================================================================
 2227 
 2228 // Emit exception handler code.
// Emit exception handler code.
// Returns the handler's offset in the stub section, or 0 on failure
// (code cache full).
int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  address base = __ start_a_stub(size_exception_handler());
  if (base == nullptr) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
 2246 
 2247 // Emit deopt handler code.
// Emit deopt handler code.
// Returns the handler's offset in the stub section, or 0 on failure
// (code cache full).
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  address base = __ start_a_stub(size_deopt_handler());
  if (base == nullptr) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // Capture the current pc into lr, then jump to the deopt blob's
  // unpack entry.
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset == (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
 2266 
 2267 // REQUIRED MATCHER CODE
 2268 
 2269 //=============================================================================
 2270 
 2271 bool Matcher::match_rule_supported(int opcode) {
 2272   if (!has_match_rule(opcode))
 2273     return false;
 2274 
 2275   switch (opcode) {
 2276     case Op_OnSpinWait:
 2277       return VM_Version::supports_on_spin_wait();
 2278     case Op_CacheWB:
 2279     case Op_CacheWBPreSync:
 2280     case Op_CacheWBPostSync:
 2281       if (!VM_Version::supports_data_cache_line_flush()) {
 2282         return false;
 2283       }
 2284       break;
 2285     case Op_ExpandBits:
 2286     case Op_CompressBits:
 2287       if (!VM_Version::supports_svebitperm()) {
 2288         return false;
 2289       }
 2290       break;
 2291     case Op_FmaF:
 2292     case Op_FmaD:
 2293     case Op_FmaVF:
 2294     case Op_FmaVD:
 2295       if (!UseFMA) {
 2296         return false;
 2297       }
 2298       break;
 2299   }
 2300 
 2301   return true; // Per default match rules are supported.
 2302 }
 2303 
// Register mask for SVE predicate registers.
const RegMask* Matcher::predicate_reg_mask(void) {
  return &_PR_REG_mask;
}

// Vector calling convention is available when vector stubs are enabled.
bool Matcher::supports_vector_calling_convention(void) {
  return EnableVectorSupport && UseVectorStubs;
}

// Return the register pair used to return a vector value: V0, with the
// high half widened for 128-bit (VecX) and scalable (VecA) vectors.
OptoRegPair Matcher::vector_return_value(uint ideal_reg) {
  assert(EnableVectorSupport && UseVectorStubs, "sanity");
  int lo = V0_num;
  int hi = V0_H_num;
  if (ideal_reg == Op_VecX || ideal_reg == Op_VecA) {
    hi = V0_K_num;
  }
  return OptoRegPair(hi, lo);
}
 2321 
 2322 // Is this branch offset short enough that a short branch can be used?
 2323 //
 2324 // NOTE: If the platform does not provide any short branch variants, then
 2325 //       this method should return false for offset 0.
// A branch is "short" if its byte offset fits a signed 16-bit range.
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
  // The passed offset is relative to address of the branch.

  return (-32768 <= offset && offset < 32768);
}
 2331 
 2332 // Vector width in bytes.
// Vector width in bytes for the given element type; 0 means no vector
// support for that type.
int Matcher::vector_width_in_bytes(BasicType bt) {
  // The MaxVectorSize should have been set by detecting SVE max vector register size.
  int size = MIN2((UseSVE > 0) ? (int)FloatRegister::sve_vl_max : (int)FloatRegister::neon_vl, (int)MaxVectorSize);
  // Minimum 2 values in vector
  if (size < 2*type2aelembytes(bt)) size = 0;
  // But never < 4
  if (size < 4) size = 0;
  return size;
}

// Limits on vector size (number of elements) loaded into vector.
int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}

// Minimum number of elements per vector, clamped to the maximum.
int Matcher::min_vector_size(const BasicType bt) {
  int max_size = max_vector_size(bt);
  // Limit the min vector size to 8 bytes.
  int size = 8 / type2aelembytes(bt);
  if (bt == T_BYTE) {
    // To support vector api shuffle/rearrange.
    size = 4;
  } else if (bt == T_BOOLEAN) {
    // To support vector api load/store mask.
    size = 2;
  }
  if (size < 2) size = 2;
  return MIN2(size, max_size);
}

// Auto-vectorization uses the same limit as the Vector API.
int Matcher::max_vector_size_auto_vectorization(const BasicType bt) {
  return Matcher::max_vector_size(bt);
}

// Actual max scalable vector register length.
int Matcher::scalable_vector_reg_size(const BasicType bt) {
  return Matcher::max_vector_size(bt);
}
 2371 
 2372 // Vector ideal reg.
 2373 uint Matcher::vector_ideal_reg(int len) {
 2374   if (UseSVE > 0 && FloatRegister::neon_vl < len && len <= FloatRegister::sve_vl_max) {
 2375     return Op_VecA;
 2376   }
 2377   switch(len) {
 2378     // For 16-bit/32-bit mask vector, reuse VecD.
 2379     case  2:
 2380     case  4:
 2381     case  8: return Op_VecD;
 2382     case 16: return Op_VecX;
 2383   }
 2384   ShouldNotReachHere();
 2385   return 0;
 2386 }
 2387 
// Replace a generic vector operand with the concrete operand class
// matching the ideal register type.
MachOper* Matcher::pd_specialize_generic_vector_operand(MachOper* generic_opnd, uint ideal_reg, bool is_temp) {
  assert(Matcher::is_generic_vector(generic_opnd), "not generic");
  switch (ideal_reg) {
    case Op_VecA: return new vecAOper();
    case Op_VecD: return new vecDOper();
    case Op_VecX: return new vecXOper();
  }
  ShouldNotReachHere();
  return nullptr;
}

// No special reg-to-reg move nodes on AArch64.
bool Matcher::is_reg2reg_move(MachNode* m) {
  return false;
}

// A generic vector operand is identified by the VREG opcode.
bool Matcher::is_generic_vector(MachOper* opnd)  {
  return opnd->opcode() == VREG;
}
 2406 
// Return whether or not this register is ever used as an argument.
// This function is used on startup to build the trampoline stubs in
// generateOptoStub.  Registers not mentioned will be killed by the VM
// call in the trampoline, and arguments in those registers not be
// available to the callee.
bool Matcher::can_be_java_arg(int reg)
{
  // R0-R7 and V0-V7 (both halves) are the AArch64 Java argument registers.
  return
    reg ==  R0_num || reg == R0_H_num ||
    reg ==  R1_num || reg == R1_H_num ||
    reg ==  R2_num || reg == R2_H_num ||
    reg ==  R3_num || reg == R3_H_num ||
    reg ==  R4_num || reg == R4_H_num ||
    reg ==  R5_num || reg == R5_H_num ||
    reg ==  R6_num || reg == R6_H_num ||
    reg ==  R7_num || reg == R7_H_num ||
    reg ==  V0_num || reg == V0_H_num ||
    reg ==  V1_num || reg == V1_H_num ||
    reg ==  V2_num || reg == V2_H_num ||
    reg ==  V3_num || reg == V3_H_num ||
    reg ==  V4_num || reg == V4_H_num ||
    reg ==  V5_num || reg == V5_H_num ||
    reg ==  V6_num || reg == V6_H_num ||
    reg ==  V7_num || reg == V7_H_num;
}

// All Java argument registers are spillable.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
 2437 
// Integer register pressure threshold used by the register allocator;
// overridable via -XX:INTPRESSURE.
uint Matcher::int_pressure_limit()
{
  // JDK-8183543: When taking the number of available registers as int
  // register pressure threshold, the jtreg test:
  // test/hotspot/jtreg/compiler/regalloc/TestC2IntPressure.java
  // failed due to C2 compilation failure with
  // "COMPILE SKIPPED: failed spill-split-recycle sanity check".
  //
  // A derived pointer is live at CallNode and then is flagged by RA
  // as a spilled LRG. Spilling heuristics(Spill-USE) explicitly skip
  // derived pointers and lastly fail to spill after reaching maximum
  // number of iterations. Lowering the default pressure threshold to
  // (_NO_SPECIAL_REG32_mask.Size() minus 1) forces CallNode to become
  // a high register pressure area of the code so that split_DEF can
  // generate DefinitionSpillCopy for the derived pointer.
  uint default_int_pressure_threshold = _NO_SPECIAL_REG32_mask.Size() - 1;
  if (!PreserveFramePointer) {
    // When PreserveFramePointer is off, frame pointer is allocatable,
    // but different from other SOC registers, it is excluded from
    // fatproj's mask because its save type is No-Save. Decrease 1 to
    // ensure high pressure at fatproj when PreserveFramePointer is off.
    // See check_pressure_at_fatproj().
    default_int_pressure_threshold--;
  }
  return (INTPRESSURE == -1) ? default_int_pressure_threshold : INTPRESSURE;
}

// Float register pressure threshold; overridable via -XX:FLOATPRESSURE.
uint Matcher::float_pressure_limit()
{
  // _FLOAT_REG_mask is generated by adlc from the float_reg register class.
  return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.Size() : FLOATPRESSURE;
}
 2470 
// AArch64 does not use an assembler sequence for long division by constant.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}

// Register for DIVI projection of divmodI. AArch64 has no combined
// div/mod node, so none of these projections should ever be requested.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// SP is saved in the frame pointer register across method handle invokes.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
 2501 
 2502 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
 2503   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
 2504     Node* u = addp->fast_out(i);
 2505     if (u->is_LoadStore()) {
 2506       // On AArch64, LoadStoreNodes (i.e. compare and swap
 2507       // instructions) only take register indirect as an operand, so
 2508       // any attempt to use an AddPNode as an input to a LoadStoreNode
 2509       // must fail.
 2510       return false;
 2511     }
 2512     if (u->is_Mem()) {
 2513       int opsize = u->as_Mem()->memory_size();
 2514       assert(opsize > 0, "unexpected memory operand size");
 2515       if (u->as_Mem()->memory_size() != (1<<shift)) {
 2516         return false;
 2517       }
 2518     }
 2519   }
 2520   return true;
 2521 }
 2522 
// Convert BoolTest condition to Assembler condition.
// Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
// The u-prefixed BoolTest masks map to the unsigned AArch64 conditions
// (LS/HS/LO/HI); the rest map to the signed ones.
Assembler::Condition to_assembler_cond(BoolTest::mask cond) {
  Assembler::Condition result;
  switch(cond) {
    case BoolTest::eq:
      result = Assembler::EQ; break;
    case BoolTest::ne:
      result = Assembler::NE; break;
    case BoolTest::le:
      result = Assembler::LE; break;
    case BoolTest::ge:
      result = Assembler::GE; break;
    case BoolTest::lt:
      result = Assembler::LT; break;
    case BoolTest::gt:
      result = Assembler::GT; break;
    case BoolTest::ule:
      result = Assembler::LS; break;
    case BoolTest::uge:
      result = Assembler::HS; break;
    case BoolTest::ult:
      result = Assembler::LO; break;
    case BoolTest::ugt:
      result = Assembler::HI; break;
    case BoolTest::overflow:
      result = Assembler::VS; break;
    case BoolTest::no_overflow:
      result = Assembler::VC; break;
    default:
      ShouldNotReachHere();
      return Assembler::Condition(-1);
  }

  // Check conversion
  // Debug-only cross-check against the adlc-generated operand classes:
  // strip the unsigned_compare bit before consulting cmpOpUOper, since
  // its ccode() table is indexed by the base comparison.
  if (cond & BoolTest::unsigned_compare) {
    assert(cmpOpUOper((BoolTest::mask)((int)cond & ~(BoolTest::unsigned_compare))).ccode() == result, "Invalid conversion");
  } else {
    assert(cmpOpOper(cond).ccode() == result, "Invalid conversion");
  }

  return result;
}
 2566 
 2567 // Binary src (Replicate con)
 2568 static bool is_valid_sve_arith_imm_pattern(Node* n, Node* m) {
 2569   if (n == nullptr || m == nullptr) {
 2570     return false;
 2571   }
 2572 
 2573   if (UseSVE == 0 || m->Opcode() != Op_Replicate) {
 2574     return false;
 2575   }
 2576 
 2577   Node* imm_node = m->in(1);
 2578   if (!imm_node->is_Con()) {
 2579     return false;
 2580   }
 2581 
 2582   const Type* t = imm_node->bottom_type();
 2583   if (!(t->isa_int() || t->isa_long())) {
 2584     return false;
 2585   }
 2586 
 2587   switch (n->Opcode()) {
 2588   case Op_AndV:
 2589   case Op_OrV:
 2590   case Op_XorV: {
 2591     Assembler::SIMD_RegVariant T = Assembler::elemType_to_regVariant(Matcher::vector_element_basic_type(n));
 2592     uint64_t value = t->isa_long() ? (uint64_t)imm_node->get_long() : (uint64_t)imm_node->get_int();
 2593     return Assembler::operand_valid_for_sve_logical_immediate(Assembler::regVariant_to_elemBits(T), value);
 2594   }
 2595   case Op_AddVB:
 2596     return (imm_node->get_int() <= 255 && imm_node->get_int() >= -255);
 2597   case Op_AddVS:
 2598   case Op_AddVI:
 2599     return Assembler::operand_valid_for_sve_add_sub_immediate((int64_t)imm_node->get_int());
 2600   case Op_AddVL:
 2601     return Assembler::operand_valid_for_sve_add_sub_immediate(imm_node->get_long());
 2602   default:
 2603     return false;
 2604   }
 2605 }
 2606 
 2607 // (XorV src (Replicate m1))
 2608 // (XorVMask src (MaskAll m1))
 2609 static bool is_vector_bitwise_not_pattern(Node* n, Node* m) {
 2610   if (n != nullptr && m != nullptr) {
 2611     return (n->Opcode() == Op_XorV || n->Opcode() == Op_XorVMask) &&
 2612            VectorNode::is_all_ones_vector(m);
 2613   }
 2614   return false;
 2615 }
 2616 
 2617 // Should the matcher clone input 'm' of node 'n'?
 2618 bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
 2619   if (is_vshift_con_pattern(n, m) ||
 2620       is_vector_bitwise_not_pattern(n, m) ||
 2621       is_valid_sve_arith_imm_pattern(n, m) ||
 2622       is_encode_and_store_pattern(n, m)) {
 2623     mstack.push(m, Visit);
 2624     return true;
 2625   }
 2626   return false;
 2627 }
 2628 
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {

  // Loads and stores with indirect memory input (e.g., volatile loads and
  // stores) do not subsume the input into complex addressing expressions. If
  // the addressing expression is input to at least one such load or store, do
  // not clone the addressing expression. Query needs_acquiring_load and
  // needs_releasing_store as a proxy for indirect memory input, as it is not
  // possible to directly query for indirect memory input at this stage.
  for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
    Node* n = m->fast_out(i);
    if (n->is_Load() && needs_acquiring_load(n)) {
      return false;
    }
    if (n->is_Store() && needs_releasing_store(n)) {
      return false;
    }
  }

  // Simple (base + constant offset) shape is handled by shared code.
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  // Shape: AddP(base, addr, LShiftL(ConvI2L(index), con)) -- fold the
  // scaled (and, if the ConvI2L is private to this address, the
  // sign-extended) index into the addressing mode.
  Node *off = m->in(AddPNode::Offset);
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    // Shape: AddP(base, addr, ConvI2L(index)) -- fold the sign-extending
    // conversion into the addressing mode.
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
 2686 
// Emit a volatile load/store INSN of REG at [BASE].  Volatile accesses
// only support plain register-indirect addressing, so INDEX must be
// absent (-1) and SCALE/DISP must be zero.  SCRATCH is not used by
// this macro; it is accepted only so call sites share one shape.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
 2694 
 2695 
// Build an Address from the decomposed components of a memory operand.
// 'opcode' is the operand's match opcode, used only to decide whether the
// index register needs sign extension (sxtw) or a plain shift (lsl).
// With no index (index == -1) the result is base + disp; otherwise disp
// must be zero and the result is base + extended/scaled index.
static Address mem2address(int opcode, Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
    case INDINDEXI2L:
    case INDINDEXI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      return Address(base, disp);
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      return Address(base, as_Register(index), scale);
    }
  }
 2721 
 2722 
// Pointer-to-member types for the MacroAssembler emit routines invoked by
// the loadStore helpers below.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);        // integer reg, full Address
typedef void (MacroAssembler::* mem_insn2)(Register Rt, Register adr);             // integer reg, register-indirect only
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);  // FP/SIMD scalar
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);  // SIMD vector, sized by T
 2728 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  // 'size_in_memory' is the access width in bytes, used to decide
  // whether the displacement is encodable; rscratch1 may be clobbered
  // to legitimize an out-of-range offset, so it must not alias the
  // base or data register.
  static void loadStore(C2_MacroAssembler* masm, mem_insn insn,
                        Register reg, int opcode,
                        Register base, int index, int scale, int disp,
                        int size_in_memory)
  {
    Address addr = mem2address(opcode, base, index, scale, disp);
    if (addr.getMode() == Address::base_plus_offset) {
      /* Fix up any out-of-range offsets. */
      assert_different_registers(rscratch1, base);
      assert_different_registers(rscratch1, reg);
      addr = __ legitimize_address(addr, size_in_memory, rscratch1);
    }
    (masm->*insn)(reg, addr);
  }
 2746 
 2747   static void loadStore(C2_MacroAssembler* masm, mem_float_insn insn,
 2748                         FloatRegister reg, int opcode,
 2749                         Register base, int index, int size, int disp,
 2750                         int size_in_memory)
 2751   {
 2752     Address::extend scale;
 2753 
 2754     switch (opcode) {
 2755     case INDINDEXSCALEDI2L:
 2756     case INDINDEXSCALEDI2LN:
 2757       scale = Address::sxtw(size);
 2758       break;
 2759     default:
 2760       scale = Address::lsl(size);
 2761     }
 2762 
 2763     if (index == -1) {
 2764       // Fix up any out-of-range offsets.
 2765       assert_different_registers(rscratch1, base);
 2766       Address addr = Address(base, disp);
 2767       addr = __ legitimize_address(addr, size_in_memory, rscratch1);
 2768       (masm->*insn)(reg, addr);
 2769     } else {
 2770       assert(disp == 0, "unsupported address mode: disp = %d", disp);
 2771       (masm->*insn)(reg, Address(base, as_Register(index), scale));
 2772     }
 2773   }
 2774 
 2775   static void loadStore(C2_MacroAssembler* masm, mem_vector_insn insn,
 2776                         FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
 2777                         int opcode, Register base, int index, int size, int disp)
 2778   {
 2779     if (index == -1) {
 2780       (masm->*insn)(reg, T, Address(base, disp));
 2781     } else {
 2782       assert(disp == 0, "unsupported address mode");
 2783       (masm->*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
 2784     }
 2785   }
 2786 
 2787 %}
 2788 
 2789 
 2790 
 2791 //----------ENCODING BLOCK-----------------------------------------------------
 2792 // This block specifies the encoding classes used by the compiler to
 2793 // output byte streams.  Encoding classes are parameterized macros
 2794 // used by Machine Instruction Nodes in order to generate the bit
 2795 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  Four interfaces are
// currently supported: REG_INTER, CONST_INTER, MEMORY_INTER, and
 2798 // COND_INTER.  REG_INTER causes an operand to generate a function
 2799 // which returns its register number when queried.  CONST_INTER causes
 2800 // an operand to generate a function which returns the value of the
 2801 // constant when queried.  MEMORY_INTER causes an operand to generate
 2802 // four functions which return the Base Register, the Index Register,
 2803 // the Scale Value, and the Offset Value of the operand when queried.
 2804 // COND_INTER causes an operand to generate six functions which return
 2805 // the encoding code (ie - encoding bits for the instruction)
 2806 // associated with each basic boolean condition for a conditional
 2807 // instruction.
 2808 //
 2809 // Instructions specify two basic values for encoding.  Again, a
 2810 // function is available to check if the constant displacement is an
 2811 // oop. They use the ins_encode keyword to specify their encoding
 2812 // classes (which must be a sequence of enc_class names, and their
 2813 // parameters, specified in the encoding block), and they use the
 2814 // opcode keyword to specify, in order, their primary, secondary, and
 2815 // tertiary opcode.  Only the opcode sections which a particular
 2816 // instruction needs for encoding need to be specified.
 2817 encode %{
 2818   // Build emit functions for each basic byte or larger field in the
 2819   // intel encoding scheme (opcode, rm, sib, immediate), and call them
 2820   // from C++ code in the enc_class source block.  Emit functions will
 2821   // live in the main source block for now.  In future, we can
 2822   // generalize this by adding a syntax that specifies the sizes of
 2823   // fields in an order, so that the adlc can build the emit functions
 2824   // automagically
 2825 
  // catch all for unimplemented encodings
  // (see MacroAssembler::unimplemented for what this emits at runtime)
  enc_class enc_unimplemented %{
    __ unimplemented("C2 catch all");
  %}
 2830 
 2831   // BEGIN Non-volatile memory access
 2832 
 2833   // This encoding class is generated automatically from ad_encode.m4.
 2834   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2835   enc_class aarch64_enc_ldrsbw(iRegI dst, memory1 mem) %{
 2836     Register dst_reg = as_Register($dst$$reg);
 2837     loadStore(masm, &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
 2838                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2839   %}
 2840 
 2841   // This encoding class is generated automatically from ad_encode.m4.
 2842   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2843   enc_class aarch64_enc_ldrsb(iRegI dst, memory1 mem) %{
 2844     Register dst_reg = as_Register($dst$$reg);
 2845     loadStore(masm, &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
 2846                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2847   %}
 2848 
 2849   // This encoding class is generated automatically from ad_encode.m4.
 2850   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2851   enc_class aarch64_enc_ldrb(iRegI dst, memory1 mem) %{
 2852     Register dst_reg = as_Register($dst$$reg);
 2853     loadStore(masm, &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
 2854                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2855   %}
 2856 
 2857   // This encoding class is generated automatically from ad_encode.m4.
 2858   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2859   enc_class aarch64_enc_ldrb(iRegL dst, memory1 mem) %{
 2860     Register dst_reg = as_Register($dst$$reg);
 2861     loadStore(masm, &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
 2862                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2863   %}
 2864 
 2865   // This encoding class is generated automatically from ad_encode.m4.
 2866   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2867   enc_class aarch64_enc_ldrshw(iRegI dst, memory2 mem) %{
 2868     Register dst_reg = as_Register($dst$$reg);
 2869     loadStore(masm, &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
 2870                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2871   %}
 2872 
 2873   // This encoding class is generated automatically from ad_encode.m4.
 2874   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2875   enc_class aarch64_enc_ldrsh(iRegI dst, memory2 mem) %{
 2876     Register dst_reg = as_Register($dst$$reg);
 2877     loadStore(masm, &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
 2878                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2879   %}
 2880 
 2881   // This encoding class is generated automatically from ad_encode.m4.
 2882   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2883   enc_class aarch64_enc_ldrh(iRegI dst, memory2 mem) %{
 2884     Register dst_reg = as_Register($dst$$reg);
 2885     loadStore(masm, &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
 2886                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2887   %}
 2888 
 2889   // This encoding class is generated automatically from ad_encode.m4.
 2890   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2891   enc_class aarch64_enc_ldrh(iRegL dst, memory2 mem) %{
 2892     Register dst_reg = as_Register($dst$$reg);
 2893     loadStore(masm, &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
 2894                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2895   %}
 2896 
 2897   // This encoding class is generated automatically from ad_encode.m4.
 2898   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2899   enc_class aarch64_enc_ldrw(iRegI dst, memory4 mem) %{
 2900     Register dst_reg = as_Register($dst$$reg);
 2901     loadStore(masm, &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
 2902                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2903   %}
 2904 
 2905   // This encoding class is generated automatically from ad_encode.m4.
 2906   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2907   enc_class aarch64_enc_ldrw(iRegL dst, memory4 mem) %{
 2908     Register dst_reg = as_Register($dst$$reg);
 2909     loadStore(masm, &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
 2910                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2911   %}
 2912 
 2913   // This encoding class is generated automatically from ad_encode.m4.
 2914   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2915   enc_class aarch64_enc_ldrsw(iRegL dst, memory4 mem) %{
 2916     Register dst_reg = as_Register($dst$$reg);
 2917     loadStore(masm, &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
 2918                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2919   %}
 2920 
 2921   // This encoding class is generated automatically from ad_encode.m4.
 2922   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2923   enc_class aarch64_enc_ldr(iRegL dst, memory8 mem) %{
 2924     Register dst_reg = as_Register($dst$$reg);
 2925     loadStore(masm, &MacroAssembler::ldr, dst_reg, $mem->opcode(),
 2926                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 2927   %}
 2928 
 2929   // This encoding class is generated automatically from ad_encode.m4.
 2930   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2931   enc_class aarch64_enc_ldrs(vRegF dst, memory4 mem) %{
 2932     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
 2933     loadStore(masm, &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
 2934                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2935   %}
 2936 
 2937   // This encoding class is generated automatically from ad_encode.m4.
 2938   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2939   enc_class aarch64_enc_ldrd(vRegD dst, memory8 mem) %{
 2940     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
 2941     loadStore(masm, &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
 2942                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 2943   %}
 2944 
 2945   // This encoding class is generated automatically from ad_encode.m4.
 2946   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2947   enc_class aarch64_enc_strb(iRegI src, memory1 mem) %{
 2948     Register src_reg = as_Register($src$$reg);
 2949     loadStore(masm, &MacroAssembler::strb, src_reg, $mem->opcode(),
 2950                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2951   %}
 2952 
 2953   // This encoding class is generated automatically from ad_encode.m4.
 2954   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2955   enc_class aarch64_enc_strb0(memory1 mem) %{
 2956     loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(),
 2957                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2958   %}
 2959 
 2960   // This encoding class is generated automatically from ad_encode.m4.
 2961   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2962   enc_class aarch64_enc_strh(iRegI src, memory2 mem) %{
 2963     Register src_reg = as_Register($src$$reg);
 2964     loadStore(masm, &MacroAssembler::strh, src_reg, $mem->opcode(),
 2965                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2966   %}
 2967 
 2968   // This encoding class is generated automatically from ad_encode.m4.
 2969   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2970   enc_class aarch64_enc_strh0(memory2 mem) %{
 2971     loadStore(masm, &MacroAssembler::strh, zr, $mem->opcode(),
 2972                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2973   %}
 2974 
 2975   // This encoding class is generated automatically from ad_encode.m4.
 2976   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2977   enc_class aarch64_enc_strw(iRegI src, memory4 mem) %{
 2978     Register src_reg = as_Register($src$$reg);
 2979     loadStore(masm, &MacroAssembler::strw, src_reg, $mem->opcode(),
 2980                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2981   %}
 2982 
 2983   // This encoding class is generated automatically from ad_encode.m4.
 2984   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2985   enc_class aarch64_enc_strw0(memory4 mem) %{
 2986     loadStore(masm, &MacroAssembler::strw, zr, $mem->opcode(),
 2987                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2988   %}
 2989 
 2990   // This encoding class is generated automatically from ad_encode.m4.
 2991   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2992   enc_class aarch64_enc_str(iRegL src, memory8 mem) %{
 2993     Register src_reg = as_Register($src$$reg);
 2994     // we sometimes get asked to store the stack pointer into the
 2995     // current thread -- we cannot do that directly on AArch64
 2996     if (src_reg == r31_sp) {
 2997       assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
 2998       __ mov(rscratch2, sp);
 2999       src_reg = rscratch2;
 3000     }
 3001     loadStore(masm, &MacroAssembler::str, src_reg, $mem->opcode(),
 3002                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 3003   %}
 3004 
 3005   // This encoding class is generated automatically from ad_encode.m4.
 3006   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3007   enc_class aarch64_enc_str0(memory8 mem) %{
 3008     loadStore(masm, &MacroAssembler::str, zr, $mem->opcode(),
 3009                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 3010   %}
 3011 
 3012   // This encoding class is generated automatically from ad_encode.m4.
 3013   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3014   enc_class aarch64_enc_strs(vRegF src, memory4 mem) %{
 3015     FloatRegister src_reg = as_FloatRegister($src$$reg);
 3016     loadStore(masm, &MacroAssembler::strs, src_reg, $mem->opcode(),
 3017                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 3018   %}
 3019 
 3020   // This encoding class is generated automatically from ad_encode.m4.
 3021   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3022   enc_class aarch64_enc_strd(vRegD src, memory8 mem) %{
 3023     FloatRegister src_reg = as_FloatRegister($src$$reg);
 3024     loadStore(masm, &MacroAssembler::strd, src_reg, $mem->opcode(),
 3025                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 3026   %}
 3027 
 3028   // This encoding class is generated automatically from ad_encode.m4.
 3029   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3030   enc_class aarch64_enc_strb0_ordered(memory4 mem) %{
 3031       __ membar(Assembler::StoreStore);
 3032       loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(),
 3033                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 3034   %}
 3035 
 3036   // END Non-volatile memory access
 3037 
  // Vector loads and stores
  // All of these route through the mem_vector_insn loadStore helper; the
  // H/S/D/Q variant selects the SIMD register width being transferred.
  enc_class aarch64_enc_ldrvH(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvS(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Stores mirror the loads above, with src in place of dst.
  enc_class aarch64_enc_strvH(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvS(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
 3086 
 3087   // volatile loads and stores
 3088 
 3089   enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
 3090     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3091                  rscratch1, stlrb);
 3092   %}
 3093 
 3094   enc_class aarch64_enc_stlrb0(memory mem) %{
 3095     MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3096                  rscratch1, stlrb);
 3097   %}
 3098 
 3099   enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
 3100     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3101                  rscratch1, stlrh);
 3102   %}
 3103 
 3104   enc_class aarch64_enc_stlrh0(memory mem) %{
 3105     MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3106                  rscratch1, stlrh);
 3107   %}
 3108 
 3109   enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
 3110     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3111                  rscratch1, stlrw);
 3112   %}
 3113 
 3114   enc_class aarch64_enc_stlrw0(memory mem) %{
 3115     MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3116                  rscratch1, stlrw);
 3117   %}
 3118 
 3119   enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
 3120     Register dst_reg = as_Register($dst$$reg);
 3121     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3122              rscratch1, ldarb);
 3123     __ sxtbw(dst_reg, dst_reg);
 3124   %}
 3125 
 3126   enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
 3127     Register dst_reg = as_Register($dst$$reg);
 3128     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3129              rscratch1, ldarb);
 3130     __ sxtb(dst_reg, dst_reg);
 3131   %}
 3132 
 3133   enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
 3134     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3135              rscratch1, ldarb);
 3136   %}
 3137 
 3138   enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
 3139     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3140              rscratch1, ldarb);
 3141   %}
 3142 
 3143   enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
 3144     Register dst_reg = as_Register($dst$$reg);
 3145     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3146              rscratch1, ldarh);
 3147     __ sxthw(dst_reg, dst_reg);
 3148   %}
 3149 
 3150   enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
 3151     Register dst_reg = as_Register($dst$$reg);
 3152     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3153              rscratch1, ldarh);
 3154     __ sxth(dst_reg, dst_reg);
 3155   %}
 3156 
 3157   enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
 3158     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3159              rscratch1, ldarh);
 3160   %}
 3161 
 3162   enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
 3163     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3164              rscratch1, ldarh);
 3165   %}
 3166 
 3167   enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
 3168     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3169              rscratch1, ldarw);
 3170   %}
 3171 
 3172   enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
 3173     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3174              rscratch1, ldarw);
 3175   %}
 3176 
 3177   enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
 3178     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3179              rscratch1, ldar);
 3180   %}
 3181 
 3182   enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
 3183     MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3184              rscratch1, ldarw);
 3185     __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
 3186   %}
 3187 
 3188   enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
 3189     MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 3190              rscratch1, ldar);
 3191     __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
 3192   %}
 3193 
  // Volatile store of a 64-bit long (store-release).
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    // (sp is not a valid operand of stlr), so copy it through rscratch2.
    if (src_reg == r31_sp) {
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // Volatile store of the constant zero (store-release of zr).
  enc_class aarch64_enc_stlr0(memory mem) %{
    MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
 3211 
  // Volatile store of a float: no release form of an FP store exists,
  // so move the value to rscratch2 and store-release the word.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // Volatile store of a double, via fmov to rscratch2 and a 64-bit
  // store-release.
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
 3229 
 3230   // synchronized read/update encodings
 3231 
  // Load-acquire-exclusive of a 64-bit value.  ldaxr only accepts a
  // bare base register, so any index or displacement must first be
  // folded into rscratch1 with lea.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory8 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // Fold the displacement and then the scaled index in two steps.
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
 3259 
  // Store-release-exclusive of a 64-bit value.  The status result of
  // stlxr (0 on success) lands in rscratch1; the trailing cmpw sets
  // the flags so a following conditional branch can test success.
  // Address folding uses rscratch2, since rscratch1 holds the status.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory8 mem) %{
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // Fold the displacement and then the scaled index in two steps.
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    __ cmpw(rscratch1, zr);
  %}
 3288 
  // Compare-and-exchange encodings, one per operand size (xword, word,
  // halfword, byte).  The matcher guarantees a plain base address
  // (no index, no displacement) for these; the guarantee enforces it.
  // All variants are release-only (acquire == false); the _acq
  // variants below add acquire semantics.

  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3316 
 3317 
  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit acquiring compare-and-exchange.
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 16-bit acquiring compare-and-exchange.
  enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 8-bit acquiring compare-and-exchange.
  enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3349 
 3350   // auxiliary used for CompareAndSwapX to set result register
 3351   enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
 3352     Register res_reg = as_Register($res$$reg);
 3353     __ cset(res_reg, Assembler::EQ);
 3354   %}
 3355 
 3356   // prefetch encodings
 3357 
 3358   enc_class aarch64_enc_prefetchw(memory mem) %{
 3359     Register base = as_Register($mem$$base);
 3360     int index = $mem$$index;
 3361     int scale = $mem$$scale;
 3362     int disp = $mem$$disp;
 3363     if (index == -1) {
 3364       // Fix up any out-of-range offsets.
 3365       assert_different_registers(rscratch1, base);
 3366       Address addr = Address(base, disp);
 3367       addr = __ legitimize_address(addr, 8, rscratch1);
 3368       __ prfm(addr, PSTL1KEEP);
 3369     } else {
 3370       Register index_reg = as_Register(index);
 3371       if (disp == 0) {
 3372         __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
 3373       } else {
 3374         __ lea(rscratch1, Address(base, disp));
 3375 	__ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
 3376       }
 3377     }
 3378   %}
 3379 
 3380   // mov encodings
 3381 
  // Load a 32-bit immediate into a register.  Zero gets a move from
  // zr; other values use the immediate-materializing movw form
  // (these emit different encodings, so the branch is deliberate).
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    uint32_t con = (uint32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}
 3391 
  // Load a 64-bit immediate into a register; zero uses a move from zr,
  // everything else the immediate-materializing mov form.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    Register dst_reg = as_Register($dst$$reg);
    uint64_t con = (uint64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}
 3401 
  // Load a pointer constant, dispatching on its relocation type:
  // oops and metadata need relocation-aware moves; plain addresses
  // use adrp+add when PC-relative addressing can reach them, else a
  // full immediate move.  Null and the value 1 are handled by the
  // dedicated mov_p0/mov_p1 encodings, never here.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == nullptr || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        // Low addresses (below a page) and addresses adrp cannot
        // express are materialized as a plain immediate.
        if (! __ is_valid_AArch64_address(con) ||
            con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          uint64_t offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
 3426 
  // Load the null pointer constant (move from zr).
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Load the pointer constant 1.
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (uint64_t)1);
  %}

  // Load the card table's byte map base (GC barrier constant).
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    __ load_byte_map_base($dst$$Register);
  %}
 3440 
  // Load a narrow (compressed) oop constant; must carry an oop
  // relocation.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == nullptr) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // Load the narrow-oop null constant (move from zr).
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Load a narrow (compressed) klass constant; must carry a metadata
  // relocation.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == nullptr) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
 3469 
 3470   // arithmetic encodings
 3471 
  // 32-bit add/subtract with immediate; one encoding serves both ops.
  // A negative (possibly negated) constant is folded into the opposite
  // instruction so the assembler always sees a non-negative immediate.
  // NOTE(review): con = -con would overflow for INT_MIN; presumably
  // immIAddSub restricts the constant to the 12-bit add/sub range —
  // confirm against the operand definition.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // 64-bit variant of the above; the constant still fits in 32 bits
  // (immLAddSub), so the int32_t cast is safe.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
 3497 
  // Integer division and remainder.  corrected_idivl/idivq handle the
  // Java-mandated corner cases (divide of MIN_VALUE by -1); the bool
  // argument selects remainder (true) vs quotient (false).
  // NOTE(review): the 64-bit variants declare iRegI parameters but emit
  // corrected_idivq; the enc_class parameter types appear not to be
  // enforced here — confirm against the matching instruct definitions.

  enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}

  enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}
 3525 
 3526   // compare instruction encodings
 3527 
 3528   enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
 3529     Register reg1 = as_Register($src1$$reg);
 3530     Register reg2 = as_Register($src2$$reg);
 3531     __ cmpw(reg1, reg2);
 3532   %}
 3533 
  // Compare a 32-bit register with a small immediate by subtracting
  // into zr; a negative constant becomes an adds of its negation.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}
 3543 
  // Compare a 32-bit register with an arbitrary immediate: the
  // constant does not fit the add/sub immediate form, so materialize
  // it in rscratch1 first.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    Register reg1 = as_Register($src1$$reg);
    uint32_t val = (uint32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}
 3550 
 3551   enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
 3552     Register reg1 = as_Register($src1$$reg);
 3553     Register reg2 = as_Register($src2$$reg);
 3554     __ cmp(reg1, reg2);
 3555   %}
 3556 
  // Compare a 64-bit register with a 12-bit immediate via subs/adds
  // into zr.  The val != -val test detects Long.MIN_VALUE, whose
  // negation is itself and so cannot go through the adds path.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      __ orr(rscratch1, zr, (uint64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}
 3570 
  // Compare a 64-bit register with an arbitrary immediate, staged
  // through rscratch1.
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    Register reg1 = as_Register($src1$$reg);
    uint64_t val = (uint64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}
 3577 
  // Compare two pointer registers (64-bit compare).
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // Compare two narrow-oop registers (32-bit compare).
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}
 3589 
 3590   enc_class aarch64_enc_testp(iRegP src) %{
 3591     Register reg = as_Register($src$$reg);
 3592     __ cmp(reg, zr);
 3593   %}
 3594 
 3595   enc_class aarch64_enc_testn(iRegN src) %{
 3596     Register reg = as_Register($src$$reg);
 3597     __ cmpw(reg, zr);
 3598   %}
 3599 
  // Unconditional branch to a label.
  enc_class aarch64_enc_b(label lbl) %{
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // Conditional branch; the signed condition code comes from the
  // cmpOp operand's cmpcode encoding.
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // Conditional branch using an unsigned condition operand.
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}
 3614 
  // Slow-path subtype check (scan of the secondary supers list).
  // On a hit falls through with result handling per $primary; on a
  // miss branches to the local label.  With $primary set the result
  // register is zeroed on success so callers can test it.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     nullptr, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
 3631 
  // Emit a Java static (or opt-virtual) call.  Three cases:
  //  1. no _method: a runtime stub call (trampoline, runtime reloc);
  //  2. the _ensureMaterializedForStackWalk intrinsic: the call is
  //     elided, with a nop keeping the code size unchanged;
  //  3. a real Java target: trampoline call with static/opt-virtual
  //     relocation plus an interpreter stub (shared when possible).
  // Any trampoline/stub emission can fail if the code cache is full,
  // in which case the compilation is bailed out.
  enc_class aarch64_enc_java_static_call(method meth) %{
    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type));
      if (call == nullptr) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else if (_method->intrinsic_id() == vmIntrinsicID::_ensureMaterializedForStackWalk) {
      // The NOP here is purely to ensure that eliding a call to
      // JVM_EnsureMaterializedForStackWalk doesn't change the code size.
      __ nop();
      __ block_comment("call JVM_EnsureMaterializedForStackWalk (elided)");
    } else {
      int method_index = resolved_method_index(masm);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec));
      if (call == nullptr) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
      if (CodeBuffer::supports_shared_stubs() && _method->can_be_statically_bound()) {
        // Calls of the same statically bound method can share
        // a stub to the interpreter.
        __ code()->shared_stub_to_interp_for(_method, call - __ begin());
      } else {
        // Emit stub for static call
        address stub = CompiledDirectCall::emit_to_interp_stub(masm, call);
        if (stub == nullptr) {
          ciEnv::current()->record_failure("CodeCache is full");
          return;
        }
      }
    }

    __ post_call_nop();

    // Only non uncommon_trap calls need to reinitialize ptrue.
    if (Compile::current()->max_vector_size() > 0 && uncommon_trap_request() == 0) {
      __ reinitialize_ptrue();
    }
  %}
 3677 
  // Emit a Java dynamic (inline-cache) call; bails out the compile if
  // the code cache is full.  SVE predicate ptrue is reinitialized
  // after the call when vectors are in use.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    int method_index = resolved_method_index(masm);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
    __ post_call_nop();
    if (Compile::current()->max_vector_size() > 0) {
      __ reinitialize_ptrue();
    }
  %}
 3690 
  // Post-call epilogue; the VerifyStackAtCalls check is not
  // implemented on AArch64 (call_Unimplemented traps).
  enc_class aarch64_enc_call_epilog() %{
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
 3697 
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blr
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target lives in the code cache: reachable via trampoline call.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == nullptr) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
      __ post_call_nop();
    } else {
      Label retaddr;
      // Make the anchor frame walkable: record the return pc in the
      // thread before the indirect call.
      __ adr(rscratch2, retaddr);
      __ str(rscratch2, Address(rthread, JavaThread::last_Java_pc_offset()));
      __ lea(rscratch1, RuntimeAddress(entry));
      __ blr(rscratch1);
      __ bind(retaddr);
      __ post_call_nop();
    }
    if (Compile::current()->max_vector_size() > 0) {
      __ reinitialize_ptrue();
    }
  %}
 3726 
  // Tail-jump to the rethrow stub (never returns here).
  enc_class aarch64_enc_rethrow() %{
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}
 3730 
  // Method return; in debug builds with SVE vectors, first verify
  // that the ptrue predicate register is still intact.
  enc_class aarch64_enc_ret() %{
#ifdef ASSERT
    if (Compile::current()->max_vector_size() > 0) {
      __ verify_ptrue();
    }
#endif
    __ ret(lr);
  %}
 3739 
 3740   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
 3741     Register target_reg = as_Register($jump_target$$reg);
 3742     __ br(target_reg);
 3743   %}
 3744 
  // Tail jump used for exception forwarding.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
 3753 
 3754 %}
 3755 
 3756 //----------FRAME--------------------------------------------------------------
 3757 // Definition of frame structure and management information.
 3758 //
 3759 //  S T A C K   L A Y O U T    Allocators stack-slot number
 3760 //                             |   (to get allocators register number
 3761 //  G  Owned by    |        |  v    add OptoReg::stack0())
 3762 //  r   CALLER     |        |
 3763 //  o     |        +--------+      pad to even-align allocators stack-slot
 3764 //  w     V        |  pad0  |        numbers; owned by CALLER
 3765 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
 3766 //  h     ^        |   in   |  5
 3767 //        |        |  args  |  4   Holes in incoming args owned by SELF
 3768 //  |     |        |        |  3
 3769 //  |     |        +--------+
 3770 //  V     |        | old out|      Empty on Intel, window on Sparc
 3771 //        |    old |preserve|      Must be even aligned.
 3772 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
 3773 //        |        |   in   |  3   area for Intel ret address
 3774 //     Owned by    |preserve|      Empty on Sparc.
 3775 //       SELF      +--------+
 3776 //        |        |  pad2  |  2   pad to align old SP
 3777 //        |        +--------+  1
 3778 //        |        | locks  |  0
 3779 //        |        +--------+----> OptoReg::stack0(), even aligned
 3780 //        |        |  pad1  | 11   pad to align new SP
 3781 //        |        +--------+
 3782 //        |        |        | 10
 3783 //        |        | spills |  9   spills
 3784 //        V        |        |  8   (pad0 slot for callee)
 3785 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
 3786 //        ^        |  out   |  7
 3787 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
 3788 //     Owned by    +--------+
 3789 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
 3790 //        |    new |preserve|      Must be even-aligned.
 3791 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
 3792 //        |        |        |
 3793 //
 3794 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
 3795 //         known from SELF's arguments and the Java calling convention.
 3796 //         Region 6-7 is determined per call site.
 3797 // Note 2: If the calling convention leaves holes in the incoming argument
 3798 //         area, those holes are owned by SELF.  Holes in the outgoing area
 3799 //         are owned by the CALLEE.  Holes should not be necessary in the
 3800 //         incoming area, as the Java calling convention is completely under
 3801 //         the control of the AD file.  Doubles can be sorted and packed to
 3802 //         avoid holes.  Holes in the outgoing arguments may be necessary for
 3803 //         varargs C calling conventions.
 3804 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
 3805 //         even aligned with pad0 as needed.
 3806 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
 3807 //           (the latter is true on Intel but is it false on AArch64?)
 3808 //         region 6-11 is even aligned; it may be padded out more so that
 3809 //         the region from SP to FP meets the minimum stack alignment.
 3810 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
 3811 //         alignment.  Region 11, pad1, may be dynamically extended so that
 3812 //         SP meets the minimum alignment.
 3813 
frame %{
  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or Method for I2C.
  inline_cache_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Location of compiled Java return values.  Same as C for now.
  // Returns the register pair (hi, lo) holding a value of the given
  // ideal register class: integer-like values in R0, floats/doubles
  // in V0.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                      // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
 3885 
 3886 //----------ATTRIBUTES---------------------------------------------------------
 3887 //----------Operand Attributes-------------------------------------------------
 3888 op_attrib op_cost(1);        // Required cost attribute
 3889 
 3890 //----------Instruction Attributes---------------------------------------------
 3891 ins_attrib ins_cost(INSN_COST); // Required cost attribute
 3892 ins_attrib ins_size(32);        // Required size attribute (in bits)
 3893 ins_attrib ins_short_branch(0); // Required flag: is this instruction
 3894                                 // a non-matching short branch variant
 3895                                 // of some long branch?
 3896 ins_attrib ins_alignment(4);    // Required alignment attribute (must
 3897                                 // be a power of 2) specifies the
 3898                                 // alignment that some part of the
 3899                                 // instruction (not necessarily the
 3900                                 // start) requires.  If > 1, a
 3901                                 // compute_padding() function must be
 3902                                 // provided for the instruction
 3903 
 3904 //----------OPERANDS-----------------------------------------------------------
 3905 // Operand definitions must precede instruction definitions for correct parsing
 3906 // in the ADLC because operands constitute user defined types which are used in
 3907 // instruction definitions.
 3908 
 3909 //----------Simple Operands----------------------------------------------------
 3910 
// Integer operands 32 bit
// 32 bit immediate
// Matches any 32-bit integer constant node.
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 3954 
// Shift values for add/sub extension shift
// (valid extended-register shift amounts are 0..4)
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant strictly greater than one
operand immI_gt_1()
%{
  predicate(n->get_int() > 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant no greater than four
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 255 (low-byte mask)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 65535 (low-halfword mask)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant strictly positive
operand immI_positive()
%{
  predicate(n->get_int() > 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4065 
// BoolTest condition for signed compare
operand immI_cmp_cond()
%{
  predicate(!Matcher::is_unsigned_booltest_pred(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// BoolTest condition for unsigned compare
operand immI_cmpU_cond()
%{
  predicate(Matcher::is_unsigned_booltest_pred(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 255 (0xff)
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 65535 (0xffff)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 4294967295 (0xffffffff)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4117 
 4118 operand immL_bitmask()
 4119 %{
 4120   predicate((n->get_long() != 0)
 4121             && ((n->get_long() & 0xc000000000000000l) == 0)
 4122             && is_power_of_2(n->get_long() + 1));
 4123   match(ConL);
 4124 
 4125   op_cost(0);
 4126   format %{ %}
 4127   interface(CONST_INTER);
 4128 %}
 4129 
// 32 bit mask of contiguous low-order one bits: non-zero, top two bits
// clear, and value+1 a power of two (i.e. 2^k - 1 for k in [1, 30]).
operand immI_bitmask()
%{
  predicate((n->get_int() != 0)
            && ((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit mask of contiguous low-order one bits whose value fits in a
// positive 32-bit int (value < 2^31).
operand immL_positive_bitmaskI()
%{
  predicate((n->get_long() != 0)
            && ((julong)n->get_long() < 0x80000000ULL)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4153 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 5 bit signed integer
operand immI5()
%{
  predicate(Assembler::is_simm(n->get_int(), 5));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 7 bit unsigned integer
operand immIU7()
%{
  predicate(Assembler::is_uimm(n->get_int(), 7));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for scaled or unscaled immediate loads and stores
// (checked with access-size shift 0, i.e. byte granularity)
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4197 
// Offset for 1 byte immediate loads/stores (shift 0; same predicate
// as immIOffset, kept as a distinct operand for the 1-byte rules)
operand immIOffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for 2 byte immediate loads/stores (shift 1)
operand immIOffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for 4 byte immediate loads/stores (shift 2)
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for 8 byte immediate loads/stores (shift 3)
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for 16 byte immediate loads/stores (shift 4)
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4247 
// Long offset in [-256, 65520].
// NOTE(review): this explicit range appears to cover the union of the
// unscaled (simm9) and 8-byte-scaled (uimm12 * 8) immediate forms —
// confirm against Address::offset_ok_for_immed before relying on it.
operand immLOffset()
%{
  predicate(n->get_long() >= -256 && n->get_long() <= 65520);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset for 1 byte immediate loads/stores (shift 0)
operand immLoffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset for 2 byte immediate loads/stores (shift 1)
operand immLoffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset for 4 byte immediate loads/stores (shift 2)
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset for 8 byte immediate loads/stores (shift 3)
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset for 16 byte immediate loads/stores (shift 4)
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4307 
// 5 bit signed long integer
operand immL5()
%{
  predicate(Assembler::is_simm(n->get_long(), 5));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 7 bit unsigned long integer
operand immLU7()
%{
  predicate(Assembler::is_uimm(n->get_long(), 7));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 8 bit signed value: range [-128, 127].
operand immI8()
%{
  predicate(n->get_int() <= 127 && n->get_int() >= -128);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 8 bit signed value (simm8), or #simm8 LSL 8.
operand immI8_shift8()
%{
  predicate((n->get_int() <= 127 && n->get_int() >= -128) ||
            (n->get_int() <= 32512 && n->get_int() >= -32768 && (n->get_int() & 0xff) == 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 8 bit signed value (simm8), or #simm8 LSL 8.
operand immL8_shift8()
%{
  predicate((n->get_long() <= 127 && n->get_long() >= -128) ||
            (n->get_long() <= 32512 && n->get_long() >= -32768 && (n->get_long() & 0xff) == 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4364 
// 8 bit integer valid for vector add sub immediate
operand immBAddSubV()
%{
  predicate(n->get_int() <= 255 && n->get_int() >= -255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((int64_t)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for vector add sub immediate
operand immIAddSubV()
%{
  predicate(Assembler::operand_valid_for_sve_add_sub_immediate((int64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate

// value valid for an SVE logical immediate at byte element size
operand immBLog()
%{
  predicate(Assembler::operand_valid_for_sve_logical_immediate(BitsPerByte, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// value valid for an SVE logical immediate at halfword element size
operand immSLog()
%{
  predicate(Assembler::operand_valid_for_sve_logical_immediate(BitsPerShort, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit value valid for a logical immediate
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4428 
// Integer operands 64 bit
// 64 bit immediate (any value)
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for addv subv immediate
operand immLAddSubV()
%{
  predicate(Assembler::operand_valid_for_sve_add_sub_immediate(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (uint64_t)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4502 
// Pointer operands
// Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// nullptr Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
// Matches only when the constant is exactly the card table's
// byte_map_base; excluded under Shenandoah (SHENANDOAHGC_ONLY clause).
operand immByteMapBase()
%{
  // Get base of card map
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            SHENANDOAHGC_ONLY(!BarrierSet::barrier_set()->is_a(BarrierSet::ShenandoahBarrierSet) &&)
            (CardTable::CardValue*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4550 
// Float and Double operands
// Double Immediate
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double immediate valid for the assembler's packed FP immediate
// encoding (see Assembler::operand_valid_for_float_immediate).
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float immediate valid for the assembler's packed FP immediate
// encoding (widened to double for the check).
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4611 
// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow nullptr Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4642 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4676 
 4677 // Integer 64 bit Register not Special
 4678 operand iRegLNoSp()
 4679 %{
 4680   constraint(ALLOC_IN_RC(no_special_reg));
 4681   match(RegL);
 4682   match(iRegL_R0);
 4683   format %{ %}
 4684   interface(REG_INTER);
 4685 %}
 4686 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
// NOTE(review): "not Special" presumably means the no_special_ptr_reg
// class excludes SP and other reserved registers — verify against the
// register class definitions at the top of this file.
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// This operand is not allowed to use rfp even if
// rfp is not used to hold the frame pointer.
operand iRegPNoSpNoRfp()
%{
  constraint(ALLOC_IN_RC(no_special_no_rfp_ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4731 
// Pointer operands constrained to a single physical register
// (ALLOC_IN_RC(rN_reg) pins the allocator to that one register).

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4815 
// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Integer 32 bit Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4882 
 4883 
// Narrow Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
// (previous comment incorrectly said "Integer 64 bit Register")
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4905 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Generic vector class. This will be used for
// all vector operands, including NEON and SVE.
operand vReg()
%{
  constraint(ALLOC_IN_RC(dynamic));
  match(VecA);
  match(VecD);
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register matching ideal type VecA
operand vecA()
%{
  constraint(ALLOC_IN_RC(vectora_reg));
  match(VecA);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register matching ideal type VecD
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register matching ideal type VecX
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4973 
// Double register operands pinned to a specific vector register.

// Double Register V0 only
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register V1 only
operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register V2 only
operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register V3 only
operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register V4 only
operand vRegD_V4()
%{
  constraint(ALLOC_IN_RC(v4_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register V5 only
operand vRegD_V5()
%{
  constraint(ALLOC_IN_RC(v5_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register V6 only
operand vRegD_V6()
%{
  constraint(ALLOC_IN_RC(v6_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register V7 only
operand vRegD_V7()
%{
  constraint(ALLOC_IN_RC(v7_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register V12 only
operand vRegD_V12()
%{
  constraint(ALLOC_IN_RC(v12_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register V13 only
operand vRegD_V13()
%{
  constraint(ALLOC_IN_RC(v13_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5063 
// Predicate register operand (RegVectMask, pr_reg class)
operand pReg()
%{
  constraint(ALLOC_IN_RC(pr_reg));
  match(RegVectMask);
  match(pRegGov);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Governing predicate register operand (gov_pr class)
operand pRegGov()
%{
  constraint(ALLOC_IN_RC(gov_pr));
  match(RegVectMask);
  match(pReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Predicate register P0 only
operand pRegGov_P0()
%{
  constraint(ALLOC_IN_RC(p0_reg));
  match(RegVectMask);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Predicate register P1 only
operand pRegGov_P1()
%{
  constraint(ALLOC_IN_RC(p1_reg));
  match(RegVectMask);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5101 
 5102 // Flags register, used as output of signed compare instructions
 5103 
// note that on AArch64 we also use this register as the output for
// floating point compare instructions (CmpF CmpD). this ensures
 5106 // that ordered inequality tests use GT, GE, LT or LE none of which
 5107 // pass through cases where the result is unordered i.e. one or both
 5108 // inputs to the compare is a NaN. this means that the ideal code can
 5109 // replace e.g. a GT with an LE and not end up capturing the NaN case
 5110 // (where the comparison should always fail). EQ and NE tests are
 5111 // always generated in ideal code so that unordered folds into the NE
 5112 // case, matching the behaviour of AArch64 NE.
 5113 //
 5114 // This differs from x86 where the outputs of FP compares use a
 5115 // special FP flags registers and where compares based on this
 5116 // register are distinguished into ordered inequalities (cmpOpUCF) and
 5117 // EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
 5118 // to explicitly handle the unordered case in branches. x86 also has
 5119 // to include extra CMoveX rules to accept a cmpOpUCF input.
 5120 
// Flags register operand for signed (and FP) compares; see the
// commentary above for why FP compares also target this register.
operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
// NOTE(review): same int_flags class as rFlagsReg; presumably kept as
// a distinct operand so rules can select unsigned condition codes —
// verify against the cmpOpU rules.
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
 5141 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
// NOTE(review): the trailing "link_reg" comment looks stale — the
// register class named here is thread_reg, not the link register.
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5164 
//----------Memory Operands----------------------------------------------------

// Base register only, no index and no displacement.
// (index(0xffffffff) appears to be the ADL encoding for "no index
// register" — it is used the same way by the indOff* operands below.)
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}
 5180 
// Base plus sign-extended 32 bit index scaled by immIScale
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// Base plus 64 bit index scaled by immIScale
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// Base plus sign-extended 32 bit index, unscaled
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Base plus 64 bit index, unscaled
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
 5238 
// Base plus int immediate offset operands; the offset operand type
// (immIOffsetN) fixes the access size the offset was validated for.

// Base plus offset valid for 1 byte access
operand indOffI1(iRegP reg, immIOffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Base plus offset valid for 2 byte access
operand indOffI2(iRegP reg, immIOffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Base plus offset valid for 4 byte access
operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Base plus offset valid for 8 byte access
operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Base plus offset valid for 16 byte access
operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5308 
// Pointer base plus long immediate offset, for 1-byte-wide accesses.
// Long-typed twin of indOffI1 (offset constrained by immLoffset1).
operand indOffL1(iRegP reg, immLoffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5322 
// Pointer base plus long immediate offset, for 2-byte-wide accesses
// (offset constrained by immLoffset2).
operand indOffL2(iRegP reg, immLoffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5336 
// Pointer base plus long immediate offset, for 4-byte-wide accesses
// (offset constrained by immLoffset4).
operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5350 
// Pointer base plus long immediate offset, for 8-byte-wide accesses
// (offset constrained by immLoffset8).
operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5364 
// Pointer base plus long immediate offset, for 16-byte-wide (vector)
// accesses (offset constrained by immLoffset16).
operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5378 
// Indirect addressing through a long value reinterpreted as a pointer
// (CastX2P). Lets a raw long address be used as a memory base directly,
// without a separate move into a P register.
operand indirectX2P(iRegL reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(CastX2P reg);
  op_cost(0);
  format %{ "[$reg]\t# long -> ptr" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}
 5392 
// Long-reinterpreted-as-pointer base (CastX2P) plus long immediate offset:
// matches (AddP (CastX2P reg) off), yielding [reg, #off].
operand indOffX2P(iRegL reg, immLOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (CastX2P reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# long -> ptr" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5406 
// Indirect addressing through a narrow (compressed) oop. Only legal when
// CompressedOops::shift() == 0, i.e. when DecodeN is an identity mapping
// and the narrow register value can be used as the address directly.
operand indirectN(iRegN reg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}
 5421 
// Narrow-oop base plus sign-extended int index shifted by a constant scale:
// (AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale)). Requires identity
// narrow-oop decoding (shift 0) and that the scaled form is legal for every
// memory use of the AddP (size_fits_all_mem_uses).
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}
 5436 
// Narrow-oop base plus long index shifted by a constant scale:
// (AddP (DecodeN reg) (LShiftL lreg scale)). Same predicate conditions as
// indIndexScaledI2LN, without the int-to-long conversion on the index.
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}
 5451 
// Narrow-oop base plus sign-extended int index, unscaled:
// (AddP (DecodeN reg) (ConvI2L ireg)). Requires identity narrow-oop decode.
operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}
 5466 
// Narrow-oop base plus long register index, unscaled:
// (AddP (DecodeN reg) lreg). Requires identity narrow-oop decode.
operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
 5481 
// Narrow-oop base plus int immediate offset: (AddP (DecodeN reg) off).
// Requires identity narrow-oop decode; no index register.
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5496 
// Narrow-oop base plus long immediate offset: long-typed twin of indOffIN.
operand indOffLN(iRegN reg, immLOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5511 
 5512 
 5513 //----------Special Memory Operands--------------------------------------------
 5514 // Stack Slot Operand - This operand is used for loading and storing temporary
 5515 //                      values on the stack where a match requires a value to
 5516 //                      flow through memory.
// Pointer-sized stack slot: address is stack-pointer-relative, with the
// allocated slot's offset supplied as the displacement. Generated only by
// the matcher for values spilled through memory, hence no match rule.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
 5531 
// Int stack slot; see stackSlotP for the addressing convention.
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
 5545 
// Float stack slot; see stackSlotP for the addressing convention.
operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
 5559 
// Double stack slot; see stackSlotP for the addressing convention.
operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
 5573 
// Long stack slot; see stackSlotP for the addressing convention.
operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
 5587 
 5588 // Operands for expressing Control Flow
 5589 // NOTE: Label is a predefined operand which should not be redefined in
 5590 //       the AD file. It is generically handled within the ADLC.
 5591 
 5592 //----------Conditional Branch Operands----------------------------------------
 5593 // Comparison Op  - This is the operation of the comparison, and is limited to
 5594 //                  the following set of codes:
 5595 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
 5596 //
 5597 // Other attributes of the comparison, such as unsignedness, are specified
 5598 // by the comparison instruction that sets a condition code flags register.
 5599 // That result is represented by a flags operand whose subtype is appropriate
 5600 // to the unsignedness (etc.) of the comparison.
 5601 //
 5602 // Later, the instruction which matches both the Comparison Op (a Bool) and
 5603 // the flags (produced by the Cmp) specifies the coding of the comparison op
 5604 // by matching a specific subtype of Bool operand below, such as cmpOpU.
 5605 
 5606 // used for signed integral comparisons and fp comparisons
 5607 
// Condition operand for signed integral and FP comparisons. The hex values
// are the A64 condition-code encodings (eq=0x0, ne=0x1, lt=0xb, ge=0xa,
// le=0xd, gt=0xc, vs=0x6, vc=0x7).
operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 5624 
 5625 // used for unsigned integral comparisons
 5626 
// Condition operand for unsigned integral comparisons: ordering conditions
// use the unsigned A64 codes (lo=0x3, hs=0x2, ls=0x9, hi=0x8).
operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 5643 
 5644 // used for certain integral comparisons which can be
 5645 // converted to cbxx or tbxx instructions
 5646 
// Like cmpOp but the predicate restricts matching to eq/ne tests only
// (candidates for cbz/cbnz/tbz/tbnz). COND_INTER still requires the full
// encoding table even though only eq/ne can occur.
operand cmpOpEqNe()
%{
  match(Bool);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 5666 
 5667 // used for certain integral comparisons which can be
 5668 // converted to cbxx or tbxx instructions
 5669 
// Like cmpOp but restricted to lt/ge tests (sign-bit tests, candidates for
// tbz/tbnz-style branches).
operand cmpOpLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 5690 
 5691 // used for certain unsigned integral comparisons which can be
 5692 // converted to cbxx or tbxx instructions
 5693 
// Unsigned condition operand restricted to eq/ne/le/gt tests. Uses the
// unsigned A64 codes for the ordering conditions (see cmpOpU).
operand cmpOpUEqNeLeGt()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq ||
            n->as_Bool()->_test._test == BoolTest::ne ||
            n->as_Bool()->_test._test == BoolTest::le ||
            n->as_Bool()->_test._test == BoolTest::gt);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 5716 
 5717 // Special operand allowing long args to int ops to be truncated for free
 5718 
// Register operand that absorbs a (ConvL2I iRegL) for free: 32-bit ops read
// the low half of the long register directly, so the explicit truncation
// instruction can be elided (see the iRegIorL2I comment block below).
operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}
 5729 
// Register operand that absorbs a (CastX2P iRegL) for free: the long value
// is used directly as a pointer with no extra move.
operand iRegL2P(iRegL reg) %{

  op_cost(0);

  match(CastX2P reg);

  format %{ "l2p($reg)" %}

  interface(REG_INTER)
%}
 5740 
// Vector memory operand classes, one per access width in bytes. Restricted
// to base-register, base+index and width-matched immediate-offset forms.
opclass vmem2(indirect, indIndex, indOffI2, indOffL2);
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
 5745 
 5746 //----------OPERAND CLASSES----------------------------------------------------
 5747 // Operand Classes are groups of operands that are used as to simplify
 5748 // instruction definitions by not requiring the AD writer to specify
 5749 // separate instructions for every form of operand when the
 5750 // instruction accepts multiple operand types with the same basic
 5751 // encoding and format. The classic case of this is memory operands.
 5752 
 5753 // memory is used to define read/write location for load/store
 5754 // instruction defs. we can turn a memory op into an Address
 5755 
// memoryN groups the operands legal for an N-byte-wide access; the
// immediate-offset members differ per width (indOffI1/L1 ... indOffI8/L8).
// Note that memory1/memory2 omit the narrow-oop immediate-offset forms
// (indOffIN/indOffLN) which memory4/memory8 include.
opclass memory1(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI1, indOffL1,
                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indirectX2P, indOffX2P);

opclass memory2(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI2, indOffL2,
                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indirectX2P, indOffX2P);

opclass memory4(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI4, indOffL4,
                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P);

opclass memory8(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI8, indOffL8,
                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P);

// All of the memory operands. For the pipeline description.
opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex,
               indOffI1, indOffL1, indOffI2, indOffL2, indOffI4, indOffL4, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P);
 5772 
 5773 
 5774 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
 5775 // operations. it allows the src to be either an iRegI or a (ConvL2I
 5776 // iRegL). in the latter case the l2i normally planted for a ConvL2I
 5777 // can be elided because the 32-bit instruction will just employ the
 5778 // lower 32 bits anyway.
 5779 //
 5780 // n.b. this does not elide all L2I conversions. if the truncated
 5781 // value is consumed by more than one operation then the ConvL2I
 5782 // cannot be bundled into the consuming nodes so an l2i gets planted
 5783 // (actually a movw $dst $src) and the downstream instructions consume
 5784 // the result of the l2i as an iRegI input. That's a shame since the
 5785 // movw is actually redundant but its not too costly.
 5786 
opclass iRegIorL2I(iRegI, iRegL2I);
// iRegPorL2P likewise lets a (CastX2P iRegL) feed pointer operations directly.
opclass iRegPorL2P(iRegP, iRegL2P);
 5789 
 5790 //----------PIPELINE-----------------------------------------------------------
 5791 // Rules which define the behavior of the target architectures pipeline.
 5792 
 5793 // For specific pipelines, eg A53, define the stages of that pipeline
 5794 //pipe_desc(ISS, EX1, EX2, WR);
// Map A53-style stage names (issue, execute 1/2, writeback) onto the generic
// six-stage pipe_desc(S0..S5) declared inside the pipeline block below.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
 5799 
// Pipeline description used by the instruction scheduler.
 5801 pipeline %{
 5802 
 5803 attributes %{
 5804   // ARM instructions are of fixed length
 5805   fixed_size_instructions;        // Fixed size instructions TODO does
 5806   max_instructions_per_bundle = 4;   // A53 = 2, A57 = 4
 5807   // ARM instructions come in 32-bit word units
 5808   instruction_unit_size = 4;         // An instruction is 4 bytes long
 5809   instruction_fetch_unit_size = 64;  // The processor fetches one line
 5810   instruction_fetch_units = 1;       // of 64 bytes
 5811 
 5812   // List of nop instructions
 5813   nops( MachNop );
 5814 %}
 5815 
 5816 // We don't use an actual pipeline model so don't care about resources
 5817 // or description. we do use pipeline classes to introduce fixed
 5818 // latencies
 5819 
 5820 //----------RESOURCES----------------------------------------------------------
 5821 // Resources are the functional units available to the machine
 5822 
 5823 resources( INS0, INS1, INS01 = INS0 | INS1,
 5824            ALU0, ALU1, ALU = ALU0 | ALU1,
 5825            MAC,
 5826            DIV,
 5827            BRANCH,
 5828            LDST,
 5829            NEON_FP);
 5830 
 5831 //----------PIPELINE DESCRIPTION-----------------------------------------------
 5832 // Pipeline Description specifies the stages in the machine's pipeline
 5833 
 5834 // Define the pipeline as a generic 6 stage pipeline
 5835 pipe_desc(S0, S1, S2, S3, S4, S5);
 5836 
 5837 //----------PIPELINE CLASSES---------------------------------------------------
 5838 // Pipeline Classes describe the stages in which input and output are
 5839 // referenced by the hardware pipeline.
 5840 
 5841 pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
 5842 %{
 5843   single_instruction;
 5844   src1   : S1(read);
 5845   src2   : S2(read);
 5846   dst    : S5(write);
 5847   INS01  : ISS;
 5848   NEON_FP : S5;
 5849 %}
 5850 
 5851 pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
 5852 %{
 5853   single_instruction;
 5854   src1   : S1(read);
 5855   src2   : S2(read);
 5856   dst    : S5(write);
 5857   INS01  : ISS;
 5858   NEON_FP : S5;
 5859 %}
 5860 
 5861 pipe_class fp_uop_s(vRegF dst, vRegF src)
 5862 %{
 5863   single_instruction;
 5864   src    : S1(read);
 5865   dst    : S5(write);
 5866   INS01  : ISS;
 5867   NEON_FP : S5;
 5868 %}
 5869 
 5870 pipe_class fp_uop_d(vRegD dst, vRegD src)
 5871 %{
 5872   single_instruction;
 5873   src    : S1(read);
 5874   dst    : S5(write);
 5875   INS01  : ISS;
 5876   NEON_FP : S5;
 5877 %}
 5878 
 5879 pipe_class fp_d2f(vRegF dst, vRegD src)
 5880 %{
 5881   single_instruction;
 5882   src    : S1(read);
 5883   dst    : S5(write);
 5884   INS01  : ISS;
 5885   NEON_FP : S5;
 5886 %}
 5887 
 5888 pipe_class fp_f2d(vRegD dst, vRegF src)
 5889 %{
 5890   single_instruction;
 5891   src    : S1(read);
 5892   dst    : S5(write);
 5893   INS01  : ISS;
 5894   NEON_FP : S5;
 5895 %}
 5896 
 5897 pipe_class fp_f2i(iRegINoSp dst, vRegF src)
 5898 %{
 5899   single_instruction;
 5900   src    : S1(read);
 5901   dst    : S5(write);
 5902   INS01  : ISS;
 5903   NEON_FP : S5;
 5904 %}
 5905 
 5906 pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
 5907 %{
 5908   single_instruction;
 5909   src    : S1(read);
 5910   dst    : S5(write);
 5911   INS01  : ISS;
 5912   NEON_FP : S5;
 5913 %}
 5914 
 5915 pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
 5916 %{
 5917   single_instruction;
 5918   src    : S1(read);
 5919   dst    : S5(write);
 5920   INS01  : ISS;
 5921   NEON_FP : S5;
 5922 %}
 5923 
 5924 pipe_class fp_l2f(vRegF dst, iRegL src)
 5925 %{
 5926   single_instruction;
 5927   src    : S1(read);
 5928   dst    : S5(write);
 5929   INS01  : ISS;
 5930   NEON_FP : S5;
 5931 %}
 5932 
 5933 pipe_class fp_d2i(iRegINoSp dst, vRegD src)
 5934 %{
 5935   single_instruction;
 5936   src    : S1(read);
 5937   dst    : S5(write);
 5938   INS01  : ISS;
 5939   NEON_FP : S5;
 5940 %}
 5941 
 5942 pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
 5943 %{
 5944   single_instruction;
 5945   src    : S1(read);
 5946   dst    : S5(write);
 5947   INS01  : ISS;
 5948   NEON_FP : S5;
 5949 %}
 5950 
 5951 pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
 5952 %{
 5953   single_instruction;
 5954   src    : S1(read);
 5955   dst    : S5(write);
 5956   INS01  : ISS;
 5957   NEON_FP : S5;
 5958 %}
 5959 
 5960 pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
 5961 %{
 5962   single_instruction;
 5963   src    : S1(read);
 5964   dst    : S5(write);
 5965   INS01  : ISS;
 5966   NEON_FP : S5;
 5967 %}
 5968 
 5969 pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
 5970 %{
 5971   single_instruction;
 5972   src1   : S1(read);
 5973   src2   : S2(read);
 5974   dst    : S5(write);
 5975   INS0   : ISS;
 5976   NEON_FP : S5;
 5977 %}
 5978 
 5979 pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
 5980 %{
 5981   single_instruction;
 5982   src1   : S1(read);
 5983   src2   : S2(read);
 5984   dst    : S5(write);
 5985   INS0   : ISS;
 5986   NEON_FP : S5;
 5987 %}
 5988 
 5989 pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
 5990 %{
 5991   single_instruction;
 5992   cr     : S1(read);
 5993   src1   : S1(read);
 5994   src2   : S1(read);
 5995   dst    : S3(write);
 5996   INS01  : ISS;
 5997   NEON_FP : S3;
 5998 %}
 5999 
 6000 pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
 6001 %{
 6002   single_instruction;
 6003   cr     : S1(read);
 6004   src1   : S1(read);
 6005   src2   : S1(read);
 6006   dst    : S3(write);
 6007   INS01  : ISS;
 6008   NEON_FP : S3;
 6009 %}
 6010 
 6011 pipe_class fp_imm_s(vRegF dst)
 6012 %{
 6013   single_instruction;
 6014   dst    : S3(write);
 6015   INS01  : ISS;
 6016   NEON_FP : S3;
 6017 %}
 6018 
 6019 pipe_class fp_imm_d(vRegD dst)
 6020 %{
 6021   single_instruction;
 6022   dst    : S3(write);
 6023   INS01  : ISS;
 6024   NEON_FP : S3;
 6025 %}
 6026 
 6027 pipe_class fp_load_constant_s(vRegF dst)
 6028 %{
 6029   single_instruction;
 6030   dst    : S4(write);
 6031   INS01  : ISS;
 6032   NEON_FP : S4;
 6033 %}
 6034 
 6035 pipe_class fp_load_constant_d(vRegD dst)
 6036 %{
 6037   single_instruction;
 6038   dst    : S4(write);
 6039   INS01  : ISS;
 6040   NEON_FP : S4;
 6041 %}
 6042 
 6043 //------- Integer ALU operations --------------------------
 6044 
 6045 // Integer ALU reg-reg operation
 6046 // Operands needed in EX1, result generated in EX2
 6047 // Eg.  ADD     x0, x1, x2
 6048 pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
 6049 %{
 6050   single_instruction;
 6051   dst    : EX2(write);
 6052   src1   : EX1(read);
 6053   src2   : EX1(read);
 6054   INS01  : ISS; // Dual issue as instruction 0 or 1
 6055   ALU    : EX2;
 6056 %}
 6057 
 6058 // Integer ALU reg-reg operation with constant shift
 6059 // Shifted register must be available in LATE_ISS instead of EX1
 6060 // Eg.  ADD     x0, x1, x2, LSL #2
 6061 pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
 6062 %{
 6063   single_instruction;
 6064   dst    : EX2(write);
 6065   src1   : EX1(read);
 6066   src2   : ISS(read);
 6067   INS01  : ISS;
 6068   ALU    : EX2;
 6069 %}
 6070 
 6071 // Integer ALU reg operation with constant shift
 6072 // Eg.  LSL     x0, x1, #shift
 6073 pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
 6074 %{
 6075   single_instruction;
 6076   dst    : EX2(write);
 6077   src1   : ISS(read);
 6078   INS01  : ISS;
 6079   ALU    : EX2;
 6080 %}
 6081 
 6082 // Integer ALU reg-reg operation with variable shift
 6083 // Both operands must be available in LATE_ISS instead of EX1
 6084 // Result is available in EX1 instead of EX2
 6085 // Eg.  LSLV    x0, x1, x2
 6086 pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
 6087 %{
 6088   single_instruction;
 6089   dst    : EX1(write);
 6090   src1   : ISS(read);
 6091   src2   : ISS(read);
 6092   INS01  : ISS;
 6093   ALU    : EX1;
 6094 %}
 6095 
 6096 // Integer ALU reg-reg operation with extract
 6097 // As for _vshift above, but result generated in EX2
 6098 // Eg.  EXTR    x0, x1, x2, #N
 6099 pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
 6100 %{
 6101   single_instruction;
 6102   dst    : EX2(write);
 6103   src1   : ISS(read);
 6104   src2   : ISS(read);
 6105   INS1   : ISS; // Can only dual issue as Instruction 1
 6106   ALU    : EX1;
 6107 %}
 6108 
 6109 // Integer ALU reg operation
 6110 // Eg.  NEG     x0, x1
 6111 pipe_class ialu_reg(iRegI dst, iRegI src)
 6112 %{
 6113   single_instruction;
 6114   dst    : EX2(write);
 6115   src    : EX1(read);
 6116   INS01  : ISS;
 6117   ALU    : EX2;
 6118 %}
 6119 
 6120 // Integer ALU reg mmediate operation
 6121 // Eg.  ADD     x0, x1, #N
 6122 pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
 6123 %{
 6124   single_instruction;
 6125   dst    : EX2(write);
 6126   src1   : EX1(read);
 6127   INS01  : ISS;
 6128   ALU    : EX2;
 6129 %}
 6130 
 6131 // Integer ALU immediate operation (no source operands)
 6132 // Eg.  MOV     x0, #N
 6133 pipe_class ialu_imm(iRegI dst)
 6134 %{
 6135   single_instruction;
 6136   dst    : EX1(write);
 6137   INS01  : ISS;
 6138   ALU    : EX1;
 6139 %}
 6140 
 6141 //------- Compare operation -------------------------------
 6142 
 6143 // Compare reg-reg
 6144 // Eg.  CMP     x0, x1
 6145 pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
 6146 %{
 6147   single_instruction;
 6148 //  fixed_latency(16);
 6149   cr     : EX2(write);
 6150   op1    : EX1(read);
 6151   op2    : EX1(read);
 6152   INS01  : ISS;
 6153   ALU    : EX2;
 6154 %}
 6155 
 6156 // Compare reg-reg
 6157 // Eg.  CMP     x0, #N
 6158 pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
 6159 %{
 6160   single_instruction;
 6161 //  fixed_latency(16);
 6162   cr     : EX2(write);
 6163   op1    : EX1(read);
 6164   INS01  : ISS;
 6165   ALU    : EX2;
 6166 %}
 6167 
 6168 //------- Conditional instructions ------------------------
 6169 
 6170 // Conditional no operands
 6171 // Eg.  CSINC   x0, zr, zr, <cond>
 6172 pipe_class icond_none(iRegI dst, rFlagsReg cr)
 6173 %{
 6174   single_instruction;
 6175   cr     : EX1(read);
 6176   dst    : EX2(write);
 6177   INS01  : ISS;
 6178   ALU    : EX2;
 6179 %}
 6180 
 6181 // Conditional 2 operand
 6182 // EG.  CSEL    X0, X1, X2, <cond>
 6183 pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
 6184 %{
 6185   single_instruction;
 6186   cr     : EX1(read);
 6187   src1   : EX1(read);
 6188   src2   : EX1(read);
 6189   dst    : EX2(write);
 6190   INS01  : ISS;
 6191   ALU    : EX2;
 6192 %}
 6193 
 6194 // Conditional 2 operand
 6195 // EG.  CSEL    X0, X1, X2, <cond>
 6196 pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
 6197 %{
 6198   single_instruction;
 6199   cr     : EX1(read);
 6200   src    : EX1(read);
 6201   dst    : EX2(write);
 6202   INS01  : ISS;
 6203   ALU    : EX2;
 6204 %}
 6205 
 6206 //------- Multiply pipeline operations --------------------
 6207 
 6208 // Multiply reg-reg
 6209 // Eg.  MUL     w0, w1, w2
 6210 pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
 6211 %{
 6212   single_instruction;
 6213   dst    : WR(write);
 6214   src1   : ISS(read);
 6215   src2   : ISS(read);
 6216   INS01  : ISS;
 6217   MAC    : WR;
 6218 %}
 6219 
 6220 // Multiply accumulate
 6221 // Eg.  MADD    w0, w1, w2, w3
 6222 pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
 6223 %{
 6224   single_instruction;
 6225   dst    : WR(write);
 6226   src1   : ISS(read);
 6227   src2   : ISS(read);
 6228   src3   : ISS(read);
 6229   INS01  : ISS;
 6230   MAC    : WR;
 6231 %}
 6232 
 6233 // Eg.  MUL     w0, w1, w2
 6234 pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
 6235 %{
 6236   single_instruction;
 6237   fixed_latency(3); // Maximum latency for 64 bit mul
 6238   dst    : WR(write);
 6239   src1   : ISS(read);
 6240   src2   : ISS(read);
 6241   INS01  : ISS;
 6242   MAC    : WR;
 6243 %}
 6244 
 6245 // Multiply accumulate
 6246 // Eg.  MADD    w0, w1, w2, w3
 6247 pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
 6248 %{
 6249   single_instruction;
 6250   fixed_latency(3); // Maximum latency for 64 bit mul
 6251   dst    : WR(write);
 6252   src1   : ISS(read);
 6253   src2   : ISS(read);
 6254   src3   : ISS(read);
 6255   INS01  : ISS;
 6256   MAC    : WR;
 6257 %}
 6258 
 6259 //------- Divide pipeline operations --------------------
 6260 
 6261 // Eg.  SDIV    w0, w1, w2
 6262 pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
 6263 %{
 6264   single_instruction;
 6265   fixed_latency(8); // Maximum latency for 32 bit divide
 6266   dst    : WR(write);
 6267   src1   : ISS(read);
 6268   src2   : ISS(read);
 6269   INS0   : ISS; // Can only dual issue as instruction 0
 6270   DIV    : WR;
 6271 %}
 6272 
 6273 // Eg.  SDIV    x0, x1, x2
 6274 pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
 6275 %{
 6276   single_instruction;
 6277   fixed_latency(16); // Maximum latency for 64 bit divide
 6278   dst    : WR(write);
 6279   src1   : ISS(read);
 6280   src2   : ISS(read);
 6281   INS0   : ISS; // Can only dual issue as instruction 0
 6282   DIV    : WR;
 6283 %}
 6284 
 6285 //------- Load pipeline operations ------------------------
 6286 
 6287 // Load - prefetch
 6288 // Eg.  PFRM    <mem>
 6289 pipe_class iload_prefetch(memory mem)
 6290 %{
 6291   single_instruction;
 6292   mem    : ISS(read);
 6293   INS01  : ISS;
 6294   LDST   : WR;
 6295 %}
 6296 
 6297 // Load - reg, mem
 6298 // Eg.  LDR     x0, <mem>
 6299 pipe_class iload_reg_mem(iRegI dst, memory mem)
 6300 %{
 6301   single_instruction;
 6302   dst    : WR(write);
 6303   mem    : ISS(read);
 6304   INS01  : ISS;
 6305   LDST   : WR;
 6306 %}
 6307 
 6308 // Load - reg, reg
 6309 // Eg.  LDR     x0, [sp, x1]
 6310 pipe_class iload_reg_reg(iRegI dst, iRegI src)
 6311 %{
 6312   single_instruction;
 6313   dst    : WR(write);
 6314   src    : ISS(read);
 6315   INS01  : ISS;
 6316   LDST   : WR;
 6317 %}
 6318 
 6319 //------- Store pipeline operations -----------------------
 6320 
 6321 // Store - zr, mem
 6322 // Eg.  STR     zr, <mem>
 6323 pipe_class istore_mem(memory mem)
 6324 %{
 6325   single_instruction;
 6326   mem    : ISS(read);
 6327   INS01  : ISS;
 6328   LDST   : WR;
 6329 %}
 6330 
 6331 // Store - reg, mem
 6332 // Eg.  STR     x0, <mem>
 6333 pipe_class istore_reg_mem(iRegI src, memory mem)
 6334 %{
 6335   single_instruction;
 6336   mem    : ISS(read);
 6337   src    : EX2(read);
 6338   INS01  : ISS;
 6339   LDST   : WR;
 6340 %}
 6341 
 6342 // Store - reg, reg
 6343 // Eg. STR      x0, [sp, x1]
 6344 pipe_class istore_reg_reg(iRegI dst, iRegI src)
 6345 %{
 6346   single_instruction;
 6347   dst    : ISS(read);
 6348   src    : EX2(read);
 6349   INS01  : ISS;
 6350   LDST   : WR;
 6351 %}
 6352 
 6353 //------- Store pipeline operations -----------------------
 6354 
 6355 // Branch
 6356 pipe_class pipe_branch()
 6357 %{
 6358   single_instruction;
 6359   INS01  : ISS;
 6360   BRANCH : EX1;
 6361 %}
 6362 
 6363 // Conditional branch
 6364 pipe_class pipe_branch_cond(rFlagsReg cr)
 6365 %{
 6366   single_instruction;
 6367   cr     : EX1(read);
 6368   INS01  : ISS;
 6369   BRANCH : EX1;
 6370 %}
 6371 
 6372 // Compare & Branch
 6373 // EG.  CBZ/CBNZ
 6374 pipe_class pipe_cmp_branch(iRegI op1)
 6375 %{
 6376   single_instruction;
 6377   op1    : EX1(read);
 6378   INS01  : ISS;
 6379   BRANCH : EX1;
 6380 %}
 6381 
 6382 //------- Synchronisation operations ----------------------
 6383 
 6384 // Any operation requiring serialization.
 6385 // EG.  DMB/Atomic Ops/Load Acquire/Str Release
 6386 pipe_class pipe_serial()
 6387 %{
 6388   single_instruction;
 6389   force_serialization;
 6390   fixed_latency(16);
 6391   INS01  : ISS(2); // Cannot dual issue with any other instruction
 6392   LDST   : WR;
 6393 %}
 6394 
// Generic big/slow expanded idiom - also serialized
// Catch-all for multi-instruction expansions; like pipe_serial but
// modelled as up to 10 instructions over multiple bundles.
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
 6405 
// Empty pipeline class
// Zero-latency placeholder; also used for MachNop (see define below).
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}
 6412 
// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}
 6419 
// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);    // pessimistic latency, no resource modelling
%}
 6426 
// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);    // pessimistic latency, no resource modelling
%}
 6433 
// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);   // calls are assumed very expensive
%}
 6440 
// Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty;
%}
 6445 
 6446 %}
 6447 //----------INSTRUCTIONS-------------------------------------------------------
 6448 //
 6449 // match      -- States which machine-independent subtree may be replaced
 6450 //               by this instruction.
 6451 // ins_cost   -- The estimated cost of this instruction is used by instruction
 6452 //               selection to identify a minimum cost tree of machine
 6453 //               instructions that matches a tree of machine-independent
 6454 //               instructions.
 6455 // format     -- A string providing the disassembly for this instruction.
 6456 //               The value of an instruction's operand may be inserted
 6457 //               by referring to it with a '$' prefix.
 6458 // opcode     -- Three instruction opcodes may be provided.  These are referred
 6459 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
 6461 //               indicate the type of machine instruction, while secondary
 6462 //               and tertiary are often used for prefix options or addressing
 6463 //               modes.
 6464 // ins_encode -- A list of encode classes with parameters. The encode class
 6465 //               name must have been defined in an 'enc_class' specification
 6466 //               in the encode section of the architecture description.
 6467 
 6468 // ============================================================================
 6469 // Memory (Load/Store) Instructions
 6470 
 6471 // Load Instructions
 6472 
// Load Byte (8 bit signed)
instruct loadB(iRegINoSp dst, memory1 mem)
%{
  match(Set dst (LoadB mem));
  // Plain loads only; acquiring loads are matched by loadB_volatile.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6486 
// Load Byte (8 bit signed) into long
instruct loadB2L(iRegLNoSp dst, memory1 mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  // n is the ConvI2L; the load being checked is its input.
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6500 
// Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory1 mem)
%{
  match(Set dst (LoadUB mem));
  // Plain loads only; acquiring loads are matched by loadUB_volatile.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6514 
// Load Byte (8 bit unsigned) into long
instruct loadUB2L(iRegLNoSp dst, memory1 mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  // n is the ConvI2L; the load being checked is its input.
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6528 
// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory2 mem)
%{
  match(Set dst (LoadS mem));
  // Plain loads only; acquiring loads are matched by loadS_volatile.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6542 
// Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory2 mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  // n is the ConvI2L; the load being checked is its input.
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6556 
// Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory2 mem)
%{
  match(Set dst (LoadUS mem));
  // Plain loads only; acquiring loads are matched by loadUS_volatile.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6570 
// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory2 mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  // n is the ConvI2L; the load being checked is its input.
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6584 
// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory4 mem)
%{
  match(Set dst (LoadI mem));
  // Plain loads only; acquiring loads are matched by loadI_volatile.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6598 
// Load Integer (32 bit signed) into long
instruct loadI2L(iRegLNoSp dst, memory4 mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  // n is the ConvI2L; the load being checked is its input.
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6612 
// Load Integer (32 bit unsigned) into long
// The AndL with the 0xFFFFFFFF mask makes a plain 32-bit ldrw (which
// zero-extends) a correct implementation of the whole subtree.
instruct loadUI2L(iRegLNoSp dst, memory4 mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  // Walk AndL -> ConvI2L -> LoadI to reach the load node.
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6626 
 6627 // Load Long (64 bit signed)
 6628 instruct loadL(iRegLNoSp dst, memory8 mem)
 6629 %{
 6630   match(Set dst (LoadL mem));
 6631   predicate(!needs_acquiring_load(n));
 6632 
 6633   ins_cost(4 * INSN_COST);
 6634   format %{ "ldr  $dst, $mem\t# int" %}
 6635 
 6636   ins_encode(aarch64_enc_ldr(dst, mem));
 6637 
 6638   ins_pipe(iload_reg_mem);
 6639 %}
 6640 
// Load Range
// Loads an array length; no acquiring variant exists, so no predicate.
instruct loadRange(iRegINoSp dst, memory4 mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6653 
// Load Pointer
instruct loadP(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadP mem));
  // Plain, GC-barrier-free loads only; barrier'd loads are handled by
  // GC-specific rules elsewhere.
  predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6667 
// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadN mem));
  // Plain, GC-barrier-free loads only.
  predicate(!needs_acquiring_load(n) && n->as_Load()->barrier_data() == 0);

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6681 
// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6695 
// Load Narrow Klass Pointer
// Non-compact-headers form; with compact headers the klass bits live in
// the mark word and loadNKlassCompactHeaders below is used instead.
instruct loadNKlass(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n) && !UseCompactObjectHeaders);

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6709 
// Load Narrow Klass Pointer with +UseCompactObjectHeaders: the narrow
// klass is stored shifted inside the mark word, so load then shift it
// down into place.
instruct loadNKlassCompactHeaders(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n) && UseCompactObjectHeaders);

  ins_cost(4 * INSN_COST);
  format %{
    "ldrw  $dst, $mem\t# compressed class ptr, shifted\n\t"
    "lsrw  $dst, $dst, markWord::klass_shift_at_offset"
  %}
  ins_encode %{
    // inlined aarch64_enc_ldrw
    loadStore(masm, &MacroAssembler::ldrw, $dst$$Register, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    // Shift the klass bits down to bit 0.
    __ lsrw($dst$$Register, $dst$$Register, markWord::klass_shift_at_offset);
  %}
  ins_pipe(iload_reg_mem);
%}
 6728 
// Load Float
instruct loadF(vRegF dst, memory4 mem)
%{
  match(Set dst (LoadF mem));
  // Plain loads only; acquiring loads are matched by loadF_volatile.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
 6742 
// Load Double
instruct loadD(vRegD dst, memory8 mem)
%{
  match(Set dst (LoadD mem));
  // Plain loads only; acquiring loads are matched by loadD_volatile.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
 6756 
 6757 
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}
 6770 
// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}
 6783 
// Load Pointer Constant
// General pointer constant; may expand to several mov/movk instructions,
// hence the higher cost than loadConP0/loadConP1 below.
instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}
 6799 
// Load Null Pointer Constant
instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# nullptr ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
 6813 
 6814 // Load Pointer Constant One
 6815 
 6816 instruct loadConP1(iRegPNoSp dst, immP_1 con)
 6817 %{
 6818   match(Set dst con);
 6819 
 6820   ins_cost(INSN_COST);
 6821   format %{ "mov  $dst, $con\t# nullptr ptr" %}
 6822 
 6823   ins_encode(aarch64_enc_mov_p1(dst, con));
 6824 
 6825   ins_pipe(ialu_imm);
 6826 %}
 6827 
// Load Byte Map Base Constant
// Materializes the card-table byte map base address.
instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}
 6841 
// Load Narrow Pointer Constant
instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}
 6855 
// Load Narrow Null Pointer Constant
instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed nullptr ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}
 6869 
// Load Narrow Klass Constant
instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
 6883 
// Load Packed Float Constant
// "Packed" immediates are the small set encodable directly in FMOV, so
// no constant-table load is needed.
instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}
 6896 
// Load Float Constant
// General float constant: loaded from the constant table.
instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}
 6914 
// Load Packed Double Constant
// "Packed" immediates are the small set encodable directly in FMOV.
instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
 6927 
 6928 // Load Double Constant
 6929 
 6930 instruct loadConD(vRegD dst, immD con) %{
 6931   match(Set dst con);
 6932 
 6933   ins_cost(INSN_COST * 5);
 6934   format %{
 6935     "ldrd $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
 6936   %}
 6937 
 6938   ins_encode %{
 6939     __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
 6940   %}
 6941 
 6942   ins_pipe(fp_load_constant_d);
 6943 %}
 6944 
 6945 // Store Instructions
 6946 
// Store Byte
instruct storeB(iRegIorL2I src, memory1 mem)
%{
  match(Set mem (StoreB mem src));
  // Plain stores only; releasing stores are matched by storeB_volatile.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
 6960 
 6961 
 6962 instruct storeimmB0(immI0 zero, memory1 mem)
 6963 %{
 6964   match(Set mem (StoreB mem zero));
 6965   predicate(!needs_releasing_store(n));
 6966 
 6967   ins_cost(INSN_COST);
 6968   format %{ "strb rscractch2, $mem\t# byte" %}
 6969 
 6970   ins_encode(aarch64_enc_strb0(mem));
 6971 
 6972   ins_pipe(istore_mem);
 6973 %}
 6974 
// Store Char/Short
instruct storeC(iRegIorL2I src, memory2 mem)
%{
  match(Set mem (StoreC mem src));
  // Plain stores only; releasing stores are matched by storeC_volatile.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}
 6988 
// Store Char/Short zero: stores the zero register.
instruct storeimmC0(immI0 zero, memory2 mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}
 7001 
// Store Integer

instruct storeI(iRegIorL2I src, memory4 mem)
%{
  match(Set mem(StoreI mem src));
  // Plain stores only; releasing stores are matched by storeI_volatile.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7016 
// Store Integer zero: stores the zero register.
instruct storeimmI0(immI0 zero, memory4 mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
 7029 
 7030 // Store Long (64 bit signed)
 7031 instruct storeL(iRegL src, memory8 mem)
 7032 %{
 7033   match(Set mem (StoreL mem src));
 7034   predicate(!needs_releasing_store(n));
 7035 
 7036   ins_cost(INSN_COST);
 7037   format %{ "str  $src, $mem\t# int" %}
 7038 
 7039   ins_encode(aarch64_enc_str(src, mem));
 7040 
 7041   ins_pipe(istore_reg_mem);
 7042 %}
 7043 
 7044 // Store Long (64 bit signed)
 7045 instruct storeimmL0(immL0 zero, memory8 mem)
 7046 %{
 7047   match(Set mem (StoreL mem zero));
 7048   predicate(!needs_releasing_store(n));
 7049 
 7050   ins_cost(INSN_COST);
 7051   format %{ "str  zr, $mem\t# int" %}
 7052 
 7053   ins_encode(aarch64_enc_str0(mem));
 7054 
 7055   ins_pipe(istore_mem);
 7056 %}
 7057 
// Store Pointer
instruct storeP(iRegP src, memory8 mem)
%{
  match(Set mem (StoreP mem src));
  // Plain, GC-barrier-free stores only.
  predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7071 
// Store Pointer null: stores the zero register.
instruct storeimmP0(immP0 zero, memory8 mem)
%{
  match(Set mem (StoreP mem zero));
  // Plain, GC-barrier-free stores only.
  predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
 7085 
// Store Compressed Pointer
instruct storeN(iRegN src, memory4 mem)
%{
  match(Set mem (StoreN mem src));
  // Plain, GC-barrier-free stores only.
  predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7099 
// Store Compressed Pointer null: stores the zero register.
instruct storeImmN0(immN0 zero, memory4 mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
 7112 
// Store Float
instruct storeF(vRegF src, memory4 mem)
%{
  match(Set mem (StoreF mem src));
  // Plain stores only; releasing stores are matched by storeF_volatile.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 7126 
 7127 // TODO
 7128 // implement storeImmF0 and storeFImmPacked
 7129 
// Store Double
instruct storeD(vRegD src, memory8 mem)
%{
  match(Set mem (StoreD mem src));
  // Plain stores only; releasing stores are matched by storeD_volatile.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 7143 
 7144 // Store Compressed Klass Pointer
 7145 instruct storeNKlass(iRegN src, memory4 mem)
 7146 %{
 7147   predicate(!needs_releasing_store(n));
 7148   match(Set mem (StoreNKlass mem src));
 7149 
 7150   ins_cost(INSN_COST);
 7151   format %{ "strw  $src, $mem\t# compressed klass ptr" %}
 7152 
 7153   ins_encode(aarch64_enc_strw(src, mem));
 7154 
 7155   ins_pipe(istore_reg_mem);
 7156 %}
 7157 
 7158 // TODO
 7159 // implement storeImmD0 and storeDImmPacked
 7160 
// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

instruct prefetchalloc( memory8 mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
 7174 
 7175 //  ---------------- volatile loads and stores ----------------
 7176 
// Load Byte (8 bit signed)
// Load-acquire form; complements loadB, whose predicate rejects
// acquiring loads.
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}
 7189 
// Load Byte (8 bit signed) into long
// Load-acquire form; complements loadB2L.
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}
 7202 
// Load Byte (8 bit unsigned)
// Load-acquire form; complements loadUB.
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}
 7215 
// Load Byte (8 bit unsigned) into long
// Load-acquire form; complements loadUB2L.
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}
 7228 
// Load Short (16 bit signed)
// Load-acquire form; complements loadS.
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}
 7241 
// Load Char (16 bit unsigned)
// Load-acquire form; complements loadUS.
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}
 7253 
// Load Short/Char (16 bit unsigned) into long
// Load-acquire form; complements loadUS2L.
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
 7266 
 7267 // Load Short/Char (16 bit signed) into long
 7268 instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7269 %{
 7270   match(Set dst (ConvI2L (LoadS mem)));
 7271 
 7272   ins_cost(VOLATILE_REF_COST);
 7273   format %{ "ldarh  $dst, $mem\t# short" %}
 7274 
 7275   ins_encode(aarch64_enc_ldarsh(dst, mem));
 7276 
 7277   ins_pipe(pipe_serial);
 7278 %}
 7279 
// Load Integer (32 bit signed)
// Load-acquire form; complements loadI.
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
 7292 
// Load Integer (32 bit unsigned) into long
// Load-acquire form; complements loadUI2L. The 32-bit ldarw
// zero-extends, implementing the AndL mask for free.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
 7305 
 7306 // Load Long (64 bit signed)
 7307 instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7308 %{
 7309   match(Set dst (LoadL mem));
 7310 
 7311   ins_cost(VOLATILE_REF_COST);
 7312   format %{ "ldar  $dst, $mem\t# int" %}
 7313 
 7314   ins_encode(aarch64_enc_ldar(dst, mem));
 7315 
 7316   ins_pipe(pipe_serial);
 7317 %}
 7318 
// Load Pointer
// Load-acquire form; complements loadP. Still GC-barrier-free only.
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));
  predicate(n->as_Load()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
 7332 
// Load Compressed Pointer
// Load-acquire form; complements loadN. Still GC-barrier-free only.
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));
  predicate(n->as_Load()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
 7346 
// Load Float
// Load-acquire form; complements loadF.
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}
 7359 
// Load Double
// Load-acquire form; complements loadD.
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
 7372 
// Store Byte
// Store-release form; complements storeB.
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}
 7385 
// Store Byte zero, store-release form; complements storeimmB0.
instruct storeimmB0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb0(mem));

  ins_pipe(pipe_class_memory);
%}
 7397 
// Store Char/Short
// Store-release form; complements storeC.
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}
 7410 
// Store Char/Short zero, store-release form; complements storeimmC0.
instruct storeimmC0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh0(mem));

  ins_pipe(pipe_class_memory);
%}
 7422 
// Store Integer

// Store-release form; complements storeI.
instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
 7436 
// Store Integer zero, store-release form; complements storeimmI0.
instruct storeimmI0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw0(mem));

  ins_pipe(pipe_class_memory);
%}
 7448 
 7449 // Store Long (64 bit signed)
 7450 instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
 7451 %{
 7452   match(Set mem (StoreL mem src));
 7453 
 7454   ins_cost(VOLATILE_REF_COST);
 7455   format %{ "stlr  $src, $mem\t# int" %}
 7456 
 7457   ins_encode(aarch64_enc_stlr(src, mem));
 7458 
 7459   ins_pipe(pipe_class_memory);
 7460 %}
 7461 
 7462 instruct storeimmL0_volatile(immL0 zero, /* sync_memory*/indirect mem)
 7463 %{
 7464   match(Set mem (StoreL mem zero));
 7465 
 7466   ins_cost(VOLATILE_REF_COST);
 7467   format %{ "stlr  zr, $mem\t# int" %}
 7468 
 7469   ins_encode(aarch64_enc_stlr0(mem));
 7470 
 7471   ins_pipe(pipe_class_memory);
 7472 %}
 7473 
// Store Pointer
// Store-release form; complements storeP. Still GC-barrier-free only.
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
 7487 
// Store Pointer null, store-release form; complements storeimmP0.
instruct storeimmP0_volatile(immP0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr0(mem));

  ins_pipe(pipe_class_memory);
%}
 7500 
// Store Compressed Pointer
// Store-release form; complements storeN. Still GC-barrier-free only.
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
 7514 
// Store Compressed Pointer null, store-release form; complements storeImmN0.
instruct storeimmN0_volatile(immN0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  zr, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw0(mem));

  ins_pipe(pipe_class_memory);
%}
 7527 
// Store Float
// Store-release form; complements storeF.
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 7540 
 7541 // TODO
 7542 // implement storeImmF0 and storeFImmPacked
 7543 
 7544 // Store Double
 7545 instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
 7546 %{
 7547   match(Set mem (StoreD mem src));
 7548 
 7549   ins_cost(VOLATILE_REF_COST);
 7550   format %{ "stlrd  $src, $mem\t# double" %}
 7551 
 7552   ins_encode( aarch64_enc_fstlrd(src, mem) );
 7553 
 7554   ins_pipe(pipe_class_memory);
 7555 %}
 7556 
 7557 //  ---------------- end of volatile loads and stores ----------------
 7558 
// Write back (flush) the data cache line containing $addr.  Only
// selectable when the CPU advertises data cache line flush support.
instruct cacheWB(indirect addr)
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWB addr);

  ins_cost(100);
  format %{"cache wb $addr" %}
  ins_encode %{
    // The address must be a plain base register: no index, zero offset.
    assert($addr->index_position() < 0, "should be");
    assert($addr$$disp == 0, "should be");
    __ cache_wb(Address($addr$$base$$Register, 0));
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Ordering fence emitted before a sequence of cache-line writebacks
// (cache_wbsync with is_presync == true).
instruct cacheWBPreSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPreSync);

  ins_cost(100);
  format %{"cache wb presync" %}
  ins_encode %{
    __ cache_wbsync(true);
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Ordering fence emitted after a sequence of cache-line writebacks
// (cache_wbsync with is_presync == false).
instruct cacheWBPostSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPostSync);

  ins_cost(100);
  format %{"cache wb postsync" %}
  ins_encode %{
    __ cache_wbsync(false);
  %}
  ins_pipe(pipe_slow); // XXX
%}
 7599 
 7600 // ============================================================================
 7601 // BSWAP Instructions
 7602 
 7603 instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
 7604   match(Set dst (ReverseBytesI src));
 7605 
 7606   ins_cost(INSN_COST);
 7607   format %{ "revw  $dst, $src" %}
 7608 
 7609   ins_encode %{
 7610     __ revw(as_Register($dst$$reg), as_Register($src$$reg));
 7611   %}
 7612 
 7613   ins_pipe(ialu_reg);
 7614 %}
 7615 
 7616 instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
 7617   match(Set dst (ReverseBytesL src));
 7618 
 7619   ins_cost(INSN_COST);
 7620   format %{ "rev  $dst, $src" %}
 7621 
 7622   ins_encode %{
 7623     __ rev(as_Register($dst$$reg), as_Register($src$$reg));
 7624   %}
 7625 
 7626   ins_pipe(ialu_reg);
 7627 %}
 7628 
 7629 instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
 7630   match(Set dst (ReverseBytesUS src));
 7631 
 7632   ins_cost(INSN_COST);
 7633   format %{ "rev16w  $dst, $src" %}
 7634 
 7635   ins_encode %{
 7636     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
 7637   %}
 7638 
 7639   ins_pipe(ialu_reg);
 7640 %}
 7641 
 7642 instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
 7643   match(Set dst (ReverseBytesS src));
 7644 
 7645   ins_cost(INSN_COST);
 7646   format %{ "rev16w  $dst, $src\n\t"
 7647             "sbfmw $dst, $dst, #0, #15" %}
 7648 
 7649   ins_encode %{
 7650     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
 7651     __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
 7652   %}
 7653 
 7654   ins_pipe(ialu_reg);
 7655 %}
 7656 
 7657 // ============================================================================
 7658 // Zero Count Instructions
 7659 
 7660 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
 7661   match(Set dst (CountLeadingZerosI src));
 7662 
 7663   ins_cost(INSN_COST);
 7664   format %{ "clzw  $dst, $src" %}
 7665   ins_encode %{
 7666     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
 7667   %}
 7668 
 7669   ins_pipe(ialu_reg);
 7670 %}
 7671 
 7672 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
 7673   match(Set dst (CountLeadingZerosL src));
 7674 
 7675   ins_cost(INSN_COST);
 7676   format %{ "clz   $dst, $src" %}
 7677   ins_encode %{
 7678     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
 7679   %}
 7680 
 7681   ins_pipe(ialu_reg);
 7682 %}
 7683 
 7684 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
 7685   match(Set dst (CountTrailingZerosI src));
 7686 
 7687   ins_cost(INSN_COST * 2);
 7688   format %{ "rbitw  $dst, $src\n\t"
 7689             "clzw   $dst, $dst" %}
 7690   ins_encode %{
 7691     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
 7692     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
 7693   %}
 7694 
 7695   ins_pipe(ialu_reg);
 7696 %}
 7697 
 7698 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
 7699   match(Set dst (CountTrailingZerosL src));
 7700 
 7701   ins_cost(INSN_COST * 2);
 7702   format %{ "rbit   $dst, $src\n\t"
 7703             "clz    $dst, $dst" %}
 7704   ins_encode %{
 7705     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
 7706     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
 7707   %}
 7708 
 7709   ins_pipe(ialu_reg);
 7710 %}
 7711 
//---------- Population Count Instructions -------------------------------------
//
// There is no integer popcount instruction, so the value is moved to a
// SIMD register, CNT counts the set bits in each byte lane, ADDV sums
// the lanes, and the result is moved back to a general register.

instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Memory-operand form: load the int straight into the SIMD register
// with ldrs, avoiding the general-register round trip.
instruct popCountI_mem(iRegINoSp dst, memory4 mem, vRegF tmp) %{
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(masm, &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Memory-operand form of the long popcount: ldrd loads the 64-bit
// value directly into the SIMD register.
instruct popCountL_mem(iRegINoSp dst, memory8 mem, vRegD tmp) %{
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(masm, &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
 7797 
 7798 // ============================================================================
 7799 // VerifyVectorAlignment Instruction
 7800 
 7801 instruct verify_vector_alignment(iRegP addr, immL_positive_bitmaskI mask, rFlagsReg cr) %{
 7802   match(Set addr (VerifyVectorAlignment addr mask));
 7803   effect(KILL cr);
 7804   format %{ "verify_vector_alignment $addr $mask \t! verify alignment" %}
 7805   ins_encode %{
 7806     Label Lskip;
 7807     // check if masked bits of addr are zero
 7808     __ tst($addr$$Register, $mask$$constant);
 7809     __ br(Assembler::EQ, Lskip);
 7810     __ stop("verify_vector_alignment found a misaligned vector memory access");
 7811     __ bind(Lskip);
 7812   %}
 7813   ins_pipe(pipe_slow);
 7814 %}
 7815 
 7816 // ============================================================================
 7817 // MemBar Instruction
 7818 
 7819 instruct load_fence() %{
 7820   match(LoadFence);
 7821   ins_cost(VOLATILE_REF_COST);
 7822 
 7823   format %{ "load_fence" %}
 7824 
 7825   ins_encode %{
 7826     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
 7827   %}
 7828   ins_pipe(pipe_serial);
 7829 %}
 7830 
 7831 instruct unnecessary_membar_acquire() %{
 7832   predicate(unnecessary_acquire(n));
 7833   match(MemBarAcquire);
 7834   ins_cost(0);
 7835 
 7836   format %{ "membar_acquire (elided)" %}
 7837 
 7838   ins_encode %{
 7839     __ block_comment("membar_acquire (elided)");
 7840   %}
 7841 
 7842   ins_pipe(pipe_class_empty);
 7843 %}
 7844 
 7845 instruct membar_acquire() %{
 7846   match(MemBarAcquire);
 7847   ins_cost(VOLATILE_REF_COST);
 7848 
 7849   format %{ "membar_acquire\n\t"
 7850             "dmb ishld" %}
 7851 
 7852   ins_encode %{
 7853     __ block_comment("membar_acquire");
 7854     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
 7855   %}
 7856 
 7857   ins_pipe(pipe_serial);
 7858 %}
 7859 
 7860 
 7861 instruct membar_acquire_lock() %{
 7862   match(MemBarAcquireLock);
 7863   ins_cost(VOLATILE_REF_COST);
 7864 
 7865   format %{ "membar_acquire_lock (elided)" %}
 7866 
 7867   ins_encode %{
 7868     __ block_comment("membar_acquire_lock (elided)");
 7869   %}
 7870 
 7871   ins_pipe(pipe_serial);
 7872 %}
 7873 
 7874 instruct store_fence() %{
 7875   match(StoreFence);
 7876   ins_cost(VOLATILE_REF_COST);
 7877 
 7878   format %{ "store_fence" %}
 7879 
 7880   ins_encode %{
 7881     __ membar(Assembler::LoadStore|Assembler::StoreStore);
 7882   %}
 7883   ins_pipe(pipe_serial);
 7884 %}
 7885 
 7886 instruct unnecessary_membar_release() %{
 7887   predicate(unnecessary_release(n));
 7888   match(MemBarRelease);
 7889   ins_cost(0);
 7890 
 7891   format %{ "membar_release (elided)" %}
 7892 
 7893   ins_encode %{
 7894     __ block_comment("membar_release (elided)");
 7895   %}
 7896   ins_pipe(pipe_serial);
 7897 %}
 7898 
 7899 instruct membar_release() %{
 7900   match(MemBarRelease);
 7901   ins_cost(VOLATILE_REF_COST);
 7902 
 7903   format %{ "membar_release\n\t"
 7904             "dmb ishst\n\tdmb ishld" %}
 7905 
 7906   ins_encode %{
 7907     __ block_comment("membar_release");
 7908     // These will be merged if AlwaysMergeDMB is enabled.
 7909     __ membar(Assembler::StoreStore);
 7910     __ membar(Assembler::LoadStore);
 7911   %}
 7912   ins_pipe(pipe_serial);
 7913 %}
 7914 
 7915 instruct membar_storestore() %{
 7916   match(MemBarStoreStore);
 7917   match(StoreStoreFence);
 7918   ins_cost(VOLATILE_REF_COST);
 7919 
 7920   format %{ "MEMBAR-store-store" %}
 7921 
 7922   ins_encode %{
 7923     __ membar(Assembler::StoreStore);
 7924   %}
 7925   ins_pipe(pipe_serial);
 7926 %}
 7927 
 7928 instruct membar_release_lock() %{
 7929   match(MemBarReleaseLock);
 7930   ins_cost(VOLATILE_REF_COST);
 7931 
 7932   format %{ "membar_release_lock (elided)" %}
 7933 
 7934   ins_encode %{
 7935     __ block_comment("membar_release_lock (elided)");
 7936   %}
 7937 
 7938   ins_pipe(pipe_serial);
 7939 %}
 7940 
 7941 instruct unnecessary_membar_volatile() %{
 7942   predicate(unnecessary_volatile(n));
 7943   match(MemBarVolatile);
 7944   ins_cost(0);
 7945 
 7946   format %{ "membar_volatile (elided)" %}
 7947 
 7948   ins_encode %{
 7949     __ block_comment("membar_volatile (elided)");
 7950   %}
 7951 
 7952   ins_pipe(pipe_serial);
 7953 %}
 7954 
 7955 instruct membar_volatile() %{
 7956   match(MemBarVolatile);
 7957   ins_cost(VOLATILE_REF_COST*100);
 7958 
 7959   format %{ "membar_volatile\n\t"
 7960              "dmb ish"%}
 7961 
 7962   ins_encode %{
 7963     __ block_comment("membar_volatile");
 7964     __ membar(Assembler::StoreLoad);
 7965   %}
 7966 
 7967   ins_pipe(pipe_serial);
 7968 %}
 7969 
 7970 // ============================================================================
 7971 // Cast/Convert Instructions
 7972 
 7973 instruct castX2P(iRegPNoSp dst, iRegL src) %{
 7974   match(Set dst (CastX2P src));
 7975 
 7976   ins_cost(INSN_COST);
 7977   format %{ "mov $dst, $src\t# long -> ptr" %}
 7978 
 7979   ins_encode %{
 7980     if ($dst$$reg != $src$$reg) {
 7981       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 7982     }
 7983   %}
 7984 
 7985   ins_pipe(ialu_reg);
 7986 %}
 7987 
 7988 instruct castP2X(iRegLNoSp dst, iRegP src) %{
 7989   match(Set dst (CastP2X src));
 7990 
 7991   ins_cost(INSN_COST);
 7992   format %{ "mov $dst, $src\t# ptr -> long" %}
 7993 
 7994   ins_encode %{
 7995     if ($dst$$reg != $src$$reg) {
 7996       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 7997     }
 7998   %}
 7999 
 8000   ins_pipe(ialu_reg);
 8001 %}
 8002 
 8003 // Convert oop into int for vectors alignment masking
 8004 instruct convP2I(iRegINoSp dst, iRegP src) %{
 8005   match(Set dst (ConvL2I (CastP2X src)));
 8006 
 8007   ins_cost(INSN_COST);
 8008   format %{ "movw $dst, $src\t# ptr -> int" %}
 8009   ins_encode %{
 8010     __ movw($dst$$Register, $src$$Register);
 8011   %}
 8012 
 8013   ins_pipe(ialu_reg);
 8014 %}
 8015 
 8016 // Convert compressed oop into int for vectors alignment masking
 8017 // in case of 32bit oops (heap < 4Gb).
 8018 instruct convN2I(iRegINoSp dst, iRegN src)
 8019 %{
 8020   predicate(CompressedOops::shift() == 0);
 8021   match(Set dst (ConvL2I (CastP2X (DecodeN src))));
 8022 
 8023   ins_cost(INSN_COST);
 8024   format %{ "mov dst, $src\t# compressed ptr -> int" %}
 8025   ins_encode %{
 8026     __ movw($dst$$Register, $src$$Register);
 8027   %}
 8028 
 8029   ins_pipe(ialu_reg);
 8030 %}
 8031 

// Convert oop pointer into compressed form
// General case: the oop may be null, so encode_heap_oop handles the
// null check; it may clobber the flags, hence KILL cr.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Encoding of an oop statically known to be non-null: the null check
// can be skipped.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Decode a narrow oop that may be null (and is not a constant).
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Decode a narrow oop known to be non-null (or a constant): skips the
// null check in the decode sequence.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
 8086 
// n.b. AArch64 implementations of encode_klass_not_null and
// decode_klass_not_null do not modify the flags register so, unlike
// Intel, we don't kill CR as a side effect here

// Compress a klass pointer (always non-null).
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// Decompress a narrow klass pointer (always non-null).  The in-place
// overload is used when src and dst were allocated the same register.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
 8124 
// The cast nodes below exist only to carry type information for the
// optimizer; they pin the value in its current register and emit no
// code (size(0), empty encoding).

instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castLL(iRegL dst)
%{
  match(Set dst (CastLL dst));

  size(0);
  format %{ "# castLL of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castFF(vRegF dst)
%{
  match(Set dst (CastFF dst));

  size(0);
  format %{ "# castFF of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castDD(vRegD dst)
%{
  match(Set dst (CastDD dst));

  size(0);
  format %{ "# castDD of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castVV(vReg dst)
%{
  match(Set dst (CastVV dst));

  size(0);
  format %{ "# castVV of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// CastVV of a predicate (governing) register, for SVE masks.
instruct castVVMask(pRegGov dst)
%{
  match(Set dst (CastVV dst));

  size(0);
  format %{ "# castVV of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
 8210 
 8211 // ============================================================================
 8212 // Atomic operation instructions
 8213 //
 8214 
 8215 // standard CompareAndSwapX when we are using barriers
 8216 // these have higher priority than the rules selected by a predicate
 8217 
 8218 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
 8219 // can't match them
 8220 
 8221 instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 8222 
 8223   match(Set res (CompareAndSwapB mem (Binary oldval newval)));
 8224   ins_cost(2 * VOLATILE_REF_COST);
 8225 
 8226   effect(KILL cr);
 8227 
 8228   format %{
 8229     "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 8230     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8231   %}
 8232 
 8233   ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
 8234             aarch64_enc_cset_eq(res));
 8235 
 8236   ins_pipe(pipe_slow);
 8237 %}
 8238 
 8239 instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 8240 
 8241   match(Set res (CompareAndSwapS mem (Binary oldval newval)));
 8242   ins_cost(2 * VOLATILE_REF_COST);
 8243 
 8244   effect(KILL cr);
 8245 
 8246   format %{
 8247     "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 8248     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8249   %}
 8250 
 8251   ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
 8252             aarch64_enc_cset_eq(res));
 8253 
 8254   ins_pipe(pipe_slow);
 8255 %}
 8256 
 8257 instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 8258 
 8259   match(Set res (CompareAndSwapI mem (Binary oldval newval)));
 8260   ins_cost(2 * VOLATILE_REF_COST);
 8261 
 8262   effect(KILL cr);
 8263 
 8264  format %{
 8265     "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 8266     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8267  %}
 8268 
 8269  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
 8270             aarch64_enc_cset_eq(res));
 8271 
 8272   ins_pipe(pipe_slow);
 8273 %}
 8274 
 8275 instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
 8276 
 8277   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
 8278   ins_cost(2 * VOLATILE_REF_COST);
 8279 
 8280   effect(KILL cr);
 8281 
 8282  format %{
 8283     "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
 8284     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8285  %}
 8286 
 8287  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
 8288             aarch64_enc_cset_eq(res));
 8289 
 8290   ins_pipe(pipe_slow);
 8291 %}
 8292 
 8293 instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
 8294 
 8295   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
 8296   predicate(n->as_LoadStore()->barrier_data() == 0);
 8297   ins_cost(2 * VOLATILE_REF_COST);
 8298 
 8299   effect(KILL cr);
 8300 
 8301  format %{
 8302     "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
 8303     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8304  %}
 8305 
 8306  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
 8307             aarch64_enc_cset_eq(res));
 8308 
 8309   ins_pipe(pipe_slow);
 8310 %}
 8311 
 8312 instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
 8313 
 8314   match(Set res (CompareAndSwapN mem (Binary oldval newval)));
 8315   predicate(n->as_LoadStore()->barrier_data() == 0);
 8316   ins_cost(2 * VOLATILE_REF_COST);
 8317 
 8318   effect(KILL cr);
 8319 
 8320  format %{
 8321     "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
 8322     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8323  %}
 8324 
 8325  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
 8326             aarch64_enc_cset_eq(res));
 8327 
 8328   ins_pipe(pipe_slow);
 8329 %}
 8330 
// alternative CompareAndSwapX when we are eliding barriers
//
// Selected when needs_acquiring_load_exclusive(n) holds, i.e. the
// matcher has decided the acquiring (_acq) form of the exchange
// suffices in place of separate barriers — hence the lower cost
// (VOLATILE_REF_COST rather than 2x).

instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Pointer variant additionally requires no GC barrier data.
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Narrow-oop variant additionally requires no GC barrier data.
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8446 
 8447 
 8448 // ---------------------------------------------------------------------
 8449 
 8450 // BEGIN This section of the file is automatically generated. Do not edit --------------
 8451 
 8452 // Sundry CAS operations.  Note that release is always true,
 8453 // regardless of the memory ordering of the CAS.  This is because we
 8454 // need the volatile case to be sequentially consistent but there is
 8455 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
 8456 // can't check the type of memory ordering here, so we always emit a
 8457 // STLXR.
 8458 
 8459 // This section is generated from cas.m4
 8460 
 8461 
 8462 // This pattern is generated automatically from cas.m4.
 8463 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 8464 instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
 8465   match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
 8466   ins_cost(2 * VOLATILE_REF_COST);
 8467   effect(TEMP_DEF res, KILL cr);
 8468   format %{
 8469     "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
 8470   %}
 8471   ins_encode %{
 8472     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 8473                Assembler::byte, /*acquire*/ false, /*release*/ true,
 8474                /*weak*/ false, $res$$Register);
 8475     __ sxtbw($res$$Register, $res$$Register);
 8476   %}
 8477   ins_pipe(pipe_slow);
 8478 %}
 8479 
 8480 // This pattern is generated automatically from cas.m4.
 8481 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 8482 instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
 8483   match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
 8484   ins_cost(2 * VOLATILE_REF_COST);
 8485   effect(TEMP_DEF res, KILL cr);
 8486   format %{
 8487     "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
 8488   %}
 8489   ins_encode %{
 8490     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 8491                Assembler::halfword, /*acquire*/ false, /*release*/ true,
 8492                /*weak*/ false, $res$$Register);
 8493     __ sxthw($res$$Register, $res$$Register);
 8494   %}
 8495   ins_pipe(pipe_slow);
 8496 %}
 8497 
 8498 // This pattern is generated automatically from cas.m4.
 8499 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 8500 instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
 8501   match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
 8502   ins_cost(2 * VOLATILE_REF_COST);
 8503   effect(TEMP_DEF res, KILL cr);
 8504   format %{
 8505     "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
 8506   %}
 8507   ins_encode %{
 8508     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 8509                Assembler::word, /*acquire*/ false, /*release*/ true,
 8510                /*weak*/ false, $res$$Register);
 8511   %}
 8512   ins_pipe(pipe_slow);
 8513 %}
 8514 
 8515 // This pattern is generated automatically from cas.m4.
 8516 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Strong compare-and-exchange (long, 64-bit): res <- old *mem.
// NOTE(review): "(long, weak)" in the format string is misleading -- /*weak*/ false
// makes this the strong form; generated from cas.m4, fix there.
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 8531 
 8532 // This pattern is generated automatically from cas.m4.
 8533 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Strong compare-and-exchange (narrow oop, 32-bit compressed pointer).
// Only matches when the GC attached no barrier data (barrier_data() == 0).
// NOTE(review): "weak" in the format string is misleading -- /*weak*/ false.
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 8549 
 8550 // This pattern is generated automatically from cas.m4.
 8551 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Strong compare-and-exchange (full-width pointer). Requires no GC barrier data.
// NOTE(review): "weak" in the format string is misleading -- /*weak*/ false.
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 8567 
 8568 // This pattern is generated automatically from cas.m4.
 8569 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Strong compare-and-exchange (byte) with acquire+release ordering; selected
// when the matcher decides an acquiring load-exclusive is needed.
// NOTE(review): "weak" in the format string is misleading -- /*weak*/ false.
instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    // Sign-extend the 8-bit result into the full 32-bit register.
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 8586 
 8587 // This pattern is generated automatically from cas.m4.
 8588 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Strong compare-and-exchange (short) with acquire+release ordering.
// NOTE(review): "weak" in the format string is misleading -- /*weak*/ false.
instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    // Sign-extend the 16-bit result into the full 32-bit register.
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 8605 
 8606 // This pattern is generated automatically from cas.m4.
 8607 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Strong compare-and-exchange (int) with acquire+release ordering.
// NOTE(review): "weak" in the format string is misleading -- /*weak*/ false.
instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 8623 
 8624 // This pattern is generated automatically from cas.m4.
 8625 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Strong compare-and-exchange (long) with acquire+release ordering.
// NOTE(review): "weak" in the format string is misleading -- /*weak*/ false.
instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 8641 
 8642 // This pattern is generated automatically from cas.m4.
 8643 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Strong compare-and-exchange (narrow oop) with acquire+release ordering.
// Requires no GC barrier data. NOTE(review): "weak" label misleading (/*weak*/ false).
instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 8659 
 8660 // This pattern is generated automatically from cas.m4.
 8661 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Strong compare-and-exchange (pointer) with acquire+release ordering.
// Requires no GC barrier data. NOTE(review): "weak" label misleading (/*weak*/ false).
instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 8677 
 8678 // This pattern is generated automatically from cas.m4.
 8679 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Weak compare-and-swap (byte): may fail spuriously (no result register kept),
// so res is the success flag: 1 if the store happened (EQ), else 0.
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 8696 
 8697 // This pattern is generated automatically from cas.m4.
 8698 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Weak compare-and-swap (short): res <- 1 on success (EQ), 0 on failure.
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 8715 
 8716 // This pattern is generated automatically from cas.m4.
 8717 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Weak compare-and-swap (int): res <- 1 on success (EQ), 0 on failure.
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 8734 
 8735 // This pattern is generated automatically from cas.m4.
 8736 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Weak compare-and-swap (long): res is an int success flag (1 on EQ, else 0).
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 8753 
 8754 // This pattern is generated automatically from cas.m4.
 8755 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Weak compare-and-swap (narrow oop): requires no GC barrier data; res is the
// success flag (1 on EQ, else 0).
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 8773 
 8774 // This pattern is generated automatically from cas.m4.
 8775 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Weak compare-and-swap (pointer): requires no GC barrier data; res is the
// success flag (1 on EQ, else 0).
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 8793 
 8794 // This pattern is generated automatically from cas.m4.
 8795 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Weak compare-and-swap (byte) with acquire+release ordering; res <- success flag.
instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 8813 
 8814 // This pattern is generated automatically from cas.m4.
 8815 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Weak compare-and-swap (short) with acquire+release ordering; res <- success flag.
instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 8833 
 8834 // This pattern is generated automatically from cas.m4.
 8835 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Weak compare-and-swap (int) with acquire+release ordering; res <- success flag.
instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 8853 
 8854 // This pattern is generated automatically from cas.m4.
 8855 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Weak compare-and-swap (long) with acquire+release ordering; res <- success flag.
instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 8873 
 8874 // This pattern is generated automatically from cas.m4.
 8875 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Weak compare-and-swap (narrow oop) with acquire+release ordering; requires no
// GC barrier data; res <- success flag.
instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 8893 
 8894 // This pattern is generated automatically from cas.m4.
 8895 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Weak compare-and-swap (pointer) with acquire+release ordering; requires no
// GC barrier data; res <- success flag.
instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 8913 
 8914 // END This section of the file is automatically generated. Do not edit --------------
 8915 // ---------------------------------------------------------------------
 8916 
// Atomic exchange (int): prev <- *mem; *mem <- newv.
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 8926 
// Atomic exchange (long): prev <- *mem; *mem <- newv.
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 8936 
// Atomic exchange (narrow oop, 32-bit): requires no GC barrier data.
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 8947 
// Atomic exchange (pointer, 64-bit): requires no GC barrier data.
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 8958 
// Atomic exchange (int), acquiring variant (atomic_xchgalw -- presumably
// acquire+release; selected when needs_acquiring_load_exclusive(n)).
instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetI mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 8969 
// Atomic exchange (long), acquiring variant (atomic_xchgal).
instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetL mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 8980 
// Atomic exchange (narrow oop), acquiring variant; requires no GC barrier data.
instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetN mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 8991 
// Atomic exchange (pointer), acquiring variant; requires no GC barrier data.
instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9002 
 9003 
// Atomic fetch-and-add (long, register increment): newval <- old *mem; *mem += incr.
// (Despite the name, "newval" holds the value BEFORE the add, per GetAndAddL.)
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9013 
// Atomic add (long) when the fetched result is unused: discard it into noreg.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9024 
// Atomic fetch-and-add (long, immediate increment in add/sub-immediate range).
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9034 
// Atomic add (long, immediate) with unused result.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9045 
// Atomic fetch-and-add (int, register increment): newval <- old *mem; *mem += incr.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9055 
// Atomic add (int) with unused result.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9066 
// Atomic fetch-and-add (int, immediate increment).
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9076 
// Atomic add (int, immediate) with unused result.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9087 
// Atomic fetch-and-add (long, register), acquiring variant (atomic_addal).
instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9098 
// Atomic add (long), acquiring variant, unused result.
instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9109 
// Atomic fetch-and-add (long, immediate), acquiring variant.
instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9120 
// Atomic add (long, immediate), acquiring variant, unused result.
instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9131 
// Atomic fetch-and-add (int, register), acquiring variant (atomic_addalw).
instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9142 
// Atomic add (int), acquiring variant, unused result.
instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9153 
// Atomic fetch-and-add (int, immediate), acquiring variant.
instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9164 
// Atomic add (int, immediate), acquiring variant, unused result.
instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9175 
 9176 // Manifest a CmpU result in an integer register.
 9177 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpU3_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg flags)
%{
  match(Set dst (CmpU3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmpw $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpU3(reg)"
  %}
  ins_encode %{
    // dst = (src1 != src2) ? 1 : 0, then negate when src1 is unsigned-lower:
    // yields -1 / 0 / 1 for below / equal / above.
    __ cmpw($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}
 9197 
// CmpU3 with an add/sub-range immediate: subsw against zr sets flags only.
instruct cmpU3_reg_imm(iRegINoSp dst, iRegI src1, immIAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpU3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subsw zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpU3(imm)"
  %}
  ins_encode %{
    // Same -1/0/1 manifestation as cmpU3_reg_reg, with an immediate operand.
    __ subsw(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}
 9217 
 9218 // Manifest a CmpUL result in an integer register.
 9219 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpUL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpUL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmp $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpUL3(reg)"
  %}
  ins_encode %{
    // 64-bit unsigned three-way compare: -1 / 0 / 1 for below / equal / above.
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}
 9239 
instruct cmpUL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpUL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subs zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpUL3(imm)"
  %}
  ins_encode %{
    // NOTE(review): the int32_t narrowing of a long constant is presumed safe
    // because immLAddSub restricts values to the add/sub-immediate range --
    // confirm against the immLAddSub operand definition.
    __ subs(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}
 9259 
 9260 // Manifest a CmpL result in an integer register.
 9261 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Signed long three-way compare, register/register form:
// dst = -1, 0 or 1 for src1 <, ==, > src2. Note the signed LT condition
// on the cnegw (the unsigned rules above use LO instead).
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);  // cmp clobbers the condition flags

  ins_cost(INSN_COST * 3);
  format %{
      "cmp $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lt\t# CmpL3(reg)"
  %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
 9281 
// Signed long three-way compare against an add/sub immediate.
// Same signed (LT) variant of the cset/cneg idiom as the register form.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);  // subs clobbers the condition flags

  ins_cost(INSN_COST * 3);
  format %{
      "subs zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lt\t# CmpL3(imm)"
  %}
  ins_encode %{
    // immLAddSub fits the 12-bit add/sub immediate; cast is value-preserving.
    __ subs(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
 9301 
 9302 // ============================================================================
 9303 // Conditional Move Instructions
 9304 
 9305 // n.b. we have identical rules for both a signed compare op (cmpOp)
 9306 // and an unsigned compare op (cmpOpU). it would be nice if we could
 9307 // define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
 9312 // which throws a ShouldNotHappen. So, we have to provide two flavours
 9313 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
 9314 
// CMoveI, signed flavour: cselw dst, src2, src1, cond selects src2 when
// the condition holds, otherwise src1 (32-bit registers).
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// CMoveI, unsigned flavour (cmpOpU/rFlagsRegU); identical encoding.
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// n.b. this is selected in preference to the rule above because it
// avoids loading constant 0 into a source register

// TODO
// we ought only to be able to cull one of these variants as the ideal
// transforms ought always to order the zero consistently (to left/right?)

// CMoveI with zero as the first value: zr replaces the src1 slot.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the zero-left rule above.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// CMoveI with zero as the second value: zr replaces the src2 slot.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the zero-right rule above.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// special case for creating a boolean 0 or 1

// n.b. this is selected in preference to the rule above because it
// avoids loading constants 0 and 1 into a source register

// CMoveI over the constant pair (1, 0): a single csincw zr, zr, cond
// produces 0 when cond holds and 1 otherwise (csinc = "select/increment").
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// Unsigned flavour of the boolean 0/1 rule above.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
 9462 
// CMoveL rules: 64-bit csel variants of the CMoveI rules above; same
// signed/unsigned pairing and the same zero-operand special cases.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned flavour (cmpOpU/rFlagsRegU); identical encoding.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// Zero in the src2 slot: use zr instead of materializing 0.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the zero-right rule above.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero in the src1 slot: use zr instead of materializing 0.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the zero-left rule above.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9560 
// CMoveP rules: pointer-width (64-bit csel) variants, mirroring the
// CMoveL rules above.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned flavour (cmpOpU/rFlagsRegU); identical encoding.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// Null pointer (immP0) in the src2 slot: use zr directly.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the zero-right rule above.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Null pointer (immP0) in the src1 slot: use zr directly.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the zero-left rule above.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9658 
// CMoveN (compressed pointer) rule, signed flavour: compressed oops are
// 32 bits wide, hence the w-form cselw.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9674 
 9675 instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
 9676   match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));
 9677 
 9678   ins_cost(INSN_COST * 2);
 9679   format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}
 9680 
 9681   ins_encode %{
 9682     __ cselw(as_Register($dst$$reg),
 9683              as_Register($src2$$reg),
 9684              as_Register($src1$$reg),
 9685              (Assembler::Condition)$cmp$$cmpcode);
 9686   %}
 9687 
 9688   ins_pipe(icond_reg_reg);
 9689 %}
 9690 
// special cases where one arg is zero

// CMoveN with a null compressed oop (immN0) in the src2 slot.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the zero-right rule above.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// CMoveN with a null compressed oop (immN0) in the src1 slot.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the zero-left rule above.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9756 
// CMoveF rules: single-precision conditional select via fcsels. Note the
// src2/src1 swap in the encoding, matching the integer csel rules above.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}

// Unsigned flavour (cmpOpU/rFlagsRegU); identical encoding.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
 9792 
 9793 instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
 9794 %{
 9795   match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));
 9796 
 9797   ins_cost(INSN_COST * 3);
 9798 
 9799   format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
 9800   ins_encode %{
 9801     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
 9802     __ fcseld(as_FloatRegister($dst$$reg),
 9803               as_FloatRegister($src2$$reg),
 9804               as_FloatRegister($src1$$reg),
 9805               cond);
 9806   %}
 9807 
 9808   ins_pipe(fp_cond_reg_reg_d);
 9809 %}
 9810 
 9811 instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
 9812 %{
 9813   match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));
 9814 
 9815   ins_cost(INSN_COST * 3);
 9816 
 9817   format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
 9818   ins_encode %{
 9819     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
 9820     __ fcseld(as_FloatRegister($dst$$reg),
 9821               as_FloatRegister($src2$$reg),
 9822               as_FloatRegister($src1$$reg),
 9823               cond);
 9824   %}
 9825 
 9826   ins_pipe(fp_cond_reg_reg_d);
 9827 %}
 9828 
 9829 // ============================================================================
 9830 // Arithmetic Instructions
 9831 //
 9832 
 9833 // Integer Addition
 9834 
 9835 // TODO
 9836 // these currently employ operations which do not set CR and hence are
 9837 // not flagged as killing CR but we would like to isolate the cases
 9838 // where we want to set flags from those where we don't. need to work
 9839 // out how to do that.
 9840 
// 32-bit integer addition, register/register form.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit integer addition against an add/sub immediate. The shared
// aarch64_enc_addsubw_imm encoder handles both add and sub; opcode 0x0
// selects add.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// As above, but matches AddI over a long source narrowed via ConvL2I —
// the w-form add already operates on the low 32 bits, so no explicit
// narrowing is needed.
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
 9883 
 9884 // Pointer Addition
// Pointer + long offset, register/register form.
instruct addP_reg_reg(iRegPNoSp dst, iRegPorL2P src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer + sign-extended int offset: folds the ConvI2L into the add's
// sxtw extend operand, saving a separate extend instruction.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegPorL2P src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer + (long << scale): folds the shift into a scaled-index lea.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegPorL2P src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Pointer + (sign-extended int << scale): folds both the ConvI2L and
// the shift into a single sxtw-scaled lea.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegPorL2P src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// (ConvI2L src) << scale as a single sbfiz: sign-extend and shift in one
// bitfield-insert instruction. The width is capped at 32 bits since only
// 32 significant source bits exist.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN2(32, (int)((-$scale$$constant) & 63)));
  %}

  ins_pipe(ialu_reg_shift);
%}
 9959 
 9960 // Pointer Immediate Addition
 9961 // n.b. this needs to be more expensive than using an indirect memory
 9962 // operand
// Pointer + add/sub immediate; opcode 0x0 selects add in the shared
// aarch64_enc_addsub_imm encoder.
instruct addP_reg_imm(iRegPNoSp dst, iRegPorL2P src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}

// Long Addition
// 64-bit addition, register/register form.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
 9993 
// Long Immediate Addition. No constant pool entries required.
// 64-bit addition against an add/sub immediate; opcode 0x0 selects add
// in the shared aarch64_enc_addsub_imm encoder.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10008 
10009 // Integer Subtraction
// 32-bit integer subtraction, register/register form.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
// 32-bit subtraction against an add/sub immediate; opcode 0x1 selects
// sub in the shared aarch64_enc_addsubw_imm encoder.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10039 
10040 // Long Subtraction
// 64-bit subtraction, register/register form.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10056 
// Long Immediate Subtraction. No constant pool entries required.
// 64-bit subtraction against an add/sub immediate; opcode 0x1 selects
// sub in the shared aarch64_enc_addsub_imm encoder.
// Fix: format string was "sub$dst, ..." (missing space after the
// mnemonic), which produced malformed disassembly output.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10071 
10072 // Integer Negation (special case for sub)
10073 
// 32-bit integer negation: matches SubI of zero minus src.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Long Negation

// 64-bit negation: matches SubL of zero minus src.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10103 
10104 // Integer Multiply
10105 
// 32-bit integer multiply.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// 32x32 -> 64-bit signed multiply: folds the two ConvI2L conversions
// into a single smull.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// Long Multiply

// 64-bit multiply (low 64 bits of the product).
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}

// High 64 bits of a signed 64x64 multiply (smulh).
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}

// High 64 bits of an unsigned 64x64 multiply (umulh).
instruct umulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (UMulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "umulh   $dst, $src1, $src2\t# umulhi" %}

  ins_encode %{
    __ umulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10184 
10185 // Combined Integer Multiply & Add/Sub
10186 
// Fused 32-bit multiply-add: dst = src3 + src1 * src2 (maddw).
// Fix: format said "madd" but the encoding emits the w-form maddw; the
// annotation now matches the emitted instruction.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10202 
// Fused 32-bit multiply-subtract: dst = src3 - src1 * src2 (msubw).
// Fix: format said "msub" but the encoding emits the w-form msubw; the
// annotation now matches the emitted instruction.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10218 
10219 // Combined Integer Multiply & Neg
10220 
// Fused 32-bit multiply-negate: matches (0 - src1) * src2 and emits a
// single mnegw.
// Fix: format said "mneg" but the encoding emits the w-form mnegw; the
// annotation now matches the emitted instruction.
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
  match(Set dst (MulI (SubI zero src1) src2));

  ins_cost(INSN_COST * 3);
  format %{ "mnegw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mnegw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10235 
10236 // Combined Long Multiply & Add/Sub
10237 
// Fused 64-bit multiply-add: dst = src3 + src1 * src2.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd($dst$$Register, $src1$$Register, $src2$$Register, $src3$$Register);
  %}

  ins_pipe(lmac_reg_reg);
%}
10253 
// Fused 64-bit multiply-subtract: dst = src3 - src1 * src2.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub($dst$$Register, $src1$$Register, $src2$$Register, $src3$$Register);
  %}

  ins_pipe(lmac_reg_reg);
%}
10269 
10270 // Combined Long Multiply & Neg
10271 
// Fused 64-bit multiply-negate: dst = (0 - src1) * src2 == -(src1 * src2).
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mneg($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(lmac_reg_reg);
%}
10286 
10287 // Combine Integer Signed Multiply & Add/Sub/Neg Long
10288 
// Widening signed multiply-add: dst = src3 + (long)src1 * (long)src2.
instruct smaddL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (AddL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smaddl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smaddl($dst$$Register, $src1$$Register, $src2$$Register, $src3$$Register);
  %}

  ins_pipe(imac_reg_reg);
%}
10304 
// Widening signed multiply-subtract: dst = src3 - (long)src1 * (long)src2.
instruct smsubL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (SubL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smsubl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smsubl($dst$$Register, $src1$$Register, $src2$$Register, $src3$$Register);
  %}

  ins_pipe(imac_reg_reg);
%}
10320 
// Widening signed multiply-negate: dst = -((long)src1 * (long)src2).
instruct smnegL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero (ConvI2L src1)) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smnegl  $dst, $src1, $src2" %}

  ins_encode %{
    __ smnegl($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(imac_reg_reg);
%}
10335 
10336 // Combined Multiply-Add Shorts into Integer (dst = src1 * src2 + src3 * src4)
10337 
// Two-term multiply-add: dst = src1 * src2 + src3 * src4 (all 32-bit).
// The first product goes through rscratch1; the maddw folds in the second.
instruct muladdS2I(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3, iRegIorL2I src4) %{
  match(Set dst (MulAddS2I (Binary src1 src2) (Binary src3 src4)));

  ins_cost(INSN_COST * 5);
  format %{ "mulw  rscratch1, $src1, $src2\n\t"
            "maddw $dst, $src3, $src4, rscratch1" %}

  ins_encode %{
    __ mulw(rscratch1, $src1$$Register, $src2$$Register);
    __ maddw($dst$$Register, $src3$$Register, $src4$$Register, rscratch1);
  %}

  ins_pipe(imac_reg_reg);
%}
10351 
10352 // Integer Divide
10353 
// 32-bit signed divide: dst = src1 / src2.
// Encoding is supplied by the aarch64_enc_divw encode class (defined
// elsewhere in this file) -- presumably it also covers the Java-mandated
// corner cases of signed division; confirm against that encode block.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10363 
10364 // Long Divide
10365 
// 64-bit signed divide: dst = src1 / src2.
// Encoding is supplied by the aarch64_enc_div encode class defined
// elsewhere in this file.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10375 
10376 // Integer Remainder
10377 
// 32-bit signed remainder, computed as divide + multiply-subtract:
//   rscratch1 = src1 / src2;  dst = src1 - rscratch1 * src2
// Encoding is supplied by the aarch64_enc_modw encode class defined
// elsewhere in this file.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10388 
10389 // Long Remainder
10390 
// 64-bit signed remainder, computed as divide + multiply-subtract:
//   rscratch1 = src1 / src2;  dst = src1 - rscratch1 * src2
// Encoding is supplied by the aarch64_enc_mod encode class defined
// elsewhere in this file.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // "\n\t" (not a bare "\n") keeps the second disassembly line indented,
  // consistent with the 32-bit modI rule above.
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10401 
10402 // Unsigned Integer Divide
10403 
// 32-bit unsigned divide: dst = src1 /u src2.
instruct UdivI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (UDivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "udivw  $dst, $src1, $src2" %}

  ins_encode %{
    __ udivw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(idiv_reg_reg);
%}
10416 
10417 //  Unsigned Long Divide
10418 
// 64-bit unsigned divide: dst = src1 /u src2.
instruct UdivL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (UDivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "udiv   $dst, $src1, $src2" %}

  ins_encode %{
    __ udiv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ldiv_reg_reg);
%}
10431 
10432 // Unsigned Integer Remainder
10433 
// 32-bit unsigned remainder via divide + multiply-subtract:
//   rscratch1 = src1 /u src2;  dst = src1 - rscratch1 * src2
instruct UmodI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (UModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "udivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode %{
    __ udivw(rscratch1, as_Register($src1$$reg), as_Register($src2$$reg));
    __ msubw(as_Register($dst$$reg), rscratch1,
             as_Register($src2$$reg), as_Register($src1$$reg));
  %}

  ins_pipe(idiv_reg_reg);
%}
10448 
10449 // Unsigned Long Remainder
10450 
// 64-bit unsigned remainder via divide + multiply-subtract:
//   rscratch1 = src1 /u src2;  dst = src1 - rscratch1 * src2
instruct UModL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (UModL src1 src2));

  ins_cost(INSN_COST * 38);
  // "\n\t" (not a bare "\n") keeps the second disassembly line indented,
  // consistent with the 32-bit UmodI_reg_reg rule above.
  format %{ "udiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode %{
    __ udiv(rscratch1, $src1$$Register, $src2$$Register);
    __ msub($dst$$Register, rscratch1, $src2$$Register, $src1$$Register);
  %}

  ins_pipe(ldiv_reg_reg);
%}
10465 
10466 // Integer Shifts
10467 
10468 // Shift Left Register
// 32-bit left shift by a register amount.
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10483 
10484 // Shift Left Immediate
// 32-bit left shift by an immediate; only the low 5 bits of the
// constant are encoded, matching Java shift semantics.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw($dst$$Register, $src1$$Register, $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10499 
10500 // Shift Right Logical Register
// 32-bit logical (unsigned) right shift by a register amount.
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10515 
10516 // Shift Right Logical Immediate
// 32-bit logical (unsigned) right shift by an immediate; shift amount
// masked to 5 bits, matching Java shift semantics.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw($dst$$Register, $src1$$Register, $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10531 
10532 // Shift Right Arithmetic Register
// 32-bit arithmetic (signed) right shift by a register amount.
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10547 
10548 // Shift Right Arithmetic Immediate
// 32-bit arithmetic (signed) right shift by an immediate; shift amount
// masked to 5 bits, matching Java shift semantics.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw($dst$$Register, $src1$$Register, $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10563 
10564 // Combined Int Mask and Right Shift (using UBFM)
10565 // TODO
10566 
10567 // Long Shifts
10568 
10569 // Shift Left Register
// 64-bit left shift by a register amount.
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10584 
10585 // Shift Left Immediate
// 64-bit left shift by an immediate; shift amount masked to 6 bits,
// matching Java shift semantics.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl($dst$$Register, $src1$$Register, $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10600 
10601 // Shift Right Logical Register
// 64-bit logical (unsigned) right shift by a register amount.
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10616 
10617 // Shift Right Logical Immediate
// 64-bit logical (unsigned) right shift by an immediate; shift amount
// masked to 6 bits, matching Java shift semantics.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr($dst$$Register, $src1$$Register, $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10632 
10633 // A special-case pattern for card table stores.
// Logical right shift of a pointer reinterpreted as a long (CastP2X);
// special-cased here for card table store address computation.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr($dst$$Register, $src1$$Register, $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10648 
10649 // Shift Right Arithmetic Register
// 64-bit arithmetic (signed) right shift by a register amount.
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10664 
10665 // Shift Right Arithmetic Immediate
// 64-bit arithmetic (signed) right shift by an immediate; shift amount
// masked to 6 bits, matching Java shift semantics.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr($dst$$Register, $src1$$Register, $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10680 
10681 // BEGIN This section of the file is automatically generated. Do not edit --------------
10682 // This section is generated from aarch64_ad.m4
10683 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ -1 ==> ~src1, implemented as eon with the zero register.
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ -1 ==> ~src1 (32-bit), implemented as eonw with the zero register.
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
10721 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 0 - (src1 >>> src2) folded into a single negw with an LSR-shifted operand.
instruct NegI_reg_URShift_reg(iRegINoSp dst,
                              immI0 zero, iRegIorL2I src1, immI src2) %{
  match(Set dst (SubI zero (URShiftI src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "negw  $dst, $src1, LSR $src2" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSR, $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 0 - (src1 >> src2) folded into a single negw with an ASR-shifted operand.
instruct NegI_reg_RShift_reg(iRegINoSp dst,
                              immI0 zero, iRegIorL2I src1, immI src2) %{
  match(Set dst (SubI zero (RShiftI src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "negw  $dst, $src1, ASR $src2" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::ASR, $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 0 - (src1 << src2) folded into a single negw with an LSL-shifted operand.
instruct NegI_reg_LShift_reg(iRegINoSp dst,
                              immI0 zero, iRegIorL2I src1, immI src2) %{
  match(Set dst (SubI zero (LShiftI src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "negw  $dst, $src1, LSL $src2" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSL, $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: 0 - (src1 >>> src2) folded into a single neg with LSR shift.
instruct NegL_reg_URShift_reg(iRegLNoSp dst,
                              immL0 zero, iRegL src1, immI src2) %{
  match(Set dst (SubL zero (URShiftL src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "neg  $dst, $src1, LSR $src2" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSR, $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: 0 - (src1 >> src2) folded into a single neg with ASR shift.
instruct NegL_reg_RShift_reg(iRegLNoSp dst,
                              immL0 zero, iRegL src1, immI src2) %{
  match(Set dst (SubL zero (RShiftL src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "neg  $dst, $src1, ASR $src2" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::ASR, $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: 0 - (src1 << src2) folded into a single neg with LSL shift.
instruct NegL_reg_LShift_reg(iRegLNoSp dst,
                              immL0 zero, iRegL src1, immI src2) %{
  match(Set dst (SubL zero (LShiftL src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "neg  $dst, $src1, LSL $src2" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSL, $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10823 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & ~src2 ==> bicw (32-bit bit-clear)
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & ~src2 ==> bic (64-bit bit-clear)
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | ~src2 ==> ornw (32-bit or-not)
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | ~src2 ==> orn (64-bit or-not)
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// -1 ^ (src2 ^ src1) == ~(src1 ^ src2) ==> eonw (32-bit xor-not)
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// -1 ^ (src2 ^ src1) == ~(src1 ^ src2) ==> eon (64-bit xor-not)
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10931 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val >>> shift)) ==> bicw
// i.e. dst = src1 & ~(src2 >>> src3), fused into one bicw with an
// LSR-shifted second operand.
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val >>> shift)) ==> bic
// i.e. dst = src1 & ~(src2 >>> src3), 64-bit form.
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val >> shift)) ==> bicw
// i.e. dst = src1 & ~(src2 >> src3), with an ASR-shifted second operand.
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val >> shift)) ==> bic
// i.e. dst = src1 & ~(src2 >> src3), 64-bit form.
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val ror shift)) ==> bicw
// i.e. dst = src1 & ~(src2 rotate-right src3).
instruct AndI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val ror shift)) ==> bic
// i.e. dst = src1 & ~(src2 rotate-right src3), 64-bit form.
instruct AndL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val << shift)) ==> bicw
// i.e. dst = src1 & ~(src2 << src3), with an LSL-shifted second operand.
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val << shift)) ==> bic
// i.e. dst = src1 & ~(src2 << src3), 64-bit form.
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11099 
11100 // This pattern is automatically generated from aarch64_ad.m4
11101 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11102 // val ^ (-1 ^ (val >>> shift)) ==> eonw
11103 instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
11104                          iRegIorL2I src1, iRegIorL2I src2,
11105                          immI src3, immI_M1 src4) %{
11106   match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
11107   ins_cost(1.9 * INSN_COST);
11108   format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}
11109 
11110   ins_encode %{
11111     __ eonw(as_Register($dst$$reg),
11112               as_Register($src1$$reg),
11113               as_Register($src2$$reg),
11114               Assembler::LSR,
11115               $src3$$constant & 0x1f);
11116   %}
11117 
11118   ins_pipe(ialu_reg_reg_shift);
11119 %}
11120 
11121 // This pattern is automatically generated from aarch64_ad.m4
11122 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11123 // val ^ (-1 ^ (val >>> shift)) ==> eon
11124 instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
11125                          iRegL src1, iRegL src2,
11126                          immI src3, immL_M1 src4) %{
11127   match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
11128   ins_cost(1.9 * INSN_COST);
11129   format %{ "eon  $dst, $src1, $src2, LSR $src3" %}
11130 
11131   ins_encode %{
11132     __ eon(as_Register($dst$$reg),
11133               as_Register($src1$$reg),
11134               as_Register($src2$$reg),
11135               Assembler::LSR,
11136               $src3$$constant & 0x3f);
11137   %}
11138 
11139   ins_pipe(ialu_reg_reg_shift);
11140 %}
11141 
11142 // This pattern is automatically generated from aarch64_ad.m4
11143 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11144 // val ^ (-1 ^ (val >> shift)) ==> eonw
11145 instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
11146                          iRegIorL2I src1, iRegIorL2I src2,
11147                          immI src3, immI_M1 src4) %{
11148   match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
11149   ins_cost(1.9 * INSN_COST);
11150   format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}
11151 
11152   ins_encode %{
11153     __ eonw(as_Register($dst$$reg),
11154               as_Register($src1$$reg),
11155               as_Register($src2$$reg),
11156               Assembler::ASR,
11157               $src3$$constant & 0x1f);
11158   %}
11159 
11160   ins_pipe(ialu_reg_reg_shift);
11161 %}
11162 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val >> shift)) ==> eon
// (the immL_M1 src4 operand is consumed by the match rule; it is not emitted)
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);  // shift amount modulo 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11183 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val ror shift)) ==> eonw
// (the immI_M1 src4 operand is consumed by the match rule; it is not emitted)
instruct XorI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (XorI src4 (XorI(RotateRight src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);  // rotate amount modulo 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11204 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val ror shift)) ==> eon
// (the immL_M1 src4 operand is consumed by the match rule; it is not emitted)
instruct XorL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (XorL src4 (XorL(RotateRight src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);  // rotate amount modulo 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11225 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val << shift)) ==> eonw
// (the immI_M1 src4 operand is consumed by the match rule; it is not emitted)
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);  // shift amount modulo 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11246 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val << shift)) ==> eon
// (the immL_M1 src4 operand is consumed by the match rule; it is not emitted)
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);  // shift amount modulo 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11267 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val >>> shift)) ==> ornw
// (the immI_M1 src4 operand is consumed by the match rule; it is not emitted)
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);  // shift amount modulo 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11288 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val >>> shift)) ==> orn
// (the immL_M1 src4 operand is consumed by the match rule; it is not emitted)
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);  // shift amount modulo 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11309 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val >> shift)) ==> ornw
// (the immI_M1 src4 operand is consumed by the match rule; it is not emitted)
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);  // shift amount modulo 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11330 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val >> shift)) ==> orn
// (the immL_M1 src4 operand is consumed by the match rule; it is not emitted)
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);  // shift amount modulo 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11351 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val ror shift)) ==> ornw
// (the immI_M1 src4 operand is consumed by the match rule; it is not emitted)
instruct OrI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);  // rotate amount modulo 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11372 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val ror shift)) ==> orn
// (the immL_M1 src4 operand is consumed by the match rule; it is not emitted)
instruct OrL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);  // rotate amount modulo 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11393 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val << shift)) ==> ornw
// (the immI_M1 src4 operand is consumed by the match rule; it is not emitted)
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);  // shift amount modulo 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11414 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val << shift)) ==> orn
// (the immL_M1 src4 operand is consumed by the match rule; it is not emitted)
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);  // shift amount modulo 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11435 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 & (val2 >>> shift) ==> andw
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);  // shift amount modulo 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11456 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 & (val2 >>> shift) ==> andr
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);  // shift amount modulo 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11477 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 & (val2 >> shift) ==> andw
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);  // shift amount modulo 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11498 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 & (val2 >> shift) ==> andr
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);  // shift amount modulo 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11519 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 & (val2 << shift) ==> andw
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);  // shift amount modulo 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11540 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 & (val2 << shift) ==> andr
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);  // shift amount modulo 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11561 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 & (val2 ror shift) ==> andw
instruct AndI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);  // rotate amount modulo 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11582 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 & (val2 ror shift) ==> andr
instruct AndL_reg_RotateRight_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);  // rotate amount modulo 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11603 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 ^ (val2 >>> shift) ==> eorw
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);  // shift amount modulo 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11624 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 ^ (val2 >>> shift) ==> eor
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);  // shift amount modulo 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11645 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 ^ (val2 >> shift) ==> eorw
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);  // shift amount modulo 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11666 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 ^ (val2 >> shift) ==> eor
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);  // shift amount modulo 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11687 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 ^ (val2 << shift) ==> eorw
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);  // shift amount modulo 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11708 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 ^ (val2 << shift) ==> eor
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);  // shift amount modulo 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11729 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 ^ (val2 ror shift) ==> eorw
instruct XorI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);  // rotate amount modulo 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11750 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 ^ (val2 ror shift) ==> eor
instruct XorL_reg_RotateRight_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);  // rotate amount modulo 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11771 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 | (val2 >>> shift) ==> orrw
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);  // shift amount modulo 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11792 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 | (val2 >>> shift) ==> orr
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);  // shift amount modulo 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11813 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 | (val2 >> shift) ==> orrw
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);  // shift amount modulo 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11834 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 | (val2 >> shift) ==> orr
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);  // shift amount modulo 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11855 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 | (val2 << shift) ==> orrw
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);  // shift amount modulo 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11876 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 | (val2 << shift) ==> orr
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);  // shift amount modulo 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11897 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 | (val2 ror shift) ==> orrw
instruct OrI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);  // rotate amount modulo 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11918 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 | (val2 ror shift) ==> orr
instruct OrL_reg_RotateRight_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);  // rotate amount modulo 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11939 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 + (val2 >>> shift) ==> addw
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);  // shift amount modulo 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11960 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 + (val2 >>> shift) ==> add
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);  // shift amount modulo 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11981 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 + (val2 >> shift) ==> addw
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);  // shift amount modulo 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12002 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 + (val2 >> shift) ==> add
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);  // shift amount modulo 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12023 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val1 + (val2 << shift) ==> addw
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);  // shift amount modulo 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12044 
12045 // This pattern is automatically generated from aarch64_ad.m4
12046 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12047 instruct AddL_reg_LShift_reg(iRegLNoSp dst,
12048                          iRegL src1, iRegL src2,
12049                          immI src3) %{
12050   match(Set dst (AddL src1 (LShiftL src2 src3)));
12051 
12052   ins_cost(1.9 * INSN_COST);
12053   format %{ "add  $dst, $src1, $src2, LSL $src3" %}
12054 
12055   ins_encode %{
12056     __ add(as_Register($dst$$reg),
12057               as_Register($src1$$reg),
12058               as_Register($src2$$reg),
12059               Assembler::LSL,
12060               $src3$$constant & 0x3f);
12061   %}
12062 
12063   ins_pipe(ialu_reg_reg_shift);
12064 %}
12065 
// ---- Subtract with a shift-by-constant second operand ----
// Mirrors the Add*_reg_*Shift_reg family above: a constant shift of src2
// is folded into the AArch64 "sub (shifted register)" form (LSR/ASR/LSL),
// with the shift count masked to the operand width (0x1f / 0x3f).

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (src2 >>> src3), 32-bit, logical shift.
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (src2 >>> src3), 64-bit, logical shift.
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (src2 >> src3), 32-bit, arithmetic shift.
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (src2 >> src3), 64-bit, arithmetic shift.
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (src2 << src3), 32-bit.
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (src2 << src3), 64-bit.
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12191 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// The two-shift sequence (x << L) >> R collapses to a single bitfield-move
// instruction: SBFM (signed) / UBFM (unsigned) with immr = (R - L) mod width
// and imms = (width-1) - L select and re-position exactly the same bits.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of sbfmL above (SBFMW, width 32).
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// Unsigned (logical right-shift) counterpart of sbfmL: emits UBFM.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit unsigned variant: emits UBFMW.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12283 
// Bitfield extract with shift & mask
//
// (x >>> rshift) & mask, where mask+1 is a power of two, is a single UBFX
// (unsigned bitfield extract).  The predicates check that the extracted
// field (rshift + log2(mask+1) bits) fits within the register width.

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1))
;
  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));
  // Make sure we are not going to exceed what ubfx can do.
  predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The 64-bit ubfx zero-extends the extracted field, so the ConvI2L is free.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12349 
12350 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// (x & mask) << lshift, mask+1 a power of two, is a single UBFIZ
// (unsigned bitfield insert in zero).
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  predicate((exact_log2_long(n->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// NOTE(review): the predicate bound here is <= 31 (not 31 + 1) — presumably
// because the int result is then zero-extended to long; confirm against the m4.
instruct ubfizwIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (LShiftI (AndI src mask) lshift)));
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_positive_bitmaskI guarantees it.
instruct ubfizLConvL2I(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
%{
  match(Set dst (ConvL2I (LShiftL (AndL src mask) lshift)));
  predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// If there is a convert I to L block between an AndI and a LShiftL, we can also match ubfiz
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// If there is a convert L to I block between an AndL and a LShiftI, we can also match ubfiz
instruct ubfizLConvL2Ix(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
%{
  match(Set dst (LShiftI (ConvL2I (AndL src mask)) lshift));
  predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 31)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Can skip int2long conversions after AND with small bitmask
// ubfiz with lshift = 0 zero-extends the masked field directly into the
// 64-bit destination, so no separate i2l move is needed.
instruct ubfizIConvI2LAndI(iRegLNoSp dst, iRegI src, immI_bitmask msk)
%{
  match(Set dst (ConvI2L (AndI src msk)));
  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, 0, exact_log2($msk + 1) " %}
  ins_encode %{
    __ ubfiz(as_Register($dst$$reg), as_Register($src$$reg), 0, exact_log2($msk$$constant + 1));
  %}
  ins_pipe(ialu_reg_shift);
%}
12496 
12497 
// Rotations
//
// (x << lshift) | (x >>> rshift) with lshift + rshift == width is a rotate;
// the predicates enforce that the two shift counts sum to the register
// width (mod width).  EXTR with both sources equal performs the rotate.
// NOTE(review): the 32-bit variants print "extr" in the format string but
// emit extrw — cosmetic only; the fix would belong in aarch64_ad.m4.

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Same rotate idiom but combined with Add instead of Or; when the shifted
// fields do not overlap, Add and Or compute the same value.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12570 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate right by a constant: EXTR with both sources the same register.
instruct rorI_imm(iRegINoSp dst, iRegI src, immI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ extrw(as_Register($dst$$reg), as_Register($src$$reg), as_Register($src$$reg),
               $shift$$constant & 0x1f);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct rorL_imm(iRegLNoSp dst, iRegL src, immI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ extr(as_Register($dst$$reg), as_Register($src$$reg), as_Register($src$$reg),
               $shift$$constant & 0x3f);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate right by a register amount: RORV / RORVW take the count mod width.
instruct rorI_reg(iRegINoSp dst, iRegI src, iRegI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg), as_Register($shift$$reg));
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct rorL_reg(iRegLNoSp dst, iRegL src, iRegI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ rorv(as_Register($dst$$reg), as_Register($src$$reg), as_Register($shift$$reg));
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// AArch64 has no rotate-left-by-register; rol(x, n) == ror(x, -n), so the
// count is negated (subw from zr) into rscratch1 and RORV is used.
instruct rolI_reg(iRegINoSp dst, iRegI src, iRegI shift)
%{
  match(Set dst (RotateLeft src shift));

  ins_cost(INSN_COST);
  format %{ "rol    $dst, $src, $shift" %}

  ins_encode %{
     __ subw(rscratch1, zr, as_Register($shift$$reg));
     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg), rscratch1);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct rolL_reg(iRegLNoSp dst, iRegL src, iRegI shift)
%{
  match(Set dst (RotateLeft src shift));

  ins_cost(INSN_COST);
  format %{ "rol    $dst, $src, $shift" %}

  ins_encode %{
     __ subw(rscratch1, zr, as_Register($shift$$reg));
     __ rorv(as_Register($dst$$reg), as_Register($src$$reg), rscratch1);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12664 
12665 
12666 // Add/subtract (extended)
12667 
12668 // This pattern is automatically generated from aarch64_ad.m4
12669 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12670 instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
12671 %{
12672   match(Set dst (AddL src1 (ConvI2L src2)));
12673   ins_cost(INSN_COST);
12674   format %{ "add  $dst, $src1, $src2, sxtw" %}
12675 
12676    ins_encode %{
12677      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12678             as_Register($src2$$reg), ext::sxtw);
12679    %}
12680   ins_pipe(ialu_reg_reg);
12681 %}
12682 
12683 // This pattern is automatically generated from aarch64_ad.m4
12684 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12685 instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
12686 %{
12687   match(Set dst (SubL src1 (ConvI2L src2)));
12688   ins_cost(INSN_COST);
12689   format %{ "sub  $dst, $src1, $src2, sxtw" %}
12690 
12691    ins_encode %{
12692      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12693             as_Register($src2$$reg), ext::sxtw);
12694    %}
12695   ins_pipe(ialu_reg_reg);
12696 %}
12697 
12698 // This pattern is automatically generated from aarch64_ad.m4
12699 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12700 instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
12701 %{
12702   match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
12703   ins_cost(INSN_COST);
12704   format %{ "add  $dst, $src1, $src2, sxth" %}
12705 
12706    ins_encode %{
12707      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12708             as_Register($src2$$reg), ext::sxth);
12709    %}
12710   ins_pipe(ialu_reg_reg);
12711 %}
12712 
12713 // This pattern is automatically generated from aarch64_ad.m4
12714 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12715 instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
12716 %{
12717   match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
12718   ins_cost(INSN_COST);
12719   format %{ "add  $dst, $src1, $src2, sxtb" %}
12720 
12721    ins_encode %{
12722      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12723             as_Register($src2$$reg), ext::sxtb);
12724    %}
12725   ins_pipe(ialu_reg_reg);
12726 %}
12727 
12728 // This pattern is automatically generated from aarch64_ad.m4
12729 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12730 instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
12731 %{
12732   match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
12733   ins_cost(INSN_COST);
12734   format %{ "add  $dst, $src1, $src2, uxtb" %}
12735 
12736    ins_encode %{
12737      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12738             as_Register($src2$$reg), ext::uxtb);
12739    %}
12740   ins_pipe(ialu_reg_reg);
12741 %}
12742 
12743 // This pattern is automatically generated from aarch64_ad.m4
12744 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12745 instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
12746 %{
12747   match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
12748   ins_cost(INSN_COST);
12749   format %{ "add  $dst, $src1, $src2, sxth" %}
12750 
12751    ins_encode %{
12752      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12753             as_Register($src2$$reg), ext::sxth);
12754    %}
12755   ins_pipe(ialu_reg_reg);
12756 %}
12757 
12758 // This pattern is automatically generated from aarch64_ad.m4
12759 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12760 instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
12761 %{
12762   match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
12763   ins_cost(INSN_COST);
12764   format %{ "add  $dst, $src1, $src2, sxtw" %}
12765 
12766    ins_encode %{
12767      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12768             as_Register($src2$$reg), ext::sxtw);
12769    %}
12770   ins_pipe(ialu_reg_reg);
12771 %}
12772 
12773 // This pattern is automatically generated from aarch64_ad.m4
12774 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12775 instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
12776 %{
12777   match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
12778   ins_cost(INSN_COST);
12779   format %{ "add  $dst, $src1, $src2, sxtb" %}
12780 
12781    ins_encode %{
12782      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12783             as_Register($src2$$reg), ext::sxtb);
12784    %}
12785   ins_pipe(ialu_reg_reg);
12786 %}
12787 
12788 // This pattern is automatically generated from aarch64_ad.m4
12789 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12790 instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
12791 %{
12792   match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
12793   ins_cost(INSN_COST);
12794   format %{ "add  $dst, $src1, $src2, uxtb" %}
12795 
12796    ins_encode %{
12797      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12798             as_Register($src2$$reg), ext::uxtb);
12799    %}
12800   ins_pipe(ialu_reg_reg);
12801 %}
12802 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): the instructs below are generated from aarch64_ad.m4;
// documentation changes should also be made in the m4 source so they
// survive regeneration.
//
// dst(I) = src1 + (src2 & 0xFF): the AND with 255 is folded into an ADDW
// with a uxtb extended-register operand.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst(I) = src1 + (src2 & 0xFFFF): AND with 65535 folded into ADDW + uxth.
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst(L) = src1 + (src2 & 0xFF): AND with 255 folded into ADD + uxtb.
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst(L) = src1 + (src2 & 0xFFFF): AND with 65535 folded into ADD + uxth.
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst(L) = src1 + (src2 & 0xFFFFFFFF): AND with 2^32-1 folded into ADD + uxtw.
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12877 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- documentation changes should also be
// made in aarch64_ad.m4.  These are the SUB counterparts of the
// AddExt*_and rules above: the masking AND is folded into the extended-
// register form of SUB/SUBW.
//
// dst(I) = src1 - (src2 & 0xFF): SUBW with uxtb-extended src2.
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst(I) = src1 - (src2 & 0xFFFF): SUBW with uxth-extended src2.
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst(L) = src1 - (src2 & 0xFF): SUB with uxtb-extended src2.
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst(L) = src1 - (src2 & 0xFFFF): SUB with uxth-extended src2.
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst(L) = src1 - (src2 & 0xFFFFFFFF): SUB with uxtw-extended src2.
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12952 
12953 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- documentation changes should also be
// made in aarch64_ad.m4.  These rules fold sign_extend(src2) << lshift2
// into the shifted extended-register form of ADD/SUB: the inner
// (src2 << k) >> k pair (k = 56/48/32 for byte/half/word) is the
// sign-extension idiom, and the outer left shift becomes the extend
// amount (immIExt constrains it to the 0..4 range the instruction allows).
//
// dst(L) = src1 + (sign_extend_byte(src2) << lshift2).
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst(L) = src1 + (sign_extend_half(src2) << lshift2).
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst(L) = src1 + (sign_extend_word(src2) << lshift2).
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst(L) = src1 - (sign_extend_byte(src2) << lshift2).
instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst(L) = src1 - (sign_extend_half(src2) << lshift2).
instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst(L) = src1 - (sign_extend_word(src2) << lshift2).
instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13043 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- documentation changes should also be
// made in aarch64_ad.m4.  32-bit counterparts of the sxt*_shift rules
// above; the shift-pair constant k is 24/16 for byte/half in a 32-bit word.
//
// dst(I) = src1 + (sign_extend_byte(src2) << lshift2).
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst(I) = src1 + (sign_extend_half(src2) << lshift2).
instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst(I) = src1 - (sign_extend_byte(src2) << lshift2).
instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst(I) = src1 - (sign_extend_half(src2) << lshift2).
instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13103 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst(L) = src1 +/- ((long)src2 << lshift): the explicit int-to-long
// conversion maps onto the sxtw extended-register form with a shift.
// (Names say "ExtI" because the extended operand is an int, though the
// matched node is a 64-bit AddL/SubL -- m4-generated naming.)
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst(L) = src1 - ((long)src2 << lshift): SUB with sxtw-extended, shifted src2.
instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13133 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- documentation changes should also be
// made in aarch64_ad.m4.  These rules fold (src2 & mask) << lshift into
// the shifted, zero-extending extended-register form of ADD/SUB.
//
// dst(L) = src1 + ((src2 & 0xFF) << lshift): ADD with uxtb #lshift.
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst(L) = src1 + ((src2 & 0xFFFF) << lshift): ADD with uxth #lshift.
instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst(L) = src1 + ((src2 & 0xFFFFFFFF) << lshift): ADD with uxtw #lshift.
instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst(L) = src1 - ((src2 & 0xFF) << lshift): SUB with uxtb #lshift.
instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst(L) = src1 - ((src2 & 0xFFFF) << lshift): SUB with uxth #lshift.
instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst(L) = src1 - ((src2 & 0xFFFFFFFF) << lshift): SUB with uxtw #lshift.
instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13223 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- documentation changes should also be
// made in aarch64_ad.m4.  32-bit counterparts of the uxt*_and_shift rules
// above.
//
// dst(I) = src1 + ((src2 & 0xFF) << lshift): ADDW with uxtb #lshift.
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst(I) = src1 + ((src2 & 0xFFFF) << lshift): ADDW with uxth #lshift.
instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst(I) = src1 - ((src2 & 0xFF) << lshift): SUBW with uxtb #lshift.
instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst(I) = src1 - ((src2 & 0xFFFF) << lshift): SUBW with uxth #lshift.
instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13283 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- documentation changes should also be
// made in aarch64_ad.m4.  The instructs below have no match rule (only an
// effect clause): they are building blocks referenced by the MinI/MaxI
// expand rules further down.  Each emits a single conditional-select
// variant that consumes the flags produced by a preceding compare.
//
// dst = (cr says LT) ? src1 : src2.
instruct cmovI_reg_reg_lt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE src2, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 lt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             $src2$$Register,
             Assembler::LT);
  %}
  ins_pipe(icond_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (cr says GT) ? src1 : src2.
instruct cmovI_reg_reg_gt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE src2, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 gt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             $src2$$Register,
             Assembler::GT);
  %}
  ins_pipe(icond_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (cr says LT) ? src1 : 0 (zr).
instruct cmovI_reg_imm0_lt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, zr lt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (cr says GT) ? src1 : 0 (zr).
instruct cmovI_reg_imm0_gt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, zr gt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (cr says LE) ? src1 : zr + 1, i.e. the constant 1 on the else path
// (CSINC increments the second operand).
instruct cmovI_reg_imm1_le(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, $src1, zr le\t"  %}

  ins_encode %{
    __ csincw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LE);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (cr says GT) ? src1 : 1 (zr incremented by CSINC).
instruct cmovI_reg_imm1_gt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, $src1, zr gt\t"  %}

  ins_encode %{
    __ csincw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (cr says LT) ? src1 : -1, i.e. ~zr on the else path (CSINV inverts
// the second operand).
instruct cmovI_reg_immM1_lt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csinvw $dst, $src1, zr lt\t"  %}

  ins_encode %{
    __ csinvw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (cr says GE) ? src1 : -1 (~zr via CSINV).
instruct cmovI_reg_immM1_ge(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csinvw $dst, $src1, zr ge\t"  %}

  ins_encode %{
    __ csinvw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GE);
  %}
  ins_pipe(icond_reg);
%}
13419 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated section -- documentation changes should also be
// made in aarch64_ad.m4.  MinI/MaxI against the constants 0, 1, -1 expand
// to a compare against zero (compI_reg_imm0, defined elsewhere in this
// file -- presumably "cmp src, #0"; verify there) followed by one of the
// conditional-select building blocks above.  Each constant has a
// commuted twin so both operand orders of the ideal node are covered.
//
// min(src, 0): src < 0 ? src : 0.
instruct minI_reg_imm0(iRegINoSp dst, iRegIorL2I src, immI0 imm)
%{
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(0, src): commuted form of minI_reg_imm0.
instruct minI_imm0_reg(iRegINoSp dst, immI0 imm, iRegIorL2I src)
%{
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(src, 1): csinc picks src when src <= 0, else zr + 1 == 1.
instruct minI_reg_imm1(iRegINoSp dst, iRegIorL2I src, immI_1 imm)
%{
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_le(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(1, src): commuted form of minI_reg_imm1.
instruct minI_imm1_reg(iRegINoSp dst, immI_1 imm, iRegIorL2I src)
%{
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_le(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(src, -1): csinv picks src when src < 0, else ~zr == -1.
instruct minI_reg_immM1(iRegINoSp dst, iRegIorL2I src, immI_M1 imm)
%{
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(-1, src): commuted form of minI_reg_immM1.
instruct minI_immM1_reg(iRegINoSp dst, immI_M1 imm, iRegIorL2I src)
%{
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(src, 0): src > 0 ? src : 0.
instruct maxI_reg_imm0(iRegINoSp dst, iRegIorL2I src, immI0 imm)
%{
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(0, src): commuted form of maxI_reg_imm0.
instruct maxI_imm0_reg(iRegINoSp dst, immI0 imm, iRegIorL2I src)
%{
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(src, 1): csinc picks src when src > 0, else zr + 1 == 1.
instruct maxI_reg_imm1(iRegINoSp dst, iRegIorL2I src, immI_1 imm)
%{
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(1, src): commuted form of maxI_reg_imm1.
instruct maxI_imm1_reg(iRegINoSp dst, immI_1 imm, iRegIorL2I src)
%{
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(src, -1): csinv picks src when src >= 0, else ~zr == -1.
instruct maxI_reg_immM1(iRegINoSp dst, iRegIorL2I src, immI_M1 imm)
%{
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_ge(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(-1, src): commuted form of maxI_reg_immM1.
instruct maxI_immM1_reg(iRegINoSp dst, immI_M1 imm, iRegIorL2I src)
%{
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_ge(dst, src, cr);
  %}
%}
13575 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst(I) = bit-reversal of src: ReverseI maps directly onto RBITW.
instruct bits_reverse_I(iRegINoSp dst, iRegIorL2I src)
%{
  match(Set dst (ReverseI src));
  ins_cost(INSN_COST);
  format %{ "rbitw  $dst, $src" %}
  ins_encode %{
    __ rbitw($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst(L) = bit-reversal of src: ReverseL maps directly onto RBIT.
instruct bits_reverse_L(iRegLNoSp dst, iRegL src)
%{
  match(Set dst (ReverseL src));
  ins_cost(INSN_COST);
  format %{ "rbit  $dst, $src" %}
  ins_encode %{
    __ rbit($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
13601 
13602 
13603 // END This section of the file is automatically generated. Do not edit --------------
13604 
13605 
13606 // ============================================================================
13607 // Floating Point Arithmetic Instructions
13608 
// Single-precision FP add: dst = src1 + src2 via FADDS.
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP add: dst = src1 + src2 via FADDD.
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Single-precision FP subtract: dst = src1 - src2 via FSUBS.
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP subtract: dst = src1 - src2 via FSUBD.
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Single-precision FP multiply: dst = src1 * src2 via FMULS.
// (Multiply carries a slightly higher cost than add/sub: 6 vs 5.)
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP multiply: dst = src1 * src2 via FMULD.
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13698 
// Fused multiply-add, single precision: dst = src1 * src2 + src3.
// Only selected when UseFMA is on (asserted in the encoder).
// src1 * src2 + src3
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Fused multiply-add, double precision: dst = src1 * src2 + src3.
// src1 * src2 + src3
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Fused multiply-subtract, single precision: dst = -(src1 * src2) + src3.
// src1 * (-src2) + src3
// "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3"
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Fused multiply-subtract, double precision: dst = -(src1 * src2) + src3.
// src1 * (-src2) + src3
// "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3"
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13768 
// Negated fused multiply-add, single precision:
// dst = -(src1 * src2) - src3 (FNMADD).
// src1 * (-src2) - src3
// "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Negated fused multiply-add, double precision:
// dst = -(src1 * src2) - src3 (FNMADD).
// src1 * (-src2) - src3
// "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13804 
// Negated fused multiply-subtract, single precision:
// dst = src1 * src2 - src3 (FNMSUB).
// NOTE(review): the `zero` operand is not referenced by the match rule or
// the encoding and appears vestigial — confirm against ADLC before removing.
// src1 * src2 - src3
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Negated fused multiply-subtract, double precision:
// dst = src1 * src2 - src3 (FNMSUB).
// NOTE(review): `zero` is likewise unused here — see note on mnsubF_reg_reg.
// src1 * src2 - src3
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    // n.b. insn name should be fnmsubd
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13839 

// IEEE-754-2008 min/max with Java semantics (NaN propagation, -0.0 < +0.0),
// provided directly by the AArch64 fmax/fmin scalar instructions.

// Math.max(FF)F
instruct maxF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxF src1 src2));

  format %{ "fmaxs   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.min(FF)F
instruct minF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinF src1 src2));

  format %{ "fmins   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.max(DD)D
instruct maxD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MaxD src1 src2));

  format %{ "fmaxd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Math.min(DD)D
instruct minD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MinD src1 src2));

  format %{ "fmind   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13896 

// Single-precision float divide; emits "fdivs Sd, Sn, Sm".
// High ins_cost reflects the long latency of hardware divide.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// Double-precision float divide; emits "fdivd Dd, Dn, Dm".
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
13927 
// Single-precision float negate; emits "fnegs Sd, Sn".
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // Fixed: format previously said "fneg", but the encoder emits fnegs
  // (now consistent with negD_reg_reg's "fnegd"). Debug-output only.
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
13941 
// Double-precision float negate; emits "fnegd Dd, Dn".
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13955 
// Integer absolute value via compare + conditional negate:
//   cmpw src, zr; cnegw dst, src, LT
// The compare clobbers the flags, hence KILL cr.
instruct absI_reg(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (AbsI src));

  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $src, zr\n\t"
            "cnegw $dst, $src, Assembler::LT\t# int abs"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cnegw(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}

// Long absolute value; same compare + conditional-negate pattern as absI_reg
// but using the 64-bit forms (cmp/cneg).
instruct absL_reg(iRegLNoSp dst, iRegL src, rFlagsReg cr)
%{
  match(Set dst (AbsL src));

  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmp  $src, zr\n\t"
            "cneg $dst, $src, Assembler::LT\t# long abs"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cneg(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
13989 
// Single-precision float absolute value; emits "fabss Sd, Sn".
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double-precision float absolute value; emits "fabsd Dd, Dn".
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}

// Fused |src1 - src2|, single precision: matches AbsF(SubF) and emits a
// single fabds instruction instead of a subtract followed by an abs.
instruct absdF_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AbsF (SubF src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "fabds   $dst, $src1, $src2" %}
  ins_encode %{
    __ fabds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Fused |src1 - src2|, double precision; see absdF_reg.
instruct absdD_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AbsD (SubD src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "fabdd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fabdd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
14043 
// Double-precision square root; emits "fsqrtd Dd, Dn".
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  // Fixed: was fp_div_s (the single-precision divide/sqrt pipe); this is a
  // double-precision op and belongs on fp_div_d. Pipe classes only affect
  // instruction scheduling, not the emitted code.
  ins_pipe(fp_div_d);
%}
14056 
// Single-precision square root; emits "fsqrts Sd, Sn".
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (SqrtF src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  // Fixed: was fp_div_d (the double-precision divide/sqrt pipe); this is a
  // single-precision op and belongs on fp_div_s. Scheduling-only change.
  ins_pipe(fp_div_s);
%}
14069 
// Math.rint, floor, ceil
// Selects the AArch64 round-to-integral-in-FP instruction matching the
// requested rounding mode:
//   rmode_rint  -> frintn (round to nearest, ties to even)
//   rmode_floor -> frintm (round toward minus infinity)
//   rmode_ceil  -> frintp (round toward plus infinity)
instruct roundD_reg(vRegD dst, vRegD src, immI rmode) %{
  match(Set dst (RoundDoubleMode src rmode));
  format %{ "frint  $dst, $src, $rmode" %}
  ins_encode %{
    switch ($rmode$$constant) {
      case RoundDoubleModeNode::rmode_rint:
        __ frintnd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_floor:
        __ frintmd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_ceil:
        __ frintpd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      default:
        // Fixed: the switch previously fell through silently on an
        // unexpected mode, emitting no instruction at all.
        ShouldNotReachHere();
    }
  %}
  ins_pipe(fp_uop_d);
%}
14092 
// Math.copySign(double): take magnitude from src1, sign from src2.
// fnegd(dst, zero) negates the matched +0.0 constant, leaving only the sign
// bit set in dst; bsl then selects the sign bit from src2 and all remaining
// bits from src1.
instruct copySignD_reg(vRegD dst, vRegD src1, vRegD src2, vRegD zero) %{
  match(Set dst (CopySignD src1 (Binary src2 zero)));
  effect(TEMP_DEF dst, USE src1, USE src2, USE zero);
  format %{ "CopySignD  $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg),
                  zero = as_FloatRegister($zero$$reg);
    __ fnegd(dst, zero);
    __ bsl(dst, __ T8B, src2, src1);
  %}
  ins_pipe(fp_uop_d);
%}

// Math.copySign(float): movi materializes the 0x80000000 sign-bit mask
// (0x80 shifted left 24) per lane, then bsl selects the sign bit from src2
// and the rest from src1.
instruct copySignF_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (CopySignF src1 src2));
  effect(TEMP_DEF dst, USE src1, USE src2);
  format %{ "CopySignF  $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg);
    __ movi(dst, __ T2S, 0x80, 24);
    __ bsl(dst, __ T8B, src2, src1);
  %}
  ins_pipe(fp_uop_d);
%}
14121 
// Math.signum(double): branch-free selection. `zero` and `one` are the
// matched 0.0 and 1.0 constants supplied by the Binary subtree.
instruct signumD_reg(vRegD dst, vRegD src, vRegD zero, vRegD one) %{
  match(Set dst (SignumD src (Binary zero one)));
  effect(TEMP_DEF dst, USE src, USE zero, USE one);
  format %{ "signumD  $dst, $src" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg),
                  dst = as_FloatRegister($dst$$reg),
                  zero = as_FloatRegister($zero$$reg),
                  one = as_FloatRegister($one$$reg);
    __ facgtd(dst, src, zero); // dst=0 for +-0.0 and NaN. 0xFFF..F otherwise
    __ ushrd(dst, dst, 1);     // dst=0 for +-0.0 and NaN. 0x7FF..F otherwise
    // Bit selection instruction gets bit from "one" for each enabled bit in
    // "dst", otherwise gets a bit from "src". For "src" that contains +-0.0 or
    // NaN the whole "src" will be copied because "dst" is zero. For all other
    // "src" values dst is 0x7FF..F, which means only the sign bit is copied
    // from "src", and all other bits are copied from 1.0.
    __ bsl(dst, __ T8B, one, src);
  %}
  ins_pipe(fp_uop_d);
%}

// Math.signum(float): same branch-free scheme as signumD_reg, using the
// 32-bit (T2S) shift form.
instruct signumF_reg(vRegF dst, vRegF src, vRegF zero, vRegF one) %{
  match(Set dst (SignumF src (Binary zero one)));
  effect(TEMP_DEF dst, USE src, USE zero, USE one);
  format %{ "signumF  $dst, $src" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg),
                  dst = as_FloatRegister($dst$$reg),
                  zero = as_FloatRegister($zero$$reg),
                  one = as_FloatRegister($one$$reg);
    __ facgts(dst, src, zero);    // dst=0 for +-0.0 and NaN. 0xFFF..F otherwise
    __ ushr(dst, __ T2S, dst, 1); // dst=0 for +-0.0 and NaN. 0x7FF..F otherwise
    // Bit selection instruction gets bit from "one" for each enabled bit in
    // "dst", otherwise gets a bit from "src". For "src" that contains +-0.0 or
    // NaN the whole "src" will be copied because "dst" is zero. For all other
    // "src" values dst is 0x7FF..F, which means only the sign bit is copied
    // from "src", and all other bits are copied from 1.0.
    __ bsl(dst, __ T8B, one, src);
  %}
  ins_pipe(fp_uop_d);
%}
14163 
// Thread.onSpinWait() intrinsic; emits the spin-wait hint sequence chosen by
// MacroAssembler::spin_wait() (configuration-dependent).
instruct onspinwait() %{
  match(OnSpinWait);
  ins_cost(INSN_COST);

  format %{ "onspinwait" %}

  ins_encode %{
    __ spin_wait();
  %}
  ins_pipe(pipe_class_empty);
%}
14175 
14176 // ============================================================================
14177 // Logical Instructions
14178 
14179 // Integer Logical Instructions
14180 
14181 // And Instructions
14182 
14183 
14184 instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
14185   match(Set dst (AndI src1 src2));
14186 
14187   format %{ "andw  $dst, $src1, $src2\t# int" %}
14188 
14189   ins_cost(INSN_COST);
14190   ins_encode %{
14191     __ andw(as_Register($dst$$reg),
14192             as_Register($src1$$reg),
14193             as_Register($src2$$reg));
14194   %}
14195 
14196   ins_pipe(ialu_reg_reg);
14197 %}
14198 
// Integer AND with a logical immediate; emits "andw Wd, Wn, #imm".
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Fixed: format previously said "andsw" (the flag-setting form), but the
  // encoder emits the non-flag-setting andw. Debug-output only.
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14213 
// Or Instructions

// Integer OR (register-register); emits "orrw Wd, Wn, Wm".
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Integer OR with a logical immediate; emits "orrw Wd, Wn, #imm".
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// Integer XOR (register-register); emits "eorw Wd, Wn, Wm".
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Integer XOR with a logical immediate; emits "eorw Wd, Wn, #imm".
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14277 
14278 // Long Logical Instructions
14279 // TODO
14280 
// Long AND (register-register); emits "and Xd, Xn, Xm".
// NOTE(review): `cr` appears in neither the match rule nor an effect()
// clause — looks vestigial; confirm before removing.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Fixed: debug format was annotated "# int" for a long operation.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14295 
// Long AND with a logical immediate; emits "and Xd, Xn, #imm".
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Fixed: debug format was annotated "# int" for a long operation.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14310 
14311 // Or Instructions
14312 
// Long OR (register-register); emits "orr Xd, Xn, Xm".
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  // Fixed: debug format was annotated "# int" for a long operation.
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14327 
// Long OR with a logical immediate; emits "orr Xd, Xn, #imm".
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  // Fixed: debug format was annotated "# int" for a long operation.
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14342 
14343 // Xor Instructions
14344 
// Long XOR (register-register); emits "eor Xd, Xn, Xm".
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  // Fixed: debug format was annotated "# int" for a long operation.
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14359 
// Long XOR with a logical immediate; emits "eor Xd, Xn, #imm".
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  // Fixed: debug format was annotated "# int" for a long operation.
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14374 
// Sign-extending int -> long: sbfm with bit range 0..31 is the canonical
// encoding of "sxtw Xd, Wn".
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// Zero-extending int -> long: (long)i & 0xFFFFFFFF collapses to a single
// ubfm (= "uxtw"). mask is matched against the 32-bit all-ones constant.
// this pattern occurs in bigmath arithmetic
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// long -> int truncation: a 32-bit register move keeps the low word and
// zeroes the upper bits.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
14413 
// double -> float narrowing conversion (fcvt Sd, Dn).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// float -> double widening conversion (fcvt Dd, Sn).
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// float -> int: fcvtzs (round toward zero, saturating) matches Java's
// (int) cast semantics for out-of-range values and NaN.
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// float -> long: fcvtzs to a 64-bit register.
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}

// float -> half-float (FP16) bits; tmp holds the intermediate FP16 value
// before it is moved to the general register.
instruct convF2HF_reg_reg(iRegINoSp dst, vRegF src, vRegF tmp) %{
  match(Set dst (ConvF2HF src));
  format %{ "fcvt $tmp, $src\t# convert single to half precision\n\t"
            "smov $dst, $tmp\t# move result from $tmp to $dst"
  %}
  effect(TEMP tmp);
  ins_encode %{
      __ flt_to_flt16($dst$$Register, $src$$FloatRegister, $tmp$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}

// half-float (FP16) bits -> float; tmp stages the bits in an FP register.
instruct convHF2F_reg_reg(vRegF dst, iRegINoSp src, vRegF tmp) %{
  match(Set dst (ConvHF2F src));
  format %{ "mov $tmp, $src\t# move source from $src to $tmp\n\t"
            "fcvt $dst, $tmp\t# convert half to single precision"
  %}
  effect(TEMP tmp);
  ins_encode %{
      __ flt16_to_flt($dst$$FloatRegister, $src$$Register, $tmp$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}

// signed int -> float (scvtf Sd, Wn).
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// signed long -> float (scvtf Sd, Xn).
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

// double -> int: fcvtzs (round toward zero, saturating); see convF2I.
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// double -> long: fcvtzs to a 64-bit register.
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// signed int -> double (scvtf Dd, Wn).
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// signed long -> double (scvtf Dd, Xn).
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
14567 
// Math.round(double) -> long; delegated to the multi-instruction
// MacroAssembler::java_round_double sequence (needs an FP temp and
// clobbers the flags).
instruct round_double_reg(iRegLNoSp dst, vRegD src, vRegD ftmp, rFlagsReg cr)
%{
  match(Set dst (RoundD src));
  effect(TEMP_DEF dst, TEMP ftmp, KILL cr);
  format %{ "java_round_double $dst,$src"%}
  ins_encode %{
    __ java_round_double($dst$$Register, as_FloatRegister($src$$reg),
                         as_FloatRegister($ftmp$$reg));
  %}
  ins_pipe(pipe_slow);
%}

// Math.round(float) -> int; see round_double_reg.
instruct round_float_reg(iRegINoSp dst, vRegF src, vRegF ftmp, rFlagsReg cr)
%{
  match(Set dst (RoundF src));
  effect(TEMP_DEF dst, TEMP ftmp, KILL cr);
  format %{ "java_round_float $dst,$src"%}
  ins_encode %{
    __ java_round_float($dst$$Register, as_FloatRegister($src$$reg),
                        as_FloatRegister($ftmp$$reg));
  %}
  ins_pipe(pipe_slow);
%}
14591 
// stack <-> reg and reg <-> reg shuffles with no conversion

// Reinterpret float stack slot as int: plain 32-bit GP load from sp+disp.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Reinterpret int stack slot as float: 32-bit FP load from sp+disp.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Reinterpret double stack slot as long: 64-bit GP load from sp+disp.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Reinterpret long stack slot as double: 64-bit FP load from sp+disp.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Spill float register to an int stack slot: 32-bit FP store to sp+disp.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Spill int register to a float stack slot: 32-bit GP store to sp+disp.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
14701 
// Spill double register to a long stack slot: 64-bit FP store to sp+disp.
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Fixed: format operands were reversed ("strd $dst, $src"); the encoder
  // stores $src into the $dst stack slot, matching the sibling
  // MoveF2I_reg_stack's "strs $src, $dst". Debug-output only.
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14719 
// Bit-preserving move: store a long register's raw 64 bits into a
// double-sized stack slot (memory half of an L->D reinterpret).
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    // 64-bit integer store to the stack slot at sp + displacement.
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
14737 
// Bit-preserving register-to-register move: copy a float's raw 32 bits
// into a GP register using fmov (no memory round trip, no conversion).
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}
14755 
// Bit-preserving register-to-register move: copy an int's raw 32 bits
// into an FP register using fmov (no memory round trip, no conversion).
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}
14773 
// Bit-preserving register-to-register move: copy a double's raw 64 bits
// into a GP register using fmov (no memory round trip, no conversion).
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}
14791 
// Bit-preserving register-to-register move: copy a long's raw 64 bits
// into an FP register using fmov (no memory round trip, no conversion).
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
14809 
14810 // ============================================================================
14811 // clearing of an array
14812 
// Zero an array with a variable word count. Operands are pinned to
// fixed registers (cnt in r11, base in r10) and clobbered by the
// zero_words helper; flags are killed as well.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base, KILL cr);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    // zero_words returns nullptr if it failed to emit (e.g. could not
    // reach/emit its stub); bail out of compilation in that case.
    address tpc = __ zero_words($base$$Register, $cnt$$Register);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
14831 
// Zero an array with a small constant word count. The predicate limits
// this form to counts below BlockZeroingLowLimit (expressed in words),
// where the constant-count zero_words expansion is profitable.
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, iRegL_R11 temp, Universe dummy, rFlagsReg cr)
%{
  predicate((uint64_t)n->in(2)->get_long()
            < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
  match(Set dummy (ClearArray cnt base));
  effect(TEMP temp, USE_KILL base, KILL cr);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    // zero_words returns nullptr on emission failure; bail out.
    address tpc = __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
14852 
14853 // ============================================================================
14854 // Overflow Math Instructions
14855 
// Int add overflow check: cmnw (adds discarding the result) sets flags
// for op1 + op2; the consumer tests the V flag.
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
14868 
// Int add overflow check against an add/sub-encodable immediate.
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
14881 
// Long add overflow check: cmn sets flags for op1 + op2; the consumer
// tests the V flag.
instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
14894 
// Long add overflow check against an add/sub-encodable immediate.
// Spelled as "adds zr, ..." which is the underlying form of cmn.
instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "adds  zr, $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ adds(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
14907 
// Int subtract overflow check: cmpw sets flags for op1 - op2; the
// consumer tests the V flag.
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
14920 
// Int subtract overflow check against an add/sub-encodable immediate.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
14933 
// Long subtract overflow check: cmp sets flags for op1 - op2; the
// consumer tests the V flag.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
14946 
// Long subtract overflow check against an add/sub-encodable immediate.
// Emitted as "subs zr, ..." which is the underlying form of cmp.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ subs(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
14959 
// Int negate overflow check: matches OverflowSubI with a zero left
// operand, i.e. 0 - op1; flags set by cmpw against zr.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
14972 
// Long negate overflow check: matches OverflowSubL with a zero left
// operand, i.e. 0 - op1; flags set by cmp against zr.
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
14985 
// Int multiply overflow check producing a flags result. smull computes
// the full 64-bit product; if its upper half is not a pure sign
// extension of the low 32 bits the multiply overflowed (NE). The
// movw/cselw/cmpw tail then translates that NE/EQ outcome into the
// V flag (VS on overflow) so generic cmpOp consumers can test it.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
15006 
// Fused int multiply-overflow-check plus branch. Only used when the If
// tests overflow/no_overflow (see predicate), so the flag-materializing
// tail of overflowMulI_reg can be skipped and the NE/EQ result of the
// sign-extension compare branched on directly.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    // VS (overflow requested) maps to NE, VC to EQ.
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
15028 
// Long multiply overflow check producing a flags result. mul/smulh give
// the 128-bit product; it overflowed iff the high 64 bits are not the
// sign extension (ASR #63) of the low 64 bits (NE). The movw/cselw/cmpw
// tail converts NE/EQ into the V flag (VS on overflow), mirroring
// overflowMulI_reg.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
15051 
// Fused long multiply-overflow-check plus branch. Only used when the If
// tests overflow/no_overflow (see predicate); branches directly on the
// NE/EQ result of the high-half sign-extension compare instead of
// materializing the V flag.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    // VS (overflow requested) maps to NE, VC to EQ.
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
15075 
15076 // ============================================================================
15077 // Compare Instructions
15078 
// Signed int compare, register-register.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
15092 
// Signed int compare against constant zero.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
15106 
// Signed int compare against an immediate encodable in a single
// add/sub-style compare instruction.
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15120 
// Signed int compare against a general immediate; costed higher since
// the encoding may need an extra instruction to materialize it.
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15134 
15135 // Unsigned compare Instructions; really, same as signed compare
15136 // except it should only be used to feed an If or a CMovI which takes a
15137 // cmpOpU.
15138 
// Unsigned int compare, register-register; same cmpw encoding as the
// signed form but produces rFlagsRegU for cmpOpU consumers.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
15152 
// Unsigned int compare against constant zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
15166 
// Unsigned int compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15180 
// Unsigned int compare against a general immediate (may need an extra
// instruction to materialize it, hence the higher cost).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15194 
// Signed long compare, register-register.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
15208 
// Signed long compare against constant zero.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
15222 
// Signed long compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15236 
// Signed long compare against a general immediate (may need an extra
// instruction to materialize it, hence the higher cost).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15250 
// Unsigned long compare, register-register; same cmp encoding as the
// signed form but produces rFlagsRegU for cmpOpU consumers.
instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
15264 
// Unsigned long compare against constant zero.
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
15278 
// Unsigned long compare against an add/sub-encodable immediate.
instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15292 
// Unsigned long compare against a general immediate (may need an extra
// instruction to materialize it, hence the higher cost).
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15306 
// Pointer compare, register-register (unsigned flags, as pointer
// comparisons are unsigned).
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
15320 
// Compressed-oop (narrow pointer) compare, register-register.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
15334 
// Pointer null test: compare a pointer register against constant null.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}
15348 
// Compressed-oop null test: compare a narrow pointer register against
// constant narrow null.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
15362 
15363 // FP comparisons
15364 //
15365 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
15366 // using normal cmpOp. See declaration of rFlagsReg for details.
15367 
// Float compare, register-register; fcmps sets the normal flags
// register (see the FP comparisons note above).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
15381 
// Float compare against constant 0.0 using the immediate-zero form of
// fcmps (no register needed for the zero operand).
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
15395 // FROM HERE
15396 
// Double compare, register-register; fcmpd sets the normal flags
// register (see the FP comparisons note above).
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
15410 
// Double compare against constant 0.0 using the immediate-zero form of
// fcmpd (no register needed for the zero operand).
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
15424 
// Three-way float compare: dst = -1 if src1 < src2 or unordered,
// 0 if equal, +1 if src1 > src2. Branch-free via csinv/csneg.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Format fixed to plain assembly syntax (the previous text used
  // unbalanced function-call parentheses).
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (removed an unused Label that was declared and bound but never
    // branched to)
  %}

  ins_pipe(pipe_class_default);

%}
15452 
// Three-way double compare: dst = -1 if src1 < src2 or unordered,
// 0 if equal, +1 if src1 > src2. Branch-free via csinv/csneg.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Format fixed to plain assembly syntax (the previous text used
  // unbalanced function-call parentheses).
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (removed an unused Label that was declared and bound but never
    // branched to)
  %}
  ins_pipe(pipe_class_default);

%}
15479 
// Three-way float compare against constant 0.0: dst = -1 if src1 < 0
// or unordered, 0 if equal, +1 if src1 > 0. Branch-free via csinv/csneg.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Format fixed to plain assembly syntax (the previous text used
  // unbalanced function-call parentheses).
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (removed an unused Label that was declared and bound but never
    // branched to)
  %}

  ins_pipe(pipe_class_default);

%}
15506 
// Three-way double compare against constant 0.0: dst = -1 if src1 < 0
// or unordered, 0 if equal, +1 if src1 > 0. Branch-free via csinv/csneg.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Format fixed to plain assembly syntax (the previous text used
  // unbalanced function-call parentheses).
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (removed an unused Label that was declared and bound but never
    // branched to)
  %}
  ins_pipe(pipe_class_default);

%}
15532 
// CmpLTMask: dst = (p < q) ? -1 : 0, computed branch-free by setting
// dst to 1 on LT (csetw) and then negating it.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    // 0 - 1 = -1 (all ones) when p < q, 0 - 0 = 0 otherwise.
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
15553 
// CmpLTMask against zero: dst = (src < 0) ? -1 : 0, which is just an
// arithmetic shift right by 31 replicating the sign bit.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
15569 
15570 // ============================================================================
15571 // Max and Min
15572 
15573 // Like compI_reg_reg or compI_reg_immI0 but without match rule and second zero parameter.
15574 
// Helper instruct with no match rule: compare a register against zero.
// Used only inside expand rules (see the comment above) so it takes no
// separate zero operand.
instruct compI_reg_imm0(rFlagsReg cr, iRegI src)
%{
  effect(DEF cr, USE src);
  ins_cost(INSN_COST);
  format %{ "cmpw $src, 0" %}

  ins_encode %{
    __ cmpw($src$$Register, 0);
  %}
  ins_pipe(icmp_reg_imm);
%}
15586 
// MinI expands to a compare followed by a conditional move on LT.
instruct minI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2)
%{
  match(Set dst (MinI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_lt(dst, src1, src2, cr);
  %}
%}
15598 
// MaxI expands to a compare followed by a conditional move on GT.
instruct maxI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2)
%{
  match(Set dst (MaxI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_gt(dst, src1, src2, cr);
  %}
%}
15610 
15611 
15612 // ============================================================================
15613 // Branch Instructions
15614 
15615 // Direct Branch.
// Direct Branch.
// Unconditional branch (Goto) to a label.
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
15629 
15630 // Conditional Near Branch
// Conditional near branch on a signed condition code.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
15650 
15651 // Conditional Near Branch Unsigned
// Conditional near branch on an unsigned condition code.
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
15671 
15672 // Make use of CBZ and CBNZ.  These instructions, as well as being
15673 // shorter than (cmp; branch), have the additional benefit of not
15674 // killing the flags.
15675 
// Fused int eq/ne-zero compare-and-branch using cbzw/cbnzw; does not
// touch the flags register.
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15692 
// Fused long eq/ne-zero compare-and-branch using cbz/cbnz; does not
// touch the flags register.
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15709 
// Fused pointer eq/ne-null compare-and-branch using cbz/cbnz; does not
// touch the flags register.
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15726 
// Fused compressed-oop eq/ne-null compare-and-branch using cbzw/cbnzw;
// does not touch the flags register.
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15743 
// Null test of a decoded narrow oop fused into a branch: a narrow oop
// decodes to null iff the narrow form is zero, so test the 32-bit
// register directly with cbzw/cbnzw and skip the decode.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15760 
// Fused unsigned int compare-with-zero branch. For unsigned values,
// "<= 0" (LS) is equivalent to "== 0" and "> 0" (HI) to "!= 0", so
// all four conditions map onto cbzw/cbnzw.
instruct cmpUI_imm0_branch(cmpOpUEqNeLeGt cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS) {
      __ cbzw($op1$$Register, *L);
    } else {
      assert(cond == Assembler::NE || cond == Assembler::HI, "unexpected condition");
      __ cbnzw($op1$$Register, *L);
    }
  %}
  ins_pipe(pipe_cmp_branch);
%}
15779 
15780 instruct cmpUL_imm0_branch(cmpOpUEqNeLeGt cmp, iRegL op1, immL0 op2, label labl) %{
15781   match(If cmp (CmpUL op1 op2));
15782   effect(USE labl);
15783 
15784   ins_cost(BRANCH_COST);
15785   format %{ "cb$cmp   $op1, $labl" %}
15786   ins_encode %{
15787     Label* L = $labl$$label;
15788     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
15789     if (cond == Assembler::EQ || cond == Assembler::LS) {
15790       __ cbz($op1$$Register, *L);
15791     } else {
15792       assert(cond == Assembler::NE || cond == Assembler::HI, "unexpected condition");
15793       __ cbnz($op1$$Register, *L);
15794     }
15795   %}
15796   ins_pipe(pipe_cmp_branch);
15797 %}
15798 
15799 // Test bit and Branch
15800 
15801 // Patterns for short (< 32KiB) variants
// Sign test and branch, long: "x < 0" / "x >= 0" is just a test of the sign
// bit (bit 63), so LT maps to tbnz (bit set) and GE to tbz (bit clear).
// tbr() dispatches to tbz/tbnz based on the condition argument.
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    // LT -> NE ("bit 63 is set"), GE -> EQ ("bit 63 is clear").
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Sign test and branch, int: same idea as cmpL_branch_sign but testing
// bit 31 of the 32-bit value.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    // LT -> NE ("bit 31 is set"), GE -> EQ ("bit 31 is clear").
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Single-bit test and branch, long: (x & pow2) ==/!= 0 becomes tbz/tbnz on
// the one set bit of the mask.  The predicate guarantees op2 is a power of 2.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // Bit index of the single set bit in the power-of-2 mask.
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Single-bit test and branch, int: 32-bit twin of cmpL_branch_bit.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // Bit index of the single set bit in the power-of-2 mask.
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
15867 
15868 // And far variants
// Far variant of cmpL_branch_sign: identical pattern, but tbr() is told the
// target may be out of tbz/tbnz's +/-32KiB range so it can emit an inverted
// test around an unconditional branch instead.  No ins_short_branch(1) here.
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    // LT -> NE ("sign bit set"), GE -> EQ ("sign bit clear").
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of cmpI_branch_sign (sign bit 31 of the int value).
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of cmpL_branch_bit (test one power-of-2 bit of a long).
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of cmpI_branch_bit (test one power-of-2 bit of an int).
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15930 
15931 // Test bits
15932 
// Set flags from (op1 & imm) vs 0 using a single tst (ANDS with zr dest),
// long form.  Only applies when the mask is encodable as a 64-bit logical
// immediate (checked by the predicate).
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Set flags from (op1 & imm) vs 0, int form (32-bit tstw with a 32-bit
// logical-immediate mask).
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Register-register variant: set flags from (op1 & op2) vs 0, long.
// No predicate needed since any register mask works.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Register-register variant: set flags from (op1 & op2) vs 0, int.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
15980 
15981 
15982 // Conditional Far Branch
15983 // Conditional Far Branch Unsigned
15984 // TODO: fixme
15985 
15986 // counted loop end branch near
// Conditional branch at the bottom of a counted loop: plain b.<cond> to the
// loop-back label, using the same encoding helper as an ordinary conditional
// branch.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
16002 
16003 // counted loop end branch far
16004 // TODO: fixme
16005 
16006 // ============================================================================
16007 // inlined locking and unlocking
16008 
// Inline monitor enter for the legacy locking modes (anything but
// LM_LIGHTWEIGHT).  The lock outcome is communicated to the following
// branch through the condition flags (cr); tmp/tmp2/tmp3 are scratch.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
  predicate(LockingMode != LM_LIGHTWEIGHT);
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2, TEMP tmp3);

  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2,$tmp3" %}

  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_serial);
%}

// Inline monitor exit for the legacy locking modes; counterpart of
// cmpFastLock above.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  predicate(LockingMode != LM_LIGHTWEIGHT);
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register);
  %}

  ins_pipe(pipe_serial);
%}

// Inline monitor enter for LM_LIGHTWEIGHT (lock-stack based locking);
// selected by the predicate, mutually exclusive with cmpFastLock.
instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
  predicate(LockingMode == LM_LIGHTWEIGHT);
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2, TEMP tmp3);

  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2,$tmp3" %}

  ins_encode %{
    __ fast_lock_lightweight($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_serial);
%}

// Inline monitor exit for LM_LIGHTWEIGHT; counterpart of
// cmpFastLockLightweight above.
instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
  predicate(LockingMode == LM_LIGHTWEIGHT);
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2, TEMP tmp3);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2, $tmp3" %}

  ins_encode %{
    __ fast_unlock_lightweight($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_serial);
%}
16072 
16073 // ============================================================================
16074 // Safepoint Instructions
16075 
16076 // TODO
16077 // provide a near and far version of this code
16078 
// Safepoint poll: load from the polling page (discarding the value into zr).
// If a safepoint is pending the page is protected and the load traps into
// the VM's safepoint handler; poll_type relocation lets the VM find it.
instruct safePoint(rFlagsReg cr, iRegP poll)
%{
  match(SafePoint poll);
  effect(KILL cr);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
16092 
16093 
16094 // ============================================================================
16095 // Procedure Call/Return Instructions
16096 
16097 // Call Java Static Instruction
16098 
// Direct call to a statically-bound Java method; the encoding also emits
// the post-call epilog (aarch64_enc_call_epilog).
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode(aarch64_enc_java_static_call(meth),
             aarch64_enc_call_epilog);

  ins_pipe(pipe_class_call);
%}

// TO HERE

// Call Java Dynamic Instruction
// Dynamically-dispatched (inline-cache) Java call; same epilog as the
// static variant.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode(aarch64_enc_java_dynamic_call(meth),
             aarch64_enc_call_epilog);

  ins_pipe(pipe_class_call);
%}
16133 
16134 // Call Runtime Instruction
16135 
// Call into the VM runtime (may safepoint); all four runtime-call flavors
// below share the aarch64_enc_java_to_runtime encoding and differ only in
// the ideal node they match.
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction

// Leaf runtime call: no safepoint check on return.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction without safepoint and with vector arguments
instruct CallLeafDirectVector(method meth)
%{
  match(CallLeafVector);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf vector $meth" %}

  ins_encode(aarch64_enc_java_to_runtime(meth));

  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction

// Leaf runtime call that is known not to use/modify FP state.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16200 
16201 // Tail Call; Jump from runtime stub to Java code.
16202 // Also known as an 'interprocedural jump'.
16203 // Target of jump will eventually return to caller.
16204 // TailJump below removes the return address.
16205 // Don't use rfp for 'jump_target' because a MachEpilogNode has already been
16206 // emitted just above the TailCall which has reset rfp to the caller state.
// Indirect tail call: jump (not call) through jump_target with the method
// pointer in the inline-cache register; rfp is excluded because the epilog
// has already restored the caller's frame pointer.
instruct TailCalljmpInd(iRegPNoSpNoRfp jump_target, inline_cache_RegP method_ptr)
%{
  match(TailCall jump_target method_ptr);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_ptr holds method" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}

// Indirect tail jump used for exception dispatch: jump through jump_target
// with the exception oop pinned in r0.
instruct TailjmpInd(iRegPNoSpNoRfp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
16232 
16233 // Forward exception.
// Forward exception: unconditional far jump to the shared
// forward_exception stub, which routes the pending exception to the
// caller's handler.
instruct ForwardExceptionjmp()
%{
  match(ForwardException);
  ins_cost(CALL_COST);

  format %{ "b forward_exception_stub" %}
  ins_encode %{
    __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
  %}
  ins_pipe(pipe_class_call);
%}
16245 
16246 // Create exception oop: created by stack-crawling runtime code.
16247 // Created exception is now available to this handler, and is setup
16248 // just prior to jumping to this handler. No code emitted.
16249 // TODO check
16250 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Materialize the exception oop at a handler entry.  The runtime has
// already placed the oop in r0 before jumping here, so this is a zero-size
// placeholder that just tells the register allocator where the value lives.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}

// Rethrow exception: The exception oop will come in the first
// argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
16276 
16277 
16278 // Return Instruction
16279 // epilog node loads ret address into lr as part of frame pop
// Method return: plain ret through lr (the epilog node has already
// reloaded the return address into lr while popping the frame).
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}

// Halt node: code that should be unreachable.  Emits a stop (debug trap
// with a reason message) unless the block was proven dead (is_reachable()
// false), in which case nothing is emitted.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    if (is_reachable()) {
      __ stop(_halt_reason);
    }
  %}

  ins_pipe(pipe_class_default);
%}
16306 
16307 // ============================================================================
16308 // Partial Subtype Check
16309 //
// Search the sub-klass's secondary supers (superklass) array for an
// instance of the superklass.  Set a hidden
16311 // internal cache on a hit (cache is checked with exposed code in
16312 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
16313 // encoding ALSO sets flags.
16314 
// Linear-scan partial subtype check, used only when the hashed secondary
// supers table is disabled.  Registers are pinned (r4/r0/r2/r5) to match
// the stub calling convention used by aarch64_enc_partial_subtype_check.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  predicate(!UseSecondarySupersTable);
  effect(KILL cr, KILL temp);

  ins_cost(20 * INSN_COST);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}

// Two versions of partialSubtypeCheck, both used when we need to
// search for a super class in the secondary supers array. The first
// is used when we don't know _a priori_ the class being searched
// for. The second, far more common, is used when we do know: this is
// used for instanceof, checkcast, and any case where C2 can determine
// it by constant propagation.

// Table-based check with a non-constant superklass: falls back to the
// "var" lookup which cannot precompute the hash slot.
instruct partialSubtypeCheckVarSuper(iRegP_R4 sub, iRegP_R0 super, vRegD_V0 vtemp, iRegP_R5 result,
                                     iRegP_R1 tempR1, iRegP_R2 tempR2, iRegP_R3 tempR3,
                                     rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  predicate(UseSecondarySupersTable);
  effect(KILL cr, TEMP tempR1, TEMP tempR2, TEMP tempR3, TEMP vtemp);

  ins_cost(10 * INSN_COST);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode %{
    __ lookup_secondary_supers_table_var($sub$$Register, $super$$Register,
                                         $tempR1$$Register, $tempR2$$Register, $tempR3$$Register,
                                         $vtemp$$FloatRegister,
                                         $result$$Register, /*L_success*/nullptr);
  %}

  ins_pipe(pipe_class_memory);
%}

// Table-based check with a compile-time-constant superklass: the hash slot
// is computed here at code-emission time.  Either the lookup is inlined or
// a trampoline call to a per-slot stub is emitted; both paths can fail if
// the code cache is full, which is reported via ciEnv::record_failure.
instruct partialSubtypeCheckConstSuper(iRegP_R4 sub, iRegP_R0 super_reg, immP super_con, vRegD_V0 vtemp, iRegP_R5 result,
                                       iRegP_R1 tempR1, iRegP_R2 tempR2, iRegP_R3 tempR3,
                                       rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub (Binary super_reg super_con)));
  predicate(UseSecondarySupersTable);
  effect(KILL cr, TEMP tempR1, TEMP tempR2, TEMP tempR3, TEMP vtemp);

  ins_cost(5 * INSN_COST);  // smaller than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super_reg, $super_con" %}

  ins_encode %{
    bool success = false;
    // Hash slot of the (constant) superklass in the secondary supers table.
    u1 super_klass_slot = ((Klass*)$super_con$$constant)->hash_slot();
    if (InlineSecondarySupersTest) {
      success =
        __ lookup_secondary_supers_table_const($sub$$Register, $super_reg$$Register,
                                               $tempR1$$Register, $tempR2$$Register, $tempR3$$Register,
                                               $vtemp$$FloatRegister,
                                               $result$$Register,
                                               super_klass_slot);
    } else {
      address call = __ trampoline_call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_stub(super_klass_slot)));
      success = (call != nullptr);
    }
    // A null trampoline / failed inline emission means the code cache is
    // full; abandon this compile rather than emit broken code.
    if (!success) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
16392 
16393 // Intrisics for String.compareTo()
16394 
// String.compareTo intrinsic, UTF-16 x UTF-16, NEON-only path (UseSVE == 0).
// The UU/LL variants need no vector temps (fnoreg/pnoreg passed through);
// the mixed-encoding variants below kill three vector temps.
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, pnoreg, pnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

// Latin-1 x Latin-1 compare, NEON-only path.
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, pnoreg, pnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// UTF-16 x Latin-1 compare, NEON-only path; needs vector temps for the
// Latin-1 -> UTF-16 widening.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, pnoreg, pnoreg, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Latin-1 x UTF-16 compare, NEON-only path; mirror of string_compareUL.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, pnoreg, pnoreg, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
16469 
16470 // Note that Z registers alias the corresponding NEON registers, we declare the vector operands of
16471 // these string_compare variants as NEON register type for convenience so that the prototype of
16472 // string_compare can be shared with all variants.
16473 
// SVE String.compareTo, Latin-1 x Latin-1 (UseSVE > 0).  The four SVE
// variants are identical in shape and differ only in the encoding constant;
// each takes two governing predicate temps (P0/P1) plus two vector temps.
instruct string_compareLL_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// SVE compare, Latin-1 x UTF-16.
instruct string_compareLU_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}

// SVE compare, UTF-16 x Latin-1.
instruct string_compareUL_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}

// SVE compare, UTF-16 x UTF-16.
instruct string_compareUU_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16565 
// String.indexOf intrinsic, UU encoding: both haystack (str1) and needle
// (str2) are UTF-16.  Needle length cnt2 is a runtime value.  All four
// inputs are clobbered by the stub (USE_KILL), plus tmp1-tmp6, V0-V1 and
// the flags.
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                          iRegINoSp tmp3, iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
         TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    // -1: the needle length is not a compile-time constant (contrast the
    // string_indexof_con* rules below, which pass the immediate).
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16589 
// String.indexOf intrinsic, LL encoding: both haystack and needle are
// Latin-1.  Needle length cnt2 is a runtime value.  Clobbers all inputs,
// tmp1-tmp6, V0-V1 and the flags.
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
         TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    // -1: needle length is not a compile-time constant.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16613 
// String.indexOf intrinsic, UL encoding: UTF-16 haystack, Latin-1 needle.
// Needle length cnt2 is a runtime value.  Clobbers all inputs, tmp1-tmp6,
// V0-V1 and the flags.
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5,
         TEMP tmp6, TEMP vtmp0, TEMP vtmp1, KILL cr);
  // Debug-only format (-XX:+PrintOptoAssembly).  "$cnt1" needs the '$' so
  // the assigned register is substituted, as in the UU/LL variants.
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    // -1: needle length is not a compile-time constant.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16637 
// String.indexOf, UU encoding, with a constant needle length of at most 4
// chars (operand class immI_le_4).  The constant is passed directly to the
// stub; cnt2 and the last two temp slots are therefore zr.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16659 
// String.indexOf, LL encoding, with a constant needle length of at most 4
// bytes (immI_le_4).  The constant is passed directly; cnt2 and the last
// two temp slots are zr.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16681 
// String.indexOf, UL encoding, restricted to a constant needle length of
// exactly 1 (operand class immI_1) — the only constant-length UL case
// handled here.
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16703 
// StringUTF16.indexOf(char) — non-SVE (NEON) path, selected only when
// UseSVE == 0; the SVE variants below handle UseSVE > 0.
instruct string_indexof_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                             iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                             iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  predicate((UseSVE == 0) && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "StringUTF16 IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16722 
// StringLatin1.indexOf(char) — non-SVE (NEON) path, selected only when
// UseSVE == 0.
instruct stringL_indexof_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  predicate((UseSVE == 0) && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "StringLatin1 IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ stringL_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                            $result$$Register, $tmp1$$Register, $tmp2$$Register,
                            $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16741 
// StringLatin1.indexOf(char) — SVE path (UseSVE > 0).  Uses scalable vector
// and predicate temporaries; the shared stub takes an isL flag (true here).
// NOTE(review): unlike the NEON variants, str1/cnt1/ch are not USE_KILLed —
// presumably the SVE stub preserves them; confirm against the stub.
instruct stringL_indexof_char_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                                  iRegI_R0 result, vecA ztmp1, vecA ztmp2,
                                  pRegGov pgtmp, pReg ptmp, rFlagsReg cr) %{
  predicate(UseSVE > 0 && ((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L);
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(TEMP ztmp1, TEMP ztmp2, TEMP pgtmp, TEMP ptmp, KILL cr);
  format %{ "StringLatin1 IndexOf char[] $str1,$cnt1,$ch -> $result # use sve" %}
  ins_encode %{
    __ string_indexof_char_sve($str1$$Register, $cnt1$$Register, $ch$$Register,
                               $result$$Register, $ztmp1$$FloatRegister,
                               $ztmp2$$FloatRegister, $pgtmp$$PRegister,
                               $ptmp$$PRegister, true /* isL */);
  %}
  ins_pipe(pipe_class_memory);
%}
16757 
// StringUTF16.indexOf(char) — SVE path (UseSVE > 0); same shape as the
// Latin-1 SVE rule above but with isL == false.
instruct stringU_indexof_char_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                                  iRegI_R0 result, vecA ztmp1, vecA ztmp2,
                                  pRegGov pgtmp, pReg ptmp, rFlagsReg cr) %{
  predicate(UseSVE > 0 && ((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U);
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(TEMP ztmp1, TEMP ztmp2, TEMP pgtmp, TEMP ptmp, KILL cr);
  format %{ "StringUTF16 IndexOf char[] $str1,$cnt1,$ch -> $result # use sve" %}
  ins_encode %{
    __ string_indexof_char_sve($str1$$Register, $cnt1$$Register, $ch$$Register,
                               $result$$Register, $ztmp1$$FloatRegister,
                               $ztmp2$$FloatRegister, $pgtmp$$PRegister,
                               $ptmp$$PRegister, false /* isL */);
  %}
  ins_pipe(pipe_class_memory);
%}
16773 
// String.equals for two Latin-1 strings (LL encoding).  cnt is the shared
// length; str1/str2/cnt are clobbered by the stub.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // LL encoding: cnt is a count of 8-bit bytes.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16789 
// Arrays.equals for byte arrays (LL encoding).  The final literal argument
// (1) is the element size in bytes.  arrays_equals may emit a call to a
// large-array stub, which can fail if the code cache is full — hence the
// tpc null check.
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                       vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3,
         TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         TEMP vtmp6, TEMP vtmp7, KILL cr);

  // Debug-only format; "$ary2" (was "ary2") so the register is substituted.
  format %{ "Array Equals $ary1,$ary2 -> $result # KILL $ary1 $ary2 $tmp $tmp1 $tmp2 $tmp3 V0-V7 cr" %}
  ins_encode %{
    address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register,
                                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                                   $result$$Register, $tmp$$Register, 1);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
16814 
// Arrays.equals for char arrays (UU encoding).  The final literal argument
// (2) is the element size in bytes.  tpc == nullptr means the stub call
// could not be emitted because the code cache is full.
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                       vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3,
         TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         TEMP vtmp6, TEMP vtmp7, KILL cr);

  // Debug-only format; "$ary2" (was "ary2") so the register is substituted.
  format %{ "Array Equals $ary1,$ary2 -> $result # KILL $ary1 $ary2 $tmp $tmp1 $tmp2 $tmp3 V0-V7 cr" %}
  ins_encode %{
    address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register,
                                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                                   $result$$Register, $tmp$$Register, 2);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
16839 
// Vectorized array hashCode.  basic_type is an immediate selecting the
// element type; result is both an input (initial hash) and the output.
instruct arrays_hashcode(iRegP_R1 ary, iRegI_R2 cnt, iRegI_R0 result, immI basic_type,
                         vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                         vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                         vRegD_V12 vtmp8, vRegD_V13 vtmp9, rFlagsReg cr)
%{
  match(Set result (VectorizedHashCode (Binary ary cnt) (Binary result basic_type)));
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5, TEMP vtmp6,
         TEMP vtmp7, TEMP vtmp8, TEMP vtmp9, USE_KILL ary, USE_KILL cnt, USE basic_type, KILL cr);

  format %{ "Array HashCode array[] $ary,$cnt,$result,$basic_type -> $result   // KILL all" %}
  ins_encode %{
    // NOTE(review): the first four vector temps are deliberately passed in
    // reverse order (vtmp3..vtmp0) — presumably the order the stub expects;
    // confirm against MacroAssembler::arrays_hashcode before reordering.
    address tpc = __ arrays_hashcode($ary$$Register, $cnt$$Register, $result$$Register,
                                     $vtmp3$$FloatRegister, $vtmp2$$FloatRegister,
                                     $vtmp1$$FloatRegister, $vtmp0$$FloatRegister,
                                     $vtmp4$$FloatRegister, $vtmp5$$FloatRegister,
                                     $vtmp6$$FloatRegister, $vtmp7$$FloatRegister,
                                     $vtmp8$$FloatRegister, $vtmp9$$FloatRegister,
                                     (BasicType)$basic_type$$constant);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
16865 
// StringCoding countPositives intrinsic: counts leading non-negative bytes
// in ary1[0..len).  The stub may call out to a large-array helper, hence
// the code-cache-full check.
instruct count_positives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (CountPositives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "count positives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    address tpc = __ count_positives($ary1$$Register, $len$$Register, $result$$Register);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe( pipe_slow );
%}
16880 
16881 // fast char[] to byte[] compression
// StrCompressedCopy: copy char[] src to byte[] dst, compressing UTF-16 to
// Latin-1.  len is read but not clobbered (USE, not USE_KILL); src and dst
// pointers are destroyed.
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                         vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         USE_KILL src, USE_KILL dst, USE len, KILL cr);

  format %{ "String Compress $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $result$$Register, $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                           $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                           $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
16900 
16901 // fast byte[] to char[] inflation
// StrInflatedCopy: inflate byte[] src (Latin-1) into char[] dst (UTF-16).
// No result value (Universe dummy).
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len, iRegP_R3 tmp,
                        vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                        vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  // NOTE(review): vtmp3-vtmp6 are declared TEMP but not passed below —
  // presumably clobbered inside the large-inflate stub path; confirm
  // against MacroAssembler::byte_array_inflate before trimming.
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3,
         TEMP vtmp4, TEMP vtmp5, TEMP vtmp6, TEMP tmp,
         USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst # KILL $tmp $src $dst $len V0-V6 cr" %}
  ins_encode %{
    address tpc = __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                                        $vtmp2$$FloatRegister, $tmp$$Register);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
16923 
16924 // encode char[] to byte[] in ISO_8859_1
// EncodeISOArray, ISO-8859-1 flavor (is_ascii() false).  The shared stub's
// boolean flag selects ASCII (true) vs ISO (false) — false here.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                          vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                          iRegI_R0 result, rFlagsReg cr)
%{
  predicate(!((EncodeISOArrayNode*)n)->is_ascii());
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE len, KILL vtmp0, KILL vtmp1,
         KILL vtmp2, KILL vtmp3, KILL vtmp4, KILL vtmp5, KILL cr);

  format %{ "Encode ISO array $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
                        $result$$Register, false,
                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                        $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                        $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_class_memory);
%}
16945 
// EncodeISOArray, ASCII flavor (is_ascii() true); same stub as the ISO rule
// above with the ascii flag set to true.
instruct encode_ascii_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                            vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                            vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                            iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((EncodeISOArrayNode*)n)->is_ascii());
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE len, KILL vtmp0, KILL vtmp1,
         KILL vtmp2, KILL vtmp3, KILL vtmp4, KILL vtmp5, KILL cr);

  format %{ "Encode ASCII array $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
                        $result$$Register, true,
                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                        $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                        $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_class_memory);
%}
16966 
16967 //----------------------------- CompressBits/ExpandBits ------------------------
16968 
// Integer.compress (bit extract): route the scalar operands through lane 0
// (S size) of FP/SIMD temporaries and use the SVE2 BEXT instruction.
// NOTE(review): no predicate here — presumably matching is gated elsewhere
// on SVE2 BitPerm support; confirm in match_rule_supported.
instruct compressBitsI_reg(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ S, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ S, 0, $mask$$Register);
    __ sve_bext($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
16986 
// Integer.compress with the source loaded directly from memory and a
// constant mask materialized from the constant pool (S-sized lanes, BEXT).
instruct compressBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits (LoadI mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrs   $tsrc, $mem\n\t"
            "ldrs   $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    loadStore(masm, &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ ldrs($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bext($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17005 
// Long.compress (bit extract): same scheme as the int variant but with
// D-sized (64-bit) lanes.
instruct compressBitsL_reg(iRegLNoSp dst, iRegL src, iRegL mask,
                           vRegD tdst, vRegD tsrc, vRegD tmask) %{
  match(Set dst (CompressBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ D, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ D, 0, $mask$$Register);
    __ sve_bext($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17023 
// Long.compress with the source loaded from memory and a constant mask
// (64-bit D lanes, BEXT).
instruct compressBitsL_memcon(iRegLNoSp dst, memory8 mem, immL mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits (LoadL mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrd   $tsrc, $mem\n\t"
            "ldrd   $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    loadStore(masm, &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ ldrd($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bext($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17042 
// Integer.expand (bit deposit): scalar operands through lane 0 (S size) of
// FP/SIMD temporaries, using the SVE2 BDEP instruction.
instruct expandBitsI_reg(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask,
                         vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ S, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ S, 0, $mask$$Register);
    __ sve_bdep($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17060 
// Integer.expand with the source loaded from memory and a constant mask
// (S lanes, BDEP).
instruct expandBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask,
                         vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits (LoadI mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrs   $tsrc, $mem\n\t"
            "ldrs   $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    loadStore(masm, &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ ldrs($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bdep($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17079 
// Long.expand (bit deposit): same scheme as the int variant but with
// D-sized (64-bit) lanes.
instruct expandBitsL_reg(iRegLNoSp dst, iRegL src, iRegL mask,
                         vRegD tdst, vRegD tsrc, vRegD tmask) %{
  match(Set dst (ExpandBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ D, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ D, 0, $mask$$Register);
    __ sve_bdep($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17097 
17098 
// Long.expand with the source loaded from memory and a constant mask
// (64-bit D lanes, BDEP).
// dst is iRegLNoSp: the match rule produces a long (ExpandBits of LoadL),
// consistent with compressBitsL_memcon above; the original's iRegINoSp was
// an int-typed register class on a long-typed rule.
instruct expandBitsL_memcon(iRegLNoSp dst, memory8 mem, immL mask,
                         vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits (LoadL mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrd   $tsrc, $mem\n\t"
            "ldrd   $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    loadStore(masm, &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ ldrd($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bdep($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17117 
17118 // ============================================================================
17119 // This name is KNOWN by the ADLC and cannot be changed.
17120 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
17121 // for this guy.
// ThreadLocal: the current JavaThread already lives in the dedicated thread
// register (operand class thread_RegP), so this "load" emits no code at all
// (size 0, cost 0, empty encoding).
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
17136 
17137 //----------PEEPHOLE RULES-----------------------------------------------------
17138 // These must follow all instruction definitions as they use the names
17139 // defined in the instructions definitions.
17140 //
17141 // peepmatch ( root_instr_name [preceding_instruction]* );
17142 //
17143 // peepconstraint %{
17144 // (instruction_number.operand_name relational_op instruction_number.operand_name
17145 //  [, ...] );
17146 // // instruction numbers are zero-based using left to right order in peepmatch
17147 //
17148 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
17149 // // provide an instruction_number.operand_name for each operand that appears
17150 // // in the replacement instruction's match rule
17151 //
17152 // ---------VM FLAGS---------------------------------------------------------
17153 //
17154 // All peephole optimizations can be turned off using -XX:-OptoPeephole
17155 //
17156 // Each peephole rule is given an identifying number starting with zero and
17157 // increasing by one in the order seen by the parser.  An individual peephole
17158 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
17159 // on the command-line.
17160 //
17161 // ---------CURRENT LIMITATIONS----------------------------------------------
17162 //
17163 // Only match adjacent instructions in same basic block
17164 // Only equality constraints
17165 // Only constraints between operands, not (0.dest_reg == RAX_enc)
17166 // Only one replacement instruction
17167 //
17168 // ---------EXAMPLE----------------------------------------------------------
17169 //
17170 // // pertinent parts of existing instructions in architecture description
17171 // instruct movI(iRegINoSp dst, iRegI src)
17172 // %{
17173 //   match(Set dst (CopyI src));
17174 // %}
17175 //
17176 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
17177 // %{
17178 //   match(Set dst (AddI dst src));
17179 //   effect(KILL cr);
17180 // %}
17181 //
17182 // // Change (inc mov) to lea
17183 // peephole %{
17184 //   // increment preceded by register-register move
17185 //   peepmatch ( incI_iReg movI );
17186 //   // require that the destination register of the increment
17187 //   // match the destination register of the move
17188 //   peepconstraint ( 0.dst == 1.dst );
17189 //   // construct a replacement instruction that sets
17190 //   // the destination to ( move's source register + one )
17191 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
17192 // %}
17193 //
17194 
17195 // Implementation no longer uses movX instructions since
17196 // machine-independent system no longer uses CopyX nodes.
17197 //
17198 // peephole
17199 // %{
17200 //   peepmatch (incI_iReg movI);
17201 //   peepconstraint (0.dst == 1.dst);
17202 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17203 // %}
17204 
17205 // peephole
17206 // %{
17207 //   peepmatch (decI_iReg movI);
17208 //   peepconstraint (0.dst == 1.dst);
17209 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17210 // %}
17211 
17212 // peephole
17213 // %{
17214 //   peepmatch (addI_iReg_imm movI);
17215 //   peepconstraint (0.dst == 1.dst);
17216 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17217 // %}
17218 
17219 // peephole
17220 // %{
17221 //   peepmatch (incL_iReg movL);
17222 //   peepconstraint (0.dst == 1.dst);
17223 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17224 // %}
17225 
17226 // peephole
17227 // %{
17228 //   peepmatch (decL_iReg movL);
17229 //   peepconstraint (0.dst == 1.dst);
17230 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17231 // %}
17232 
17233 // peephole
17234 // %{
17235 //   peepmatch (addL_iReg_imm movL);
17236 //   peepconstraint (0.dst == 1.dst);
17237 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17238 // %}
17239 
17240 // peephole
17241 // %{
17242 //   peepmatch (addP_iReg_imm movP);
17243 //   peepconstraint (0.dst == 1.dst);
17244 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
17245 // %}
17246 
17247 // // Change load of spilled value to only a spill
17248 // instruct storeI(memory mem, iRegI src)
17249 // %{
17250 //   match(Set mem (StoreI mem src));
17251 // %}
17252 //
17253 // instruct loadI(iRegINoSp dst, memory mem)
17254 // %{
17255 //   match(Set dst (LoadI mem));
17256 // %}
17257 //
17258 
17259 //----------SMARTSPILL RULES---------------------------------------------------
17260 // These must follow all instruction definitions as they use the names
17261 // defined in the instructions definitions.
17262 
17263 // Local Variables:
17264 // mode: c++
17265 // End: