1 //
    2 // Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
    3 // Copyright (c) 2014, 2024, Red Hat, Inc. All rights reserved.
    4 // Copyright 2025 Arm Limited and/or its affiliates.
    5 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    6 //
    7 // This code is free software; you can redistribute it and/or modify it
    8 // under the terms of the GNU General Public License version 2 only, as
    9 // published by the Free Software Foundation.
   10 //
   11 // This code is distributed in the hope that it will be useful, but WITHOUT
   12 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   13 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   14 // version 2 for more details (a copy is included in the LICENSE file that
   15 // accompanied this code).
   16 //
   17 // You should have received a copy of the GNU General Public License version
   18 // 2 along with this work; if not, write to the Free Software Foundation,
   19 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   20 //
   21 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   22 // or visit www.oracle.com if you need additional information or have any
   23 // questions.
   24 //
   25 //
   26 
   27 // AArch64 Architecture Description File
   28 
   29 //----------REGISTER DEFINITION BLOCK------------------------------------------
   30 // This information is used by the matcher and the register allocator to
   31 // describe individual registers and classes of registers within the target
   32 // architecture.
   33 
   34 register %{
   35 //----------Architecture Description Register Definitions----------------------
   36 // General Registers
   37 // "reg_def"  name ( register save type, C convention save type,
   38 //                   ideal register type, encoding );
   39 // Register Save Types:
   40 //
   41 // NS  = No-Save:       The register allocator assumes that these registers
   42 //                      can be used without saving upon entry to the method, &
   43 //                      that they do not need to be saved at call sites.
   44 //
   45 // SOC = Save-On-Call:  The register allocator assumes that these registers
   46 //                      can be used without saving upon entry to the method,
   47 //                      but that they must be saved at call sites.
   48 //
   49 // SOE = Save-On-Entry: The register allocator assumes that these registers
   50 //                      must be saved before using them upon entry to the
   51 //                      method, but they do not need to be saved at call
   52 //                      sites.
   53 //
   54 // AS  = Always-Save:   The register allocator assumes that these registers
   55 //                      must be saved before using them upon entry to the
   56 //                      method, & that they must be saved at call sites.
   57 //
   58 // Ideal Register Type is used to determine how to save & restore a
   59 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
   60 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
   61 //
   62 // The encoding number is the actual bit-pattern placed into the opcodes.
   63 
   64 // We must define the 64 bit int registers in two 32 bit halves, the
   65 // real lower register and a virtual upper half register. upper halves
   66 // are used by the register allocator but are not actually supplied as
   67 // operands to memory ops.
   68 //
   69 // follow the C1 compiler in making registers
   70 //
   71 //   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
   73 //   r8-r9 non-allocatable (so we can use them as scratch regs)
   74 //
   75 // as regards Java usage. we don't use any callee save registers
   76 // because this makes it difficult to de-optimise a frame (see comment
   77 // in x86 implementation of Deoptimization::unwind_callee_save_values)
   78 //
   79 
   80 // General Registers
   81 
// r0-r7 are the integer argument registers: save-on-call in both the
// Java and C conventions (SOC, SOC).
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
// r8/r9 are reserved as scratch registers (rscratch1/rscratch2) and
// are excluded from the allocatable register classes further below.
reg_def R8      ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()         ); // rscratch1, non-allocatable
reg_def R8_H    ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()->next() );
reg_def R9      ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()         ); // rscratch2, non-allocatable
reg_def R9_H    ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()->next() );
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
// r18 is named r18_tls: it is reserved on some platforms (see the
// R18_RESERVED conditionals in the non-allocatable classes below).
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18_tls->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18_tls->as_VMReg()->next());
// r19-r26 are callee-saved in the C ABI (SOE) but save-on-call for
// Java (SOC) -- see the comment near the top of this section about
// not using callee-save registers for Java code.
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
// r27-r31 have fixed VM roles and are placed last in the allocation
// order (see alloc_class chunk0 below).
reg_def R27     ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
  146 
  147 // ----------------------------
  148 // Float/Double/Vector Registers
  149 // ----------------------------
  150 
  151 // Double Registers
  152 
  153 // The rules of ADL require that double registers be defined in pairs.
  154 // Each pair must be two 32-bit values, but not necessarily a pair of
  155 // single float registers. In each pair, ADLC-assigned register numbers
  156 // must be adjacent, with the lower number even. Finally, when the
  157 // CPU stores such a register pair to memory, the word associated with
  158 // the lower ADLC-assigned number must be stored to the lower address.
  159 
// AArch64 has 32 floating-point registers. Each is 128 bits wide
// (NEON) and can hold a vector of single or double precision values:
// up to 4 * 32 bit floats or 2 * 64 bit doubles.  We currently only
// use the first float or double element of the vector.
  164 
// For Java use, float registers v0-v15 are always save-on-call
// (whereas the platform ABI treats v8-v15 as callee-save). Float
// registers v16-v31 are SOC as per the platform spec.
  168 
  169 // For SVE vector registers, we simply extend vector register size to 8
  170 // 'logical' slots. This is nominally 256 bits but it actually covers
  171 // all possible 'physical' SVE vector register lengths from 128 ~ 2048
  172 // bits. The 'physical' SVE vector register length is detected during
  173 // startup, so the register allocator is able to identify the correct
  174 // number of bytes needed for an SVE spill/unspill.
  175 // Note that a vector register with 4 slots denotes a 128-bit NEON
  176 // register allowing it to be distinguished from the corresponding SVE
  177 // vector register when the SVE vector length is 128 bits.
  178 
  // Each FP/SIMD register is modelled as four 32-bit 'logical' slots:
  // Vn (bits 0-31), Vn_H (32-63), Vn_J (64-95), Vn_K (96-127).
  // v0-v7 are the FP/SIMD argument registers: SOC in both conventions.
  reg_def V0   ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(3) );

  // For v8-v15 only the low 64 bits (the Vn and Vn_H slots) are SOE in
  // the C convention; the upper quadword slots (_J, _K) stay SOC.
  reg_def V8   ( SOC, SOE, Op_RegF, 8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOE, Op_RegF, 8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOE, Op_RegF, 9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOE, Op_RegF, 9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(3) );

  reg_def V10   ( SOC, SOE, Op_RegF, 10, v10->as_VMReg()          );
  reg_def V10_H ( SOC, SOE, Op_RegF, 10, v10->as_VMReg()->next()  );
  reg_def V10_J ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2) );
  reg_def V10_K ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3) );

  reg_def V11   ( SOC, SOE, Op_RegF, 11, v11->as_VMReg()          );
  reg_def V11_H ( SOC, SOE, Op_RegF, 11, v11->as_VMReg()->next()  );
  reg_def V11_J ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2) );
  reg_def V11_K ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3) );

  reg_def V12   ( SOC, SOE, Op_RegF, 12, v12->as_VMReg()          );
  reg_def V12_H ( SOC, SOE, Op_RegF, 12, v12->as_VMReg()->next()  );
  reg_def V12_J ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2) );
  reg_def V12_K ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3) );

  reg_def V13   ( SOC, SOE, Op_RegF, 13, v13->as_VMReg()          );
  reg_def V13_H ( SOC, SOE, Op_RegF, 13, v13->as_VMReg()->next()  );
  reg_def V13_J ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2) );
  reg_def V13_K ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3) );

  reg_def V14   ( SOC, SOE, Op_RegF, 14, v14->as_VMReg()          );
  reg_def V14_H ( SOC, SOE, Op_RegF, 14, v14->as_VMReg()->next()  );
  reg_def V14_J ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2) );
  reg_def V14_K ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3) );

  reg_def V15   ( SOC, SOE, Op_RegF, 15, v15->as_VMReg()          );
  reg_def V15_H ( SOC, SOE, Op_RegF, 15, v15->as_VMReg()->next()  );
  reg_def V15_J ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2) );
  reg_def V15_K ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3) );

  // v16-v31 are save-on-call in both conventions.
  reg_def V16   ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()          );
  reg_def V16_H ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next()  );
  reg_def V16_J ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2) );
  reg_def V16_K ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3) );

  reg_def V17   ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()          );
  reg_def V17_H ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next()  );
  reg_def V17_J ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2) );
  reg_def V17_K ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3) );

  reg_def V18   ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()          );
  reg_def V18_H ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next()  );
  reg_def V18_J ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2) );
  reg_def V18_K ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3) );

  reg_def V19   ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()          );
  reg_def V19_H ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next()  );
  reg_def V19_J ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2) );
  reg_def V19_K ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3) );

  reg_def V20   ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()          );
  reg_def V20_H ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next()  );
  reg_def V20_J ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2) );
  reg_def V20_K ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3) );

  reg_def V21   ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()          );
  reg_def V21_H ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next()  );
  reg_def V21_J ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2) );
  reg_def V21_K ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3) );

  reg_def V22   ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()          );
  reg_def V22_H ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next()  );
  reg_def V22_J ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2) );
  reg_def V22_K ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3) );

  reg_def V23   ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()          );
  reg_def V23_H ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next()  );
  reg_def V23_J ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2) );
  reg_def V23_K ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3) );

  reg_def V24   ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()          );
  reg_def V24_H ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next()  );
  reg_def V24_J ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2) );
  reg_def V24_K ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3) );

  reg_def V25   ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()          );
  reg_def V25_H ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next()  );
  reg_def V25_J ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2) );
  reg_def V25_K ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3) );

  reg_def V26   ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()          );
  reg_def V26_H ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next()  );
  reg_def V26_J ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2) );
  reg_def V26_K ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3) );

  reg_def V27   ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()          );
  reg_def V27_H ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next()  );
  reg_def V27_J ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2) );
  reg_def V27_K ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3) );

  reg_def V28   ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()          );
  reg_def V28_H ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next()  );
  reg_def V28_J ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2) );
  reg_def V28_K ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3) );

  reg_def V29   ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()          );
  reg_def V29_H ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next()  );
  reg_def V29_J ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2) );
  reg_def V29_K ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3) );

  reg_def V30   ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()          );
  reg_def V30_H ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next()  );
  reg_def V30_J ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2) );
  reg_def V30_K ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3) );

  reg_def V31   ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()          );
  reg_def V31_H ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next()  );
  reg_def V31_J ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2) );
  reg_def V31_K ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3) );
  338 
// ----------------------------
// SVE Predicate Registers
// ----------------------------
// p0-p15, each one logical slot wide; all save-on-call. Allocation
// order and the special role of p7 are described in alloc_class
// chunk2 below.
  reg_def P0 (SOC, SOC, Op_RegVectMask, 0, p0->as_VMReg());
  reg_def P1 (SOC, SOC, Op_RegVectMask, 1, p1->as_VMReg());
  reg_def P2 (SOC, SOC, Op_RegVectMask, 2, p2->as_VMReg());
  reg_def P3 (SOC, SOC, Op_RegVectMask, 3, p3->as_VMReg());
  reg_def P4 (SOC, SOC, Op_RegVectMask, 4, p4->as_VMReg());
  reg_def P5 (SOC, SOC, Op_RegVectMask, 5, p5->as_VMReg());
  reg_def P6 (SOC, SOC, Op_RegVectMask, 6, p6->as_VMReg());
  reg_def P7 (SOC, SOC, Op_RegVectMask, 7, p7->as_VMReg());
  reg_def P8 (SOC, SOC, Op_RegVectMask, 8, p8->as_VMReg());
  reg_def P9 (SOC, SOC, Op_RegVectMask, 9, p9->as_VMReg());
  reg_def P10 (SOC, SOC, Op_RegVectMask, 10, p10->as_VMReg());
  reg_def P11 (SOC, SOC, Op_RegVectMask, 11, p11->as_VMReg());
  reg_def P12 (SOC, SOC, Op_RegVectMask, 12, p12->as_VMReg());
  reg_def P13 (SOC, SOC, Op_RegVectMask, 13, p13->as_VMReg());
  reg_def P14 (SOC, SOC, Op_RegVectMask, 14, p14->as_VMReg());
  reg_def P15 (SOC, SOC, Op_RegVectMask, 15, p15->as_VMReg());
  358 
  359 // ----------------------------
  360 // Special Registers
  361 // ----------------------------
  362 
// The AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. The FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
  368 
// Condition-flag register: no real encoding (VMRegImpl::Bad()); the
// slot number 32 simply places it after the 32 integer registers.
reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
  370 
// Specify priority of register selection within phases of register
// allocation.  Highest priority is first.  A useful heuristic is to
// give registers a low priority when they are required by machine
// instructions, like EAX and EDX on I486, and choose no-save registers
// before save-on-call, & save-on-call before save-on-entry.  Registers
// which participate in fixed calling sequences should come last.
// Registers which are used as pairs must fall on an even boundary.

// Integer register allocation order: each 64-bit register contributes
// its low (Rn) and high (Rn_H) 32-bit halves.
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
    R8, R8_H,   // rscratch1
    R9, R9_H,   // rscratch2
);
  421 
// FP/SIMD register allocation order: freely-clobberable v16-v31 first,
// argument registers v0-v7 next, ABI-preserved v8-v15 last.
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);
  462 
// SVE predicate register allocation order; p7 comes last because it is
// preserved as the all-true predicate.
alloc_class chunk2 (
    // Governing predicates for load/store and arithmetic
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,

    // Extra predicates
    P8,
    P9,
    P10,
    P11,
    P12,
    P13,
    P14,
    P15,

    // Preserved for all-true predicate
    P7,
);
  486 
// The condition-flag register lives in its own chunk.
alloc_class chunk3(RFLAGS);
  488 
//----------Architecture Description Register Classes--------------------------
// Several register classes are automatically defined based upon information in
// this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
//

// Class for all 32 bit general purpose registers
// (r8/r9, the scratch registers, are deliberately omitted)
reg_class all_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30,
    R31
);
  529 
  530 
// Class for all 32 bit integer registers (excluding SP which
// will never be used as an integer register)
// Dynamically-computed class: the mask is presumably built at startup
// in a source section outside this chunk -- verify there.
reg_class any_reg32 %{
  return _ANY_REG32_mask;
%}
  536 
// Singleton classes: pin an operand to one specific 32-bit register.

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);

// Singleton class for R31 int register
reg_class int_r31_reg(R31);
  551 
// Class for all 64 bit general purpose registers
// (both 32-bit halves of each register; r8/r9 scratch regs omitted)
reg_class all_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
  585 
// Class for all long integer registers (including SP)
// Dynamically-computed class; mask defined outside this chunk.
reg_class any_reg %{
  return _ANY_REG_mask;
%}
  590 
// Class for non-allocatable 32 bit registers
// (registers with fixed VM roles that the allocator must never hand out)
reg_class non_allocatable_reg32(
#ifdef R18_RESERVED
    // See comment in register_aarch64.hpp
    R18,                        // tls on Windows
#endif
    R28,                        // thread
    R30,                        // lr
    R31                         // sp
);
  601 
// Class for non-allocatable 64 bit registers
// (64-bit counterpart of non_allocatable_reg32 above)
reg_class non_allocatable_reg(
#ifdef R18_RESERVED
    // See comment in register_aarch64.hpp
    R18, R18_H,                 // tls on Windows, platform register on macOS
#endif
    R28, R28_H,                 // thread
    R30, R30_H,                 // lr
    R31, R31_H                  // sp
);
  612 
// Class for all non-special integer registers
// Dynamically-computed class; mask defined outside this chunk.
reg_class no_special_reg32 %{
  return _NO_SPECIAL_REG32_mask;
%}

// Class for all non-special long integer registers
// Dynamically-computed class; mask defined outside this chunk.
reg_class no_special_reg %{
  return _NO_SPECIAL_REG_mask;
%}
  622 
// Singleton classes pinning an operand to one specific 64 bit register
// (low and high 32-bit halves together).

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register
reg_class method_reg(
    R12, R12_H
);

// Class for thread register
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register
reg_class fp_reg(
    R29, R29_H
);

// Class for link register
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register
reg_class sp_reg(
  R31, R31_H
);
  687 
// Pointer register classes; all three are dynamically computed, with
// their masks defined outside this chunk.

// Class for all pointer registers
reg_class ptr_reg %{
  return _PTR_REG_mask;
%}

// Class for all non_special pointer registers
reg_class no_special_ptr_reg %{
  return _NO_SPECIAL_PTR_REG_mask;
%}

// Class for all non_special pointer registers (excluding rfp)
reg_class no_special_no_rfp_ptr_reg %{
  return _NO_SPECIAL_NO_RFP_PTR_REG_mask;
%}
  702 
// Class for all float registers
// (single 32-bit slot of each of v0-v31)
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);
  738 
// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers
// (two 32-bit slots, Vn and Vn_H, of each of v0-v31)
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
  776 
// Class for all SVE vector registers.
// (all four 32-bit slots of each of v0-v31; see the SVE comment above
// for how these slots map onto variable-length physical registers)
reg_class vectora_reg (
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,
);
  812 
// Class for all 64bit vector registers
// Each register contributes the two low allocator slots (Vn, Vn_H).
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
  848 
// Class for all 128bit vector registers
// Each register contributes all four allocator slots
// (Vn, Vn_H, Vn_J, Vn_K).
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
  884 
// Singleton classes pinning an operand to one specific vector register
// (all four allocator slots Vn .. Vn_K).  NOTE(review): presumably used
// by match rules/stubs that require fixed temporaries — confirm against
// the instruct definitions that reference them.

// Class for vector register V10
reg_class v10_veca_reg(
    V10, V10_H, V10_J, V10_K
);

// Class for vector register V11
reg_class v11_veca_reg(
    V11, V11_H, V11_J, V11_K
);

// Class for vector register V12
reg_class v12_veca_reg(
    V12, V12_H, V12_J, V12_K
);

// Class for vector register V13
reg_class v13_veca_reg(
    V13, V13_H, V13_J, V13_K
);

// Class for vector register V17
reg_class v17_veca_reg(
    V17, V17_H, V17_J, V17_K
);

// Class for vector register V18
reg_class v18_veca_reg(
    V18, V18_H, V18_J, V18_K
);

// Class for vector register V23
reg_class v23_veca_reg(
    V23, V23_H, V23_J, V23_K
);

// Class for vector register V24
reg_class v24_veca_reg(
    V24, V24_H, V24_J, V24_K
);
  924 
// Singleton classes pinning an operand to one specific 128-bit vector
// register (slots Vn, Vn_H), one class per register v0..v31.

// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Class for 128 bit register v4
reg_class v4_reg(
    V4, V4_H
);

// Class for 128 bit register v5
reg_class v5_reg(
    V5, V5_H
);

// Class for 128 bit register v6
reg_class v6_reg(
    V6, V6_H
);

// Class for 128 bit register v7
reg_class v7_reg(
    V7, V7_H
);

// Class for 128 bit register v8
reg_class v8_reg(
    V8, V8_H
);

// Class for 128 bit register v9
reg_class v9_reg(
    V9, V9_H
);

// Class for 128 bit register v10
reg_class v10_reg(
    V10, V10_H
);

// Class for 128 bit register v11
reg_class v11_reg(
    V11, V11_H
);

// Class for 128 bit register v12
reg_class v12_reg(
    V12, V12_H
);

// Class for 128 bit register v13
reg_class v13_reg(
    V13, V13_H
);

// Class for 128 bit register v14
reg_class v14_reg(
    V14, V14_H
);

// Class for 128 bit register v15
reg_class v15_reg(
    V15, V15_H
);

// Class for 128 bit register v16
reg_class v16_reg(
    V16, V16_H
);

// Class for 128 bit register v17
reg_class v17_reg(
    V17, V17_H
);

// Class for 128 bit register v18
reg_class v18_reg(
    V18, V18_H
);

// Class for 128 bit register v19
reg_class v19_reg(
    V19, V19_H
);

// Class for 128 bit register v20
reg_class v20_reg(
    V20, V20_H
);

// Class for 128 bit register v21
reg_class v21_reg(
    V21, V21_H
);

// Class for 128 bit register v22
reg_class v22_reg(
    V22, V22_H
);

// Class for 128 bit register v23
reg_class v23_reg(
    V23, V23_H
);

// Class for 128 bit register v24
reg_class v24_reg(
    V24, V24_H
);

// Class for 128 bit register v25
reg_class v25_reg(
    V25, V25_H
);

// Class for 128 bit register v26
reg_class v26_reg(
    V26, V26_H
);

// Class for 128 bit register v27
reg_class v27_reg(
    V27, V27_H
);

// Class for 128 bit register v28
reg_class v28_reg(
    V28, V28_H
);

// Class for 128 bit register v29
reg_class v29_reg(
    V29, V29_H
);

// Class for 128 bit register v30
reg_class v30_reg(
    V30, V30_H
);

// Class for 128 bit register v31
reg_class v31_reg(
    V31, V31_H
);
 1084 
// Class for all SVE predicate registers.
// P7 is deliberately excluded: it is kept as a constant all-true
// predicate (see the inline note below).
reg_class pr_reg (
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    // P7, non-allocatable, preserved with all elements preset to TRUE.
    P8,
    P9,
    P10,
    P11,
    P12,
    P13,
    P14,
    P15
);
 1104 
// Class for SVE governing predicate registers, which are used
// to determine the active elements of a predicated instruction.
// Restricted to the low predicates p0-p6: most predicated SVE
// encodings address the governing predicate with a 3-bit field
// (p0-p7), and p7 is reserved as the all-true predicate.
reg_class gov_pr (
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    // P7, non-allocatable, preserved with all elements preset to TRUE.
);
 1117 
// Singleton classes for the individual predicate registers p0 and p1.
reg_class p0_reg(P0);
reg_class p1_reg(P1);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);
 1123 
 1124 %}
 1125 
 1126 //----------DEFINITION BLOCK---------------------------------------------------
 1127 // Define name --> value mappings to inform the ADLC of an integer valued name
 1128 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 1129 // Format:
 1130 //        int_def  <name>         ( <int_value>, <expression>);
 1131 // Generated Code in ad_<arch>.hpp
 1132 //        #define  <name>   (<expression>)
 1133 //        // value == <int_value>
 1134 // Generated code in ad_<arch>.cpp adlc_verification()
 1135 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 1136 //
 1137 
// We follow the ppc-aix port in using a simple cost model which ranks
// register operations as cheap, memory ops as more expensive and
// branches as most expensive. The first two have a low as well as a
// normal cost. A huge cost is effectively a way of telling the matcher
// never to select a rule.
 1143 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls cost twice a register op.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile accesses involve barriers; rank them well above plain ops.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 1151 
 1152 
 1153 //----------SOURCE BLOCK-------------------------------------------------------
 1154 // This is a block of C++ code which provides values, functions, and
 1155 // definitions necessary in the rest of the architecture description
 1156 
source_hpp %{

#include "asm/macroAssembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "opto/addnode.hpp"
#include "opto/convertnode.hpp"
#include "runtime/objectMonitor.hpp"

// Register masks backing the dynamic reg_class definitions above.
// They are defined and populated in reg_mask_init() in the source
// block below.
extern RegMask _ANY_REG32_mask;
extern RegMask _ANY_REG_mask;
extern RegMask _PTR_REG_mask;
extern RegMask _NO_SPECIAL_REG32_mask;
extern RegMask _NO_SPECIAL_REG_mask;
extern RegMask _NO_SPECIAL_PTR_REG_mask;
extern RegMask _NO_SPECIAL_NO_RFP_PTR_REG_mask;

// AArch64 needs no call trampoline stubs; both sizes are zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};

class HandlerImpl {

 public:

  static int emit_deopt_handler(C2_MacroAssembler* masm);

  static uint size_deopt_handler() {
    // count one branch instruction and one far call instruction sequence
    return NativeInstruction::instruction_size + MacroAssembler::far_codestub_branch_size();
  }
};

// No AArch64-specific node flags are defined beyond the shared set.
class Node::PD {
public:
  enum NodeFlags {
    _last_flag = Node::_last_flag
  };
};

  // Returns true if opcode is a CompareAndSwap-style LoadStore; see the
  // definition below for the role of maybe_volatile.
  bool is_CAS(int opcode, bool maybe_volatile);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);

  // Convert BoolTest condition to Assembler condition.
  // Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
  Assembler::Condition to_assembler_cond(BoolTest::mask cond);
%}
 1236 
 1237 source %{
 1238 
 1239   // Derived RegMask with conditionally allocatable registers
 1240 
  // No AArch64-specific analysis pass is performed over the mach graph.
  void PhaseOutput::pd_perform_mach_node_analysis() {
  }

  // Instructions may start at any address: no alignment constraint.
  int MachNode::pd_alignment_required() const {
    return 1;
  }

  // No padding is ever required before an instruction on this platform.
  int MachNode::compute_padding(int current_offset) const {
    return 0;
  }
 1251 
  // Definitions of the masks declared in source_hpp; populated once at
  // startup by reg_mask_init() below.
  RegMask _ANY_REG32_mask;
  RegMask _ANY_REG_mask;
  RegMask _PTR_REG_mask;
  RegMask _NO_SPECIAL_REG32_mask;
  RegMask _NO_SPECIAL_REG_mask;
  RegMask _NO_SPECIAL_PTR_REG_mask;
  RegMask _NO_SPECIAL_NO_RFP_PTR_REG_mask;
 1259 
  void reg_mask_init() {
    // We derive below RegMask(s) from the ones which are auto-generated from
    // adlc register classes to make AArch64 rheapbase (r27) and rfp (r29)
    // registers conditionally reserved.

    // All 32-bit registers except sp (r31).
    _ANY_REG32_mask.assignFrom(_ALL_REG32_mask);
    _ANY_REG32_mask.remove(OptoReg::as_OptoReg(r31_sp->as_VMReg()));

    _ANY_REG_mask.assignFrom(_ALL_REG_mask);

    _PTR_REG_mask.assignFrom(_ALL_REG_mask);

    // Start from the full masks and drop the registers adlc marked
    // non-allocatable.
    _NO_SPECIAL_REG32_mask.assignFrom(_ALL_REG32_mask);
    _NO_SPECIAL_REG32_mask.subtract(_NON_ALLOCATABLE_REG32_mask);

    _NO_SPECIAL_REG_mask.assignFrom(_ALL_REG_mask);
    _NO_SPECIAL_REG_mask.subtract(_NON_ALLOCATABLE_REG_mask);

    _NO_SPECIAL_PTR_REG_mask.assignFrom(_ALL_REG_mask);
    _NO_SPECIAL_PTR_REG_mask.subtract(_NON_ALLOCATABLE_REG_mask);

    // r27 is not allocatable when compressed oops is on and heapbase is not
    // zero, compressed klass pointers doesn't use r27 after JDK-8234794
    if (UseCompressedOops && (CompressedOops::base() != nullptr)) {
      _NO_SPECIAL_REG32_mask.remove(OptoReg::as_OptoReg(r27->as_VMReg()));
      _NO_SPECIAL_REG_mask.remove(OptoReg::as_OptoReg(r27->as_VMReg()));
      _NO_SPECIAL_PTR_REG_mask.remove(OptoReg::as_OptoReg(r27->as_VMReg()));
    }

    // r29 is not allocatable when PreserveFramePointer is on
    if (PreserveFramePointer) {
      _NO_SPECIAL_REG32_mask.remove(OptoReg::as_OptoReg(r29->as_VMReg()));
      _NO_SPECIAL_REG_mask.remove(OptoReg::as_OptoReg(r29->as_VMReg()));
      _NO_SPECIAL_PTR_REG_mask.remove(OptoReg::as_OptoReg(r29->as_VMReg()));
    }

    // Derived last so it inherits the conditional removals above; rfp
    // (r29) is excluded from this mask unconditionally.
    _NO_SPECIAL_NO_RFP_PTR_REG_mask.assignFrom(_NO_SPECIAL_PTR_REG_mask);
    _NO_SPECIAL_NO_RFP_PTR_REG_mask.remove(OptoReg::as_OptoReg(r29->as_VMReg()));
  }
 1299 
  // Optimization of volatile gets and puts
 1301   // -------------------------------------
 1302   //
 1303   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
 1304   // use to implement volatile reads and writes. For a volatile read
 1305   // we simply need
 1306   //
 1307   //   ldar<x>
 1308   //
 1309   // and for a volatile write we need
 1310   //
 1311   //   stlr<x>
 1312   //
 1313   // Alternatively, we can implement them by pairing a normal
 1314   // load/store with a memory barrier. For a volatile read we need
 1315   //
 1316   //   ldr<x>
 1317   //   dmb ishld
 1318   //
 1319   // for a volatile write
 1320   //
 1321   //   dmb ish
 1322   //   str<x>
 1323   //   dmb ish
 1324   //
 1325   // We can also use ldaxr and stlxr to implement compare and swap CAS
 1326   // sequences. These are normally translated to an instruction
 1327   // sequence like the following
 1328   //
 1329   //   dmb      ish
 1330   // retry:
 1331   //   ldxr<x>   rval raddr
 1332   //   cmp       rval rold
 1333   //   b.ne done
 1334   //   stlxr<x>  rval, rnew, rold
 1335   //   cbnz      rval retry
 1336   // done:
 1337   //   cset      r0, eq
 1338   //   dmb ishld
 1339   //
 1340   // Note that the exclusive store is already using an stlxr
 1341   // instruction. That is required to ensure visibility to other
 1342   // threads of the exclusive write (assuming it succeeds) before that
 1343   // of any subsequent writes.
 1344   //
 1345   // The following instruction sequence is an improvement on the above
 1346   //
 1347   // retry:
 1348   //   ldaxr<x>  rval raddr
 1349   //   cmp       rval rold
 1350   //   b.ne done
 1351   //   stlxr<x>  rval, rnew, rold
 1352   //   cbnz      rval retry
 1353   // done:
 1354   //   cset      r0, eq
 1355   //
 1356   // We don't need the leading dmb ish since the stlxr guarantees
 1357   // visibility of prior writes in the case that the swap is
 1358   // successful. Crucially we don't have to worry about the case where
 1359   // the swap is not successful since no valid program should be
 1360   // relying on visibility of prior changes by the attempting thread
 1361   // in the case where the CAS fails.
 1362   //
 1363   // Similarly, we don't need the trailing dmb ishld if we substitute
 1364   // an ldaxr instruction since that will provide all the guarantees we
 1365   // require regarding observation of changes made by other threads
 1366   // before any change to the CAS address observed by the load.
 1367   //
 1368   // In order to generate the desired instruction sequence we need to
 1369   // be able to identify specific 'signature' ideal graph node
 1370   // sequences which i) occur as a translation of a volatile reads or
 1371   // writes or CAS operations and ii) do not occur through any other
 1372   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
 1374   // sequences to the desired machine code sequences. Selection of the
 1375   // alternative rules can be implemented by predicates which identify
 1376   // the relevant node sequences.
 1377   //
 1378   // The ideal graph generator translates a volatile read to the node
 1379   // sequence
 1380   //
 1381   //   LoadX[mo_acquire]
 1382   //   MemBarAcquire
 1383   //
 1384   // As a special case when using the compressed oops optimization we
 1385   // may also see this variant
 1386   //
 1387   //   LoadN[mo_acquire]
 1388   //   DecodeN
 1389   //   MemBarAcquire
 1390   //
 1391   // A volatile write is translated to the node sequence
 1392   //
 1393   //   MemBarRelease
 1394   //   StoreX[mo_release] {CardMark}-optional
 1395   //   MemBarVolatile
 1396   //
 1397   // n.b. the above node patterns are generated with a strict
 1398   // 'signature' configuration of input and output dependencies (see
 1399   // the predicates below for exact details). The card mark may be as
 1400   // simple as a few extra nodes or, in a few GC configurations, may
 1401   // include more complex control flow between the leading and
 1402   // trailing memory barriers. However, whatever the card mark
 1403   // configuration these signatures are unique to translated volatile
 1404   // reads/stores -- they will not appear as a result of any other
 1405   // bytecode translation or inlining nor as a consequence of
 1406   // optimizing transforms.
 1407   //
 1408   // We also want to catch inlined unsafe volatile gets and puts and
 1409   // be able to implement them using either ldar<x>/stlr<x> or some
 1410   // combination of ldr<x>/stlr<x> and dmb instructions.
 1411   //
 1412   // Inlined unsafe volatiles puts manifest as a minor variant of the
 1413   // normal volatile put node sequence containing an extra cpuorder
 1414   // membar
 1415   //
 1416   //   MemBarRelease
 1417   //   MemBarCPUOrder
 1418   //   StoreX[mo_release] {CardMark}-optional
 1419   //   MemBarCPUOrder
 1420   //   MemBarVolatile
 1421   //
 1422   // n.b. as an aside, a cpuorder membar is not itself subject to
 1423   // matching and translation by adlc rules.  However, the rule
 1424   // predicates need to detect its presence in order to correctly
 1425   // select the desired adlc rules.
 1426   //
 1427   // Inlined unsafe volatile gets manifest as a slightly different
 1428   // node sequence to a normal volatile get because of the
 1429   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
 1431   // MemBarAcquire, possibly through an optional DecodeN, is still
 1432   // present
 1433   //
 1434   //   MemBarCPUOrder
 1435   //        ||       \\
 1436   //   MemBarCPUOrder LoadX[mo_acquire]
 1437   //        ||            |
 1438   //        ||       {DecodeN} optional
 1439   //        ||       /
 1440   //     MemBarAcquire
 1441   //
 1442   // In this case the acquire membar does not directly depend on the
 1443   // load. However, we can be sure that the load is generated from an
 1444   // inlined unsafe volatile get if we see it dependent on this unique
 1445   // sequence of membar nodes. Similarly, given an acquire membar we
 1446   // can know that it was added because of an inlined unsafe volatile
 1447   // get if it is fed and feeds a cpuorder membar and if its feed
 1448   // membar also feeds an acquiring load.
 1449   //
 1450   // Finally an inlined (Unsafe) CAS operation is translated to the
 1451   // following ideal graph
 1452   //
 1453   //   MemBarRelease
 1454   //   MemBarCPUOrder
 1455   //   CompareAndSwapX {CardMark}-optional
 1456   //   MemBarCPUOrder
 1457   //   MemBarAcquire
 1458   //
 1459   // So, where we can identify these volatile read and write
 1460   // signatures we can choose to plant either of the above two code
 1461   // sequences. For a volatile read we can simply plant a normal
 1462   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
 1463   // also choose to inhibit translation of the MemBarAcquire and
 1464   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
 1465   //
 1466   // When we recognise a volatile store signature we can choose to
 1467   // plant at a dmb ish as a translation for the MemBarRelease, a
 1468   // normal str<x> and then a dmb ish for the MemBarVolatile.
 1469   // Alternatively, we can inhibit translation of the MemBarRelease
 1470   // and MemBarVolatile and instead plant a simple stlr<x>
 1471   // instruction.
 1472   //
 1473   // when we recognise a CAS signature we can choose to plant a dmb
 1474   // ish as a translation for the MemBarRelease, the conventional
 1475   // macro-instruction sequence for the CompareAndSwap node (which
 1476   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
 1477   // Alternatively, we can elide generation of the dmb instructions
 1478   // and plant the alternative CompareAndSwap macro-instruction
 1479   // sequence (which uses ldaxr<x>).
 1480   //
 1481   // Of course, the above only applies when we see these signature
 1482   // configurations. We still want to plant dmb instructions in any
 1483   // other cases where we may see a MemBarAcquire, MemBarRelease or
 1484   // MemBarVolatile. For example, at the end of a constructor which
 1485   // writes final/volatile fields we will see a MemBarRelease
 1486   // instruction and this needs a 'dmb ish' lest we risk the
 1487   // constructed object being visible without making the
 1488   // final/volatile field writes visible.
 1489   //
 1490   // n.b. the translation rules below which rely on detection of the
 1491   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
 1492   // If we see anything other than the signature configurations we
 1493   // always just translate the loads and stores to ldr<x> and str<x>
 1494   // and translate acquire, release and volatile membars to the
 1495   // relevant dmb instructions.
 1496   //
 1497 
  // is_CAS(int opcode, bool maybe_volatile)
  //
  // return true if opcode is one of the possible CompareAndSwapX
  // values otherwise false.
  //
  // The first group of opcodes (strong CAS plus the atomic
  // get-and-set/get-and-add forms) always answers true.  The second
  // group (compare-and-exchange and weak CAS variants) only counts as
  // a CAS when the caller passes maybe_volatile == true.

  bool is_CAS(int opcode, bool maybe_volatile)
  {
    switch(opcode) {
      // We handle these
    case Op_CompareAndSwapI:
    case Op_CompareAndSwapL:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_CompareAndSwapB:
    case Op_CompareAndSwapS:
    case Op_GetAndSetI:
    case Op_GetAndSetL:
    case Op_GetAndSetP:
    case Op_GetAndSetN:
    case Op_GetAndAddI:
    case Op_GetAndAddL:
      return true;
    case Op_CompareAndExchangeI:
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeB:
    case Op_CompareAndExchangeS:
    case Op_CompareAndExchangeL:
    case Op_CompareAndExchangeP:
    case Op_WeakCompareAndSwapB:
    case Op_WeakCompareAndSwapS:
    case Op_WeakCompareAndSwapI:
    case Op_WeakCompareAndSwapL:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      return maybe_volatile;
    default:
      return false;
    }
  }
 1543 
 1544   // helper to determine the maximum number of Phi nodes we may need to
 1545   // traverse when searching from a card mark membar for the merge mem
 1546   // feeding a trailing membar or vice versa
 1547 
// predicates controlling emit of ldr<x>/ldar<x>

// Returns true when the acquire membar is redundant because the
// preceding load or CAS will itself be emitted in an acquiring form
// (ldar<x>/ldaxr<x>), so no separate dmb is required.
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  MemBarNode* mb = barrier->as_MemBar();

  // Trailing membar of a volatile-load signature: the load will be an
  // ldar<x>.
  if (mb->trailing_load()) {
    return true;
  }

  // Trailing membar of a CAS signature: redundant provided the
  // preceding load-store is one of the recognised CAS opcodes.
  if (mb->trailing_load_store()) {
    Node* load_store = mb->in(MemBarNode::Precedent);
    assert(load_store->is_LoadStore(), "unexpected graph shape");
    return is_CAS(load_store->Opcode(), true);
  }

  return false;
}
 1568 
 1569 bool needs_acquiring_load(const Node *n)
 1570 {
 1571   assert(n->is_Load(), "expecting a load");
 1572   LoadNode *ld = n->as_Load();
 1573   return ld->is_acquire();
 1574 }
 1575 
 1576 bool unnecessary_release(const Node *n)
 1577 {
 1578   assert((n->is_MemBar() &&
 1579           n->Opcode() == Op_MemBarRelease),
 1580          "expecting a release membar");
 1581 
 1582   MemBarNode *barrier = n->as_MemBar();
 1583   if (!barrier->leading()) {
 1584     return false;
 1585   } else {
 1586     Node* trailing = barrier->trailing_membar();
 1587     MemBarNode* trailing_mb = trailing->as_MemBar();
 1588     assert(trailing_mb->trailing(), "Not a trailing membar?");
 1589     assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
 1590 
 1591     Node* mem = trailing_mb->in(MemBarNode::Precedent);
 1592     if (mem->is_Store()) {
 1593       assert(mem->as_Store()->is_release(), "");
 1594       assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
 1595       return true;
 1596     } else {
 1597       assert(mem->is_LoadStore(), "");
 1598       assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
 1599       return is_CAS(mem->Opcode(), true);
 1600     }
 1601   }
 1602   return false;
 1603 }
 1604 
// Returns true when this MemBarVolatile is the trailing membar of a
// recognised volatile-store signature; the store will then be emitted
// as stlr<x> and the membar can be elided.
bool unnecessary_volatile(const Node *n)
{
  // assert n->is_MemBar();
  MemBarNode *mbvol = n->as_MemBar();

  bool release = mbvol->trailing_store();
  assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
#ifdef ASSERT
  if (release) {
    // Sanity-check the leading/trailing membar pairing established
    // during matching.
    Node* leading = mbvol->leading_membar();
    assert(leading->Opcode() == Op_MemBarRelease, "");
    assert(leading->as_MemBar()->leading_store(), "");
    assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
  }
#endif

  return release;
}
 1623 
 1624 // predicates controlling emit of str<x>/stlr<x>
 1625 
 1626 bool needs_releasing_store(const Node *n)
 1627 {
 1628   // assert n->is_Store();
 1629   StoreNode *st = n->as_Store();
 1630   return st->trailing_membar() != nullptr;
 1631 }
 1632 
// predicate controlling translation of CAS
//
// returns true if CAS needs to use an acquiring load otherwise false

bool needs_acquiring_load_exclusive(const Node *n)
{
  assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
  LoadStoreNode* ldst = n->as_LoadStore();
  // Strong CAS / get-and-set forms (is_CAS with maybe_volatile ==
  // false) always use the acquiring ldaxr<x>; matching is expected to
  // have paired them with a trailing membar.
  if (is_CAS(n->Opcode(), false)) {
    assert(ldst->trailing_membar() != nullptr, "expected trailing membar");
  } else {
    // Weak CAS and compare-and-exchange forms acquire only when they
    // are part of a volatile signature (trailing membar present).
    return ldst->trailing_membar() != nullptr;
  }

  // so we can just return true here
  return true;
}
 1650 
 1651 #define __ masm->
 1652 
 1653 // advance declarations for helper functions to convert register
 1654 // indices to register objects
 1655 
 1656 // the ad file has to provide implementations of certain methods
 1657 // expected by the generic code
 1658 //
 1659 // REQUIRED FUNCTIONALITY
 1660 
 1661 //=============================================================================
 1662 
 1663 // !!!!! Special hack to get all types of calls to specify the byte offset
 1664 //       from the start of the call to the point where the return address
 1665 //       will point.
 1666 
 1667 int MachCallStaticJavaNode::ret_addr_offset()
 1668 {
 1669   // call should be a simple bl
 1670   int off = 4;
 1671   return off;
 1672 }
 1673 
// A dynamic call is emitted as a three-instruction movz/movk/movk
// sequence followed by the bl, so the return address is 16 bytes in.
int MachCallDynamicJavaNode::ret_addr_offset()
{
  return 16; // movz, movk, movk, bl
}
 1678 
 1679 int MachCallRuntimeNode::ret_addr_offset() {
 1680   // for generated stubs the call will be
 1681   //   bl(addr)
 1682   // or with far branches
 1683   //   bl(trampoline_stub)
 1684   // for real runtime callouts it will be six instructions
 1685   // see aarch64_enc_java_to_runtime
 1686   //   adr(rscratch2, retaddr)
 1687   //   str(rscratch2, Address(rthread, JavaThread::last_Java_pc_offset()));
 1688   //   lea(rscratch1, RuntimeAddress(addr)
 1689   //   blr(rscratch1)
 1690   CodeBlob *cb = CodeCache::find_blob(_entry_point);
 1691   if (cb) {
 1692     return 1 * NativeInstruction::instruction_size;
 1693   } else {
 1694     return 6 * NativeInstruction::instruction_size;
 1695   }
 1696 }
 1697 
 1698 //=============================================================================
 1699 
#ifndef PRODUCT
// Pretty-print the breakpoint pseudo-instruction.
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif

// Emit a brk #0 trap instruction.
void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  __ brk(0);
}

uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 1713 
 1714 //=============================================================================
 1715 
 1716 #ifndef PRODUCT
 1717   void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
 1718     st->print("nop \t# %d bytes pad for loops and calls", _count);
 1719   }
 1720 #endif
 1721 
 1722   void MachNopNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc*) const {
 1723     for (int i = 0; i < _count; i++) {
 1724       __ nop();
 1725     }
 1726   }
 1727 
 1728   uint MachNopNode::size(PhaseRegAlloc*) const {
 1729     return _count * NativeInstruction::instruction_size;
 1730   }
 1731 
 1732 //=============================================================================
 1733 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::EMPTY;
 1734 
 1735 int ConstantTable::calculate_table_base_offset() const {
 1736   return 0;  // absolute addressing, no offset
 1737 }
 1738 
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
// Never called, because requires_postalloc_expand() above returns false.
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}

// The table base needs no materialization code on AArch64.
void MachConstantBaseNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {
  // Empty encoding
}

uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;  // consistent with the empty encoding
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
 1757 
#ifndef PRODUCT
// Debug-only listing that mirrors the code produced by
// MachPrologNode::emit(): optional stack bang, ROP protection, frame
// construction (small- vs. large-frame forms), and the nmethod entry
// barrier sequence for normal (non-stub) compilations.
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  if (C->output()->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  if (VM_Version::use_rop_protection()) {
    st->print("ldr  zr, [lr]\n\t");
    st->print("paciaz\n\t");
  }
  // Small frames fit the signed 9-bit stp offset; larger ones materialize
  // the frame size in rscratch1 first.
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
  if (C->stub_function() == nullptr) {
    st->print("\n\t");
    st->print("ldr  rscratch1, [guard]\n\t");
    st->print("dmb ishld\n\t");
    st->print("ldr  rscratch2, [rthread, #thread_disarmed_guard_value_offset]\n\t");
    st->print("cmp  rscratch1, rscratch2\n\t");
    st->print("b.eq skip");
    st->print("\n\t");
    st->print("blr #nmethod_entry_barrier_stub\n\t");
    st->print("b skip\n\t");
    st->print("guard: int\n\t");
    st->print("\n\t");
    st->print("skip:\n\t");
  }
}
#endif
 1797 
// Emit the method prologue: optional class-initialization barrier, SVE
// ptrue reinitialization, stack-bang check, frame construction, and (for
// non-stub compilations) the nmethod entry barrier.
void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;

  // n.b. frame size includes space for return pc and rfp
  const int framesize = C->output()->frame_size_in_bytes();

  if (C->clinit_barrier_on_entry()) {
    assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");

    Label L_skip_barrier;

    // If the barrier does not branch to L_skip_barrier, fall through to
    // the wrong-method stub.
    __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
    __ bind(L_skip_barrier);
  }

  if (C->max_vector_size() > 0) {
    // Re-establish the ptrue predicate register used by SVE vector code.
    __ reinitialize_ptrue();
  }

  int bangsize = C->output()->bang_size_in_bytes();
  if (C->output()->need_stack_bang(bangsize))
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (C->stub_function() == nullptr) {
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
    // Dummy labels for just measuring the code size
    Label dummy_slow_path;
    Label dummy_continuation;
    Label dummy_guard;
    Label* slow_path = &dummy_slow_path;
    Label* continuation = &dummy_continuation;
    Label* guard = &dummy_guard;
    if (!Compile::current()->output()->in_scratch_emit_size()) {
      // Use real labels from actual stub when not emitting code for the purpose of measuring its size
      C2EntryBarrierStub* stub = new (Compile::current()->comp_arena()) C2EntryBarrierStub();
      Compile::current()->output()->add_stub(stub);
      slow_path = &stub->entry();
      continuation = &stub->continuation();
      guard = &stub->guard();
    }
    // In the C2 code, we move the non-hot part of nmethod entry barriers out-of-line to a stub.
    bs->nmethod_entry_barrier(masm, slow_path, continuation, guard);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->output()->set_frame_complete(__ offset());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    ConstantTable& constant_table = C->output()->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
 1859 
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

// Relocation count reported to the output phase for this node.
int MachPrologNode::reloc() const
{
  return 0;
}
 1870 
 1871 //=============================================================================
 1872 
#ifndef PRODUCT
// Debug-only listing that mirrors MachEpilogNode::emit(): frame teardown
// (three forms, by frame size), ROP protection, and the return-poll check.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }
  if (VM_Version::use_rop_protection()) {
    st->print("autiaz\n\t");
    st->print("ldr  zr, [lr]\n\t");
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# test polling word\n\t");
    st->print("ldr  rscratch1, [rthread],#%d\n\t", in_bytes(JavaThread::polling_word_offset()));
    st->print("cmp  sp, rscratch1\n\t");
    st->print("bhi #slow_path");
  }
}
#endif
 1903 
// Emit the method epilogue: tear down the frame, optionally check the
// reserved stack area, and perform the return safepoint poll.
void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    Label dummy_label;
    Label* code_stub = &dummy_label;
    if (!C->output()->in_scratch_emit_size()) {
      // Use a real slow-path stub; the dummy label is only for size measurement.
      C2SafepointPollStub* stub = new (C->comp_arena()) C2SafepointPollStub(__ offset());
      C->output()->add_stub(stub);
      code_stub = &stub->entry();
    }
    __ relocate(relocInfo::poll_return_type);
    __ safepoint_poll(*code_stub, true /* at_return */, true /* in_nmethod */);
  }
}

uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
 1940 
 1941 //=============================================================================
 1942 
 1943 static enum RC rc_class(OptoReg::Name reg) {
 1944 
 1945   if (reg == OptoReg::Bad) {
 1946     return rc_bad;
 1947   }
 1948 
 1949   // we have 32 int registers * 2 halves
 1950   int slots_of_int_registers = Register::number_of_registers * Register::max_slots_per_register;
 1951 
 1952   if (reg < slots_of_int_registers) {
 1953     return rc_int;
 1954   }
 1955 
 1956   // we have 32 float register * 8 halves
 1957   int slots_of_float_registers = FloatRegister::number_of_registers * FloatRegister::max_slots_per_register;
 1958   if (reg < slots_of_int_registers + slots_of_float_registers) {
 1959     return rc_float;
 1960   }
 1961 
 1962   int slots_of_predicate_registers = PRegister::number_of_registers * PRegister::max_slots_per_register;
 1963   if (reg < slots_of_int_registers + slots_of_float_registers + slots_of_predicate_registers) {
 1964     return rc_predicate;
 1965   }
 1966 
 1967   // Between predicate regs & stack is the flags.
 1968   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
 1969 
 1970   return rc_stack;
 1971 }
 1972 
// Emit (when masm != nullptr) and/or print (when st != nullptr) the code
// for a spill copy between any combination of integer registers, FP/SIMD
// registers, SVE predicate registers and stack slots, including NEON and
// SVE vector payloads. do_size is unused on AArch64; the node's size is
// computed by MachSpillCopyNode::size(), and this method returns 0.
uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
  Compile* C = ra_->C;

  // Get registers to move.
  OptoReg::Name src_hi = ra_->get_reg_second(in(1));
  OptoReg::Name src_lo = ra_->get_reg_first(in(1));
  OptoReg::Name dst_hi = ra_->get_reg_second(this);
  OptoReg::Name dst_lo = ra_->get_reg_first(this);

  enum RC src_hi_rc = rc_class(src_hi);
  enum RC src_lo_rc = rc_class(src_lo);
  enum RC dst_hi_rc = rc_class(dst_hi);
  enum RC dst_lo_rc = rc_class(dst_lo);

  assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");

  if (src_hi != OptoReg::Bad && !bottom_type()->isa_vectmask()) {
    assert((src_lo&1)==0 && src_lo+1==src_hi &&
           (dst_lo&1)==0 && dst_lo+1==dst_hi,
           "expected aligned-adjacent pairs");
  }

  if (src_lo == dst_lo && src_hi == dst_hi) {
    return 0;            // Self copy, no move.
  }

  bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
              (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
  int src_offset = ra_->reg2offset(src_lo);
  int dst_offset = ra_->reg2offset(dst_lo);

  // Vector (but not predicate/mask) spill copies.
  if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
    uint ireg = ideal_reg();
    DEBUG_ONLY(int algm = MIN2(RegMask::num_registers(ireg), (int)Matcher::stack_alignment_in_slots()) * VMRegImpl::stack_slot_size);
    assert((src_lo_rc != rc_stack) || is_aligned(src_offset, algm), "unaligned vector spill sp offset %d (src)", src_offset);
    assert((dst_lo_rc != rc_stack) || is_aligned(dst_offset, algm), "unaligned vector spill sp offset %d (dst)", dst_offset);
    // Scalable (SVE) vectors; masm is null when only formatting.
    if (ireg == Op_VecA && masm) {
      int sve_vector_reg_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
      if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
        // stack->stack
        __ spill_copy_sve_vector_stack_to_stack(src_offset, dst_offset,
                                                sve_vector_reg_size_in_bytes);
      } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
        __ spill_sve_vector(as_FloatRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo),
                            sve_vector_reg_size_in_bytes);
      } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
        __ unspill_sve_vector(as_FloatRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo),
                              sve_vector_reg_size_in_bytes);
      } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
        __ sve_orr(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                   as_FloatRegister(Matcher::_regEncode[src_lo]),
                   as_FloatRegister(Matcher::_regEncode[src_lo]));
      } else {
        ShouldNotReachHere();
      }
    } else if (masm) {
      // Fixed-width NEON vectors (64-bit VecD or 128-bit VecX).
      assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
      assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
      if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
        // stack->stack
        assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
        if (ireg == Op_VecD) {
          __ unspill(rscratch1, true, src_offset);
          __ spill(rscratch1, true, dst_offset);
        } else {
          __ spill_copy128(src_offset, dst_offset);
        }
      } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
        __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
               ireg == Op_VecD ? __ T8B : __ T16B,
               as_FloatRegister(Matcher::_regEncode[src_lo]));
      } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
        __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
                 ireg == Op_VecD ? __ D : __ Q,
                 ra_->reg2offset(dst_lo));
      } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
        __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                   ireg == Op_VecD ? __ D : __ Q,
                   ra_->reg2offset(src_lo));
      } else {
        ShouldNotReachHere();
      }
    }
  } else if (masm) {
    // Scalar and predicate copies, dispatched on the source class.
    switch (src_lo_rc) {
    case rc_int:
      if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
        if (is64) {
            __ mov(as_Register(Matcher::_regEncode[dst_lo]),
                   as_Register(Matcher::_regEncode[src_lo]));
        } else {
            __ movw(as_Register(Matcher::_regEncode[dst_lo]),
                    as_Register(Matcher::_regEncode[src_lo]));
        }
      } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
        if (is64) {
            __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_Register(Matcher::_regEncode[src_lo]));
        } else {
            __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_Register(Matcher::_regEncode[src_lo]));
        }
      } else {                    // gpr --> stack spill
        assert(dst_lo_rc == rc_stack, "spill to bad register class");
        __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
      }
      break;
    case rc_float:
      if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
        if (is64) {
            __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        } else {
            __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        }
      } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
        if (is64) {
            __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        } else {
            __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        }
      } else {                    // fpr --> stack spill
        assert(dst_lo_rc == rc_stack, "spill to bad register class");
        __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
                 is64 ? __ D : __ S, dst_offset);
      }
      break;
    case rc_stack:
      if (dst_lo_rc == rc_int) {  // stack --> gpr load
        __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
      } else if (dst_lo_rc == rc_float) { // stack --> fpr load
        __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                   is64 ? __ D : __ S, src_offset);
      } else if (dst_lo_rc == rc_predicate) {
        __ unspill_sve_predicate(as_PRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo),
                                 Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
      } else {                    // stack --> stack copy
        assert(dst_lo_rc == rc_stack, "spill to bad register class");
        if (ideal_reg() == Op_RegVectMask) {
          __ spill_copy_sve_predicate_stack_to_stack(src_offset, dst_offset,
                                                     Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
        } else {
          __ unspill(rscratch1, is64, src_offset);
          __ spill(rscratch1, is64, dst_offset);
        }
      }
      break;
    case rc_predicate:
      if (dst_lo_rc == rc_predicate) {
        __ sve_mov(as_PRegister(Matcher::_regEncode[dst_lo]), as_PRegister(Matcher::_regEncode[src_lo]));
      } else if (dst_lo_rc == rc_stack) {
        __ spill_sve_predicate(as_PRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo),
                               Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
      } else {
        assert(false, "bad src and dst rc_class combination.");
        ShouldNotReachHere();
      }
      break;
    default:
      assert(false, "bad rc_class for spill");
      ShouldNotReachHere();
    }
  }

  // Optional human-readable description of the copy (used by format()).
  if (st) {
    st->print("spill ");
    if (src_lo_rc == rc_stack) {
      st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
    } else {
      st->print("%s -> ", Matcher::regName[src_lo]);
    }
    if (dst_lo_rc == rc_stack) {
      st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
    } else {
      st->print("%s", Matcher::regName[dst_lo]);
    }
    if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
      int vsize = 0;
      switch (ideal_reg()) {
      case Op_VecD:
        vsize = 64;
        break;
      case Op_VecX:
        vsize = 128;
        break;
      case Op_VecA:
        vsize = Matcher::scalable_vector_reg_size(T_BYTE) * 8;
        break;
      default:
        assert(false, "bad register type for spill");
        ShouldNotReachHere();
      }
      st->print("\t# vector spill size = %d", vsize);
    } else if (ideal_reg() == Op_RegVectMask) {
      assert(Matcher::supports_scalable_vector(), "bad register type for spill");
      int vsize = Matcher::scalable_predicate_reg_slots() * 32;
      st->print("\t# predicate spill size = %d", vsize);
    } else {
      st->print("\t# spill size = %d", is64 ? 64 : 32);
    }
  }

  return 0;

}
 2181 
#ifndef PRODUCT
// Debug listing: print an abstract SpillCopy before register allocation,
// or the concrete move (via implementation() in print-only mode) after.
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    implementation(nullptr, ra_, false, st);
}
#endif

void MachSpillCopyNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  implementation(masm, ra_, false, nullptr);
}

uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 2198 
 2199 //=============================================================================
 2200 
 2201 #ifndef PRODUCT
 2202 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 2203   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 2204   int reg = ra_->get_reg_first(this);
 2205   st->print("add %s, rsp, #%d]\t# box lock",
 2206             Matcher::regName[reg], offset);
 2207 }
 2208 #endif
 2209 
// Materialize the address of the on-stack lock box into the result register.
void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  // This add will handle any 24-bit signed offset. 24 bits allows an
  // 8 megabyte stack frame.
  __ add(as_Register(reg), sp, offset);
}

uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());

  // One instruction when the offset fits an add/sub immediate, otherwise
  // two (materialize the offset, then add).
  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    return NativeInstruction::instruction_size;
  } else {
    return 2 * NativeInstruction::instruction_size;
  }
}
 2229 
 2230 //=============================================================================
 2231 
 2232 #ifndef PRODUCT
 2233 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 2234 {
 2235   st->print_cr("# MachUEPNode");
 2236   if (UseCompressedClassPointers) {
 2237     st->print_cr("\tldrw rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 2238     st->print_cr("\tldrw r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
 2239     st->print_cr("\tcmpw rscratch1, r10");
 2240   } else {
 2241     st->print_cr("\tldr rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 2242     st->print_cr("\tldr r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
 2243     st->print_cr("\tcmp rscratch1, r10");
 2244   }
 2245   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
 2246 }
 2247 #endif
 2248 
// Unverified entry point: emit the inline-cache receiver klass check.
void MachUEPNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
{
  __ ic_check(InteriorEntryAlignment);
}

uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
 2258 
 2259 // REQUIRED EMIT CODE
 2260 
 2261 //=============================================================================
 2262 
// Emit deopt handler code.
// Returns the offset of the handler's entry point within the stub section,
// or 0 on failure (code cache full).
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  address base = __ start_a_stub(size_deopt_handler());
  if (base == nullptr) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }

  int offset = __ offset();
  Label start;
  __ bind(start);
  // Far call to the deopt blob's unpack entry.
  __ far_call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  // The entry point is the branch below, which loops back to the far call
  // above; the assert after it checks that enough bytes follow the entry
  // for the post-call NOP check to read safely.
  int entry_offset = __ offset();
  __ b(start);

  assert(__ offset() - offset == (int) size_deopt_handler(), "overflow");
  assert(__ offset() - entry_offset >= NativePostCallNop::first_check_size,
         "out of bounds read in post-call NOP check");
  __ end_a_stub();
  return entry_offset;
}
 2288 
 2289 // REQUIRED MATCHER CODE
 2290 
 2291 //=============================================================================
 2292 
 2293 bool Matcher::match_rule_supported(int opcode) {
 2294   if (!has_match_rule(opcode))
 2295     return false;
 2296 
 2297   switch (opcode) {
 2298     case Op_OnSpinWait:
 2299       return VM_Version::supports_on_spin_wait();
 2300     case Op_CacheWB:
 2301     case Op_CacheWBPreSync:
 2302     case Op_CacheWBPostSync:
 2303       if (!VM_Version::supports_data_cache_line_flush()) {
 2304         return false;
 2305       }
 2306       break;
 2307     case Op_ExpandBits:
 2308     case Op_CompressBits:
 2309       if (!VM_Version::supports_svebitperm()) {
 2310         return false;
 2311       }
 2312       break;
 2313     case Op_FmaF:
 2314     case Op_FmaD:
 2315     case Op_FmaVF:
 2316     case Op_FmaVD:
 2317       if (!UseFMA) {
 2318         return false;
 2319       }
 2320       break;
 2321     case Op_FmaHF:
 2322       // UseFMA flag also needs to be checked along with FEAT_FP16
 2323       if (!UseFMA || !is_feat_fp16_supported()) {
 2324         return false;
 2325       }
 2326       break;
 2327     case Op_AddHF:
 2328     case Op_SubHF:
 2329     case Op_MulHF:
 2330     case Op_DivHF:
 2331     case Op_MinHF:
 2332     case Op_MaxHF:
 2333     case Op_SqrtHF:
 2334       // Half-precision floating point scalar operations require FEAT_FP16
 2335       // to be available. FEAT_FP16 is enabled if both "fphp" and "asimdhp"
 2336       // features are supported.
 2337       if (!is_feat_fp16_supported()) {
 2338         return false;
 2339       }
 2340       break;
 2341   }
 2342 
 2343   return true; // Per default match rules are supported.
 2344 }
 2345 
// Mask of registers usable for SVE predicates.
const RegMask* Matcher::predicate_reg_mask(void) {
  return &_PR_REG_mask;
}

// Vector calling convention is available iff EnableVectorSupport is set.
bool Matcher::supports_vector_calling_convention(void) {
  return EnableVectorSupport;
}

// Register pair used to return a vector value: always V0, with the high
// bound widened for 128-bit (VecX) and scalable (VecA) vectors.
OptoRegPair Matcher::vector_return_value(uint ideal_reg) {
  assert(EnableVectorSupport, "sanity");
  int lo = V0_num;
  int hi = V0_H_num;
  if (ideal_reg == Op_VecX || ideal_reg == Op_VecA) {
    hi = V0_K_num;
  }
  return OptoRegPair(hi, lo);
}
 2363 
 2364 // Is this branch offset short enough that a short branch can be used?
 2365 //
 2366 // NOTE: If the platform does not provide any short branch variants, then
 2367 //       this method should return false for offset 0.
 2368 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
 2369   // The passed offset is relative to address of the branch.
 2370 
 2371   return (-32768 <= offset && offset < 32768);
 2372 }
 2373 
 2374 // Vector width in bytes.
 2375 int Matcher::vector_width_in_bytes(BasicType bt) {
 2376   // The MaxVectorSize should have been set by detecting SVE max vector register size.
 2377   int size = MIN2((UseSVE > 0) ? (int)FloatRegister::sve_vl_max : (int)FloatRegister::neon_vl, (int)MaxVectorSize);
 2378   // Minimum 2 values in vector
 2379   if (size < 2*type2aelembytes(bt)) size = 0;
 2380   // But never < 4
 2381   if (size < 4) size = 0;
 2382   return size;
 2383 }
 2384 
// Limits on vector size (number of elements) loaded into vector.
int Matcher::max_vector_size(const BasicType bt) {
  // 0 when vector_width_in_bytes() rules out vectors for this element type.
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
 2389 
int Matcher::min_vector_size(const BasicType bt) {
  // Usually, the shortest vector length supported by AArch64 ISA and
  // Vector API species is 64 bits. However, we allow 32-bit or 16-bit
  // vectors in a few special cases.
  int size;
  switch(bt) {
    case T_BOOLEAN:
      // Load/store a vector mask with only 2 elements for vector types
      // such as "2I/2F/2L/2D".
      size = 2;
      break;
    case T_BYTE:
      // Generate a "4B" vector, to support vector cast between "8B/16B"
      // and "4S/4I/4L/4F/4D".
      size = 4;
      break;
    case T_SHORT:
      // Generate a "2S" vector, to support vector cast between "4S/8S"
      // and "2I/2L/2F/2D".
      size = 2;
      break;
    default:
      // Limit the min vector length to 64-bit.
      size = 8 / type2aelembytes(bt);
      // The number of elements in a vector should be at least 2.
      size = MAX2(size, 2);
  }

  // Never report a minimum above the maximum for this element type.
  int max_size = max_vector_size(bt);
  return MIN2(size, max_size);
}
 2421 
int Matcher::max_vector_size_auto_vectorization(const BasicType bt) {
  // No additional restriction beyond the general maximum.
  return Matcher::max_vector_size(bt);
}

// Actual max scalable vector register length.
int Matcher::scalable_vector_reg_size(const BasicType bt) {
  return Matcher::max_vector_size(bt);
}
 2430 
// Vector ideal reg.
uint Matcher::vector_ideal_reg(int len) {
  // Lengths above the NEON maximum (but within the SVE limit) use the
  // scalable VecA register class.
  if (UseSVE > 0 && FloatRegister::neon_vl < len && len <= FloatRegister::sve_vl_max) {
    return Op_VecA;
  }
  switch(len) {
    // For 16-bit/32-bit mask vector, reuse VecD.
    case  2:
    case  4:
    case  8: return Op_VecD;
    case 16: return Op_VecX;
  }
  ShouldNotReachHere();
  return 0;
}
 2446 
// Replace a generic vector operand with the concrete operand class for the
// given ideal register kind. is_temp is unused on AArch64.
MachOper* Matcher::pd_specialize_generic_vector_operand(MachOper* generic_opnd, uint ideal_reg, bool is_temp) {
  assert(Matcher::is_generic_vector(generic_opnd), "not generic");
  switch (ideal_reg) {
    case Op_VecA: return new vecAOper();
    case Op_VecD: return new vecDOper();
    case Op_VecX: return new vecXOper();
  }
  ShouldNotReachHere();
  return nullptr;
}
 2457 
// Reg-to-reg moves are not modeled as distinct match rules on AArch64.
bool Matcher::is_reg2reg_move(MachNode* m) {
  return false;
}

// Register biasing is not used on AArch64.
bool Matcher::is_register_biasing_candidate(const MachNode* mdef, int oper_index) {
  return false;
}

// A generic vector operand is one still using the placeholder VREG opcode.
bool Matcher::is_generic_vector(MachOper* opnd)  {
  return opnd->opcode() == VREG;
}
 2469 
// Return whether or not this register is ever used as an argument.
// This function is used on startup to build the trampoline stubs in
// generateOptoStub.  Registers not mentioned will be killed by the VM
// call in the trampoline, and arguments in those registers not be
// available to the callee.
bool Matcher::can_be_java_arg(int reg)
{
  // Java arguments are passed in r0-r7 and v0-v7 (both register halves).
  return
    reg ==  R0_num || reg == R0_H_num ||
    reg ==  R1_num || reg == R1_H_num ||
    reg ==  R2_num || reg == R2_H_num ||
    reg ==  R3_num || reg == R3_H_num ||
    reg ==  R4_num || reg == R4_H_num ||
    reg ==  R5_num || reg == R5_H_num ||
    reg ==  R6_num || reg == R6_H_num ||
    reg ==  R7_num || reg == R7_H_num ||
    reg ==  V0_num || reg == V0_H_num ||
    reg ==  V1_num || reg == V1_H_num ||
    reg ==  V2_num || reg == V2_H_num ||
    reg ==  V3_num || reg == V3_H_num ||
    reg ==  V4_num || reg == V4_H_num ||
    reg ==  V5_num || reg == V5_H_num ||
    reg ==  V6_num || reg == V6_H_num ||
    reg ==  V7_num || reg == V7_H_num;
}

// All Java argument registers are also spillable.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
 2500 
// Integer register-pressure threshold used by the register allocator;
// overridable via the INTPRESSURE flag.
uint Matcher::int_pressure_limit()
{
  // JDK-8183543: When taking the number of available registers as int
  // register pressure threshold, the jtreg test:
  // test/hotspot/jtreg/compiler/regalloc/TestC2IntPressure.java
  // failed due to C2 compilation failure with
  // "COMPILE SKIPPED: failed spill-split-recycle sanity check".
  //
  // A derived pointer is live at CallNode and then is flagged by RA
  // as a spilled LRG. Spilling heuristics(Spill-USE) explicitly skip
  // derived pointers and lastly fail to spill after reaching maximum
  // number of iterations. Lowering the default pressure threshold to
  // (_NO_SPECIAL_REG32_mask.size() minus 1) forces CallNode to become
  // a high register pressure area of the code so that split_DEF can
  // generate DefinitionSpillCopy for the derived pointer.
  uint default_int_pressure_threshold = _NO_SPECIAL_REG32_mask.size() - 1;
  if (!PreserveFramePointer) {
    // When PreserveFramePointer is off, frame pointer is allocatable,
    // but different from other SOC registers, it is excluded from
    // fatproj's mask because its save type is No-Save. Decrease 1 to
    // ensure high pressure at fatproj when PreserveFramePointer is off.
    // See check_pressure_at_fatproj().
    default_int_pressure_threshold--;
  }
  return (INTPRESSURE == -1) ? default_int_pressure_threshold : INTPRESSURE;
}

// Float register-pressure threshold; overridable via FLOATPRESSURE.
uint Matcher::float_pressure_limit()
{
  // _FLOAT_REG_mask is generated by adlc from the float_reg register class.
  return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.size() : FLOATPRESSURE;
}
 2533 
// AArch64 never emits a hand-written assembler sequence for long
// division by a constant; C2's generic strength reduction is used.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
 2537 
// Register for DIVI projection of divmodI.
// AArch64 has no combined div/mod instruction, so none of these
// projection masks should ever be queried.
const RegMask& Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask::EMPTY;
}

// Register for MODI projection of divmodI.
const RegMask& Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask::EMPTY;
}

// Register for DIVL projection of divmodL.
const RegMask& Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask::EMPTY;
}

// Register for MODL projection of divmodL.
const RegMask& Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask::EMPTY;
}
 2560 
 2561 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
 2562   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
 2563     Node* u = addp->fast_out(i);
 2564     if (u->is_LoadStore()) {
 2565       // On AArch64, LoadStoreNodes (i.e. compare and swap
 2566       // instructions) only take register indirect as an operand, so
 2567       // any attempt to use an AddPNode as an input to a LoadStoreNode
 2568       // must fail.
 2569       return false;
 2570     }
 2571     if (u->is_Mem()) {
 2572       int opsize = u->as_Mem()->memory_size();
 2573       assert(opsize > 0, "unexpected memory operand size");
 2574       if (u->as_Mem()->memory_size() != (1<<shift)) {
 2575         return false;
 2576       }
 2577     }
 2578   }
 2579   return true;
 2580 }
 2581 
// Convert BoolTest condition to Assembler condition.
// Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
Assembler::Condition to_assembler_cond(BoolTest::mask cond) {
  Assembler::Condition result;
  switch(cond) {
    case BoolTest::eq:
      result = Assembler::EQ; break;
    case BoolTest::ne:
      result = Assembler::NE; break;
    case BoolTest::le:
      result = Assembler::LE; break;
    case BoolTest::ge:
      result = Assembler::GE; break;
    case BoolTest::lt:
      result = Assembler::LT; break;
    case BoolTest::gt:
      result = Assembler::GT; break;
    // Unsigned comparisons map to the unsigned AArch64 conditions
    // (LS/HS/LO/HI instead of LE/GE/LT/GT).
    case BoolTest::ule:
      result = Assembler::LS; break;
    case BoolTest::uge:
      result = Assembler::HS; break;
    case BoolTest::ult:
      result = Assembler::LO; break;
    case BoolTest::ugt:
      result = Assembler::HI; break;
    case BoolTest::overflow:
      result = Assembler::VS; break;
    case BoolTest::no_overflow:
      result = Assembler::VC; break;
    default:
      ShouldNotReachHere();
      return Assembler::Condition(-1);
  }

  // Check conversion
  // Cross-check against the cmpOpOper/cmpOpUOper operand encodings
  // (debug builds only; asserts compile away in product).
  if (cond & BoolTest::unsigned_compare) {
    assert(cmpOpUOper((BoolTest::mask)((int)cond & ~(BoolTest::unsigned_compare))).ccode() == result, "Invalid conversion");
  } else {
    assert(cmpOpOper(cond).ccode() == result, "Invalid conversion");
  }

  return result;
}
 2625 
// Binary src (Replicate con)
// Return true if node n is an SVE vector arithmetic/logical operation
// whose input m is a Replicate of a constant that fits the immediate
// form of the corresponding SVE instruction, so the matcher may clone
// the constant instead of materializing it in a register.
static bool is_valid_sve_arith_imm_pattern(Node* n, Node* m) {
  if (n == nullptr || m == nullptr) {
    return false;
  }

  if (UseSVE == 0 || m->Opcode() != Op_Replicate) {
    return false;
  }

  Node* imm_node = m->in(1);
  if (!imm_node->is_Con()) {
    return false;
  }

  const Type* t = imm_node->bottom_type();
  if (!(t->isa_int() || t->isa_long())) {
    return false;
  }

  switch (n->Opcode()) {
  case Op_AndV:
  case Op_OrV:
  case Op_XorV: {
    // Logical ops accept SVE bitmask immediates, checked per element size.
    Assembler::SIMD_RegVariant T = Assembler::elemType_to_regVariant(Matcher::vector_element_basic_type(n));
    uint64_t value = t->isa_long() ? (uint64_t)imm_node->get_long() : (uint64_t)imm_node->get_int();
    return Assembler::operand_valid_for_sve_logical_immediate(Assembler::regVariant_to_elemBits(T), value);
  }
  case Op_AddVB:
    // Byte add: immediate must fit in [-255, 255].
    return (imm_node->get_int() <= 255 && imm_node->get_int() >= -255);
  case Op_AddVS:
  case Op_AddVI:
    return Assembler::operand_valid_for_sve_add_sub_immediate((int64_t)imm_node->get_int());
  case Op_AddVL:
    return Assembler::operand_valid_for_sve_add_sub_immediate(imm_node->get_long());
  default:
    return false;
  }
}
 2665 
 2666 // (XorV src (Replicate m1))
 2667 // (XorVMask src (MaskAll m1))
 2668 static bool is_vector_bitwise_not_pattern(Node* n, Node* m) {
 2669   if (n != nullptr && m != nullptr) {
 2670     return (n->Opcode() == Op_XorV || n->Opcode() == Op_XorVMask) &&
 2671            VectorNode::is_all_ones_vector(m);
 2672   }
 2673   return false;
 2674 }
 2675 
 2676 // Should the matcher clone input 'm' of node 'n'?
 2677 bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
 2678   if (is_vshift_con_pattern(n, m) ||
 2679       is_vector_bitwise_not_pattern(n, m) ||
 2680       is_valid_sve_arith_imm_pattern(n, m) ||
 2681       is_encode_and_store_pattern(n, m)) {
 2682     mstack.push(m, Visit);
 2683     return true;
 2684   }
 2685   return false;
 2686 }
 2687 
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {

  // Loads and stores with indirect memory input (e.g., volatile loads and
  // stores) do not subsume the input into complex addressing expressions. If
  // the addressing expression is input to at least one such load or store, do
  // not clone the addressing expression. Query needs_acquiring_load and
  // needs_releasing_store as a proxy for indirect memory input, as it is not
  // possible to directly query for indirect memory input at this stage.
  for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
    Node* n = m->fast_out(i);
    if (n->is_Load() && needs_acquiring_load(n)) {
      return false;
    }
    if (n->is_Store() && needs_releasing_store(n)) {
      return false;
    }
  }

  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  // Case 1: (AddP base (LShiftL (ConvI2L idx) con)) -- fold the shifted,
  // possibly sign-extended, index into a scaled-register addressing mode.
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  // Case 2: (AddP base (ConvI2L idx)) -- fold the sign-extension into a
  // sign-extended register-offset addressing mode.
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
 2745 
// Emit a volatile (acquire/release) memory access INSN.  Volatile
// accesses only support plain register-indirect addressing, hence the
// guarantees that no index, displacement or scale is present.
// NOTE(review): the SCRATCH parameter is currently unused by the
// expansion; callers still pass rscratch1.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
 2753 
 2754 
// Build an Address from the decomposed pieces of an ADL memory
// operand.  index == -1 means "no index register": base plus
// displacement.  Otherwise the address is base plus a (possibly
// sign-extended) scaled register index, with zero displacement.
static Address mem2address(int opcode, Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
    case INDINDEXI2L:
    case INDINDEXI2LN:
      // Operand patterns that contain a ConvI2L: the 32-bit index
      // must be sign-extended (sxtw) rather than plainly shifted.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      return Address(base, disp);
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      return Address(base, as_Register(index), scale);
    }
  }
 2780 
 2781 
// Pointer-to-member-function types for the loadStore() helpers below:
// integer, register-indirect, float and vector load/store emitters.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_insn2)(Register Rt, Register adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
 2787 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  static void loadStore(C2_MacroAssembler* masm, mem_insn insn,
                        Register reg, int opcode,
                        Register base, int index, int scale, int disp,
                        int size_in_memory)
  {
    Address addr = mem2address(opcode, base, index, scale, disp);
    if (addr.getMode() == Address::base_plus_offset) {
      /* Fix up any out-of-range offsets. */
      // legitimize_address may need rscratch1 to materialize a large
      // offset, so it must not alias the base or data register.
      assert_different_registers(rscratch1, base);
      assert_different_registers(rscratch1, reg);
      addr = __ legitimize_address(addr, size_in_memory, rscratch1);
    }
    (masm->*insn)(reg, addr);
  }
 2805 
  // Float/double variant of loadStore().  Same opcode kludge as above
  // to detect sign-extended index operands.
  static void loadStore(C2_MacroAssembler* masm, mem_float_insn insn,
                        FloatRegister reg, int opcode,
                        Register base, int index, int size, int disp,
                        int size_in_memory)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      // Patterns containing ConvI2L need a sign-extending (sxtw) index.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      // Fix up any out-of-range offsets.
      assert_different_registers(rscratch1, base);
      Address addr = Address(base, disp);
      addr = __ legitimize_address(addr, size_in_memory, rscratch1);
      (masm->*insn)(reg, addr);
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm->*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
 2833 
 2834   static void loadStore(C2_MacroAssembler* masm, mem_vector_insn insn,
 2835                         FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
 2836                         int opcode, Register base, int index, int size, int disp)
 2837   {
 2838     if (index == -1) {
 2839       (masm->*insn)(reg, T, Address(base, disp));
 2840     } else {
 2841       assert(disp == 0, "unsupported address mode");
 2842       (masm->*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
 2843     }
 2844   }
 2845 
 2846 %}
 2847 
 2848 
 2849 
 2850 //----------ENCODING BLOCK-----------------------------------------------------
 2851 // This block specifies the encoding classes used by the compiler to
 2852 // output byte streams.  Encoding classes are parameterized macros
 2853 // used by Machine Instruction Nodes in order to generate the bit
 2854 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  Four interfaces are
// currently supported: REG_INTER, CONST_INTER, MEMORY_INTER, and
 2857 // COND_INTER.  REG_INTER causes an operand to generate a function
 2858 // which returns its register number when queried.  CONST_INTER causes
 2859 // an operand to generate a function which returns the value of the
 2860 // constant when queried.  MEMORY_INTER causes an operand to generate
 2861 // four functions which return the Base Register, the Index Register,
 2862 // the Scale Value, and the Offset Value of the operand when queried.
 2863 // COND_INTER causes an operand to generate six functions which return
 2864 // the encoding code (ie - encoding bits for the instruction)
 2865 // associated with each basic boolean condition for a conditional
 2866 // instruction.
 2867 //
 2868 // Instructions specify two basic values for encoding.  Again, a
 2869 // function is available to check if the constant displacement is an
 2870 // oop. They use the ins_encode keyword to specify their encoding
 2871 // classes (which must be a sequence of enc_class names, and their
 2872 // parameters, specified in the encoding block), and they use the
 2873 // opcode keyword to specify, in order, their primary, secondary, and
 2874 // tertiary opcode.  Only the opcode sections which a particular
 2875 // instruction needs for encoding need to be specified.
 2876 encode %{
 2877   // Build emit functions for each basic byte or larger field in the
 2878   // intel encoding scheme (opcode, rm, sib, immediate), and call them
 2879   // from C++ code in the enc_class source block.  Emit functions will
 2880   // live in the main source block for now.  In future, we can
 2881   // generalize this by adding a syntax that specifies the sizes of
 2882   // fields in an order, so that the adlc can build the emit functions
 2883   // automagically
 2884 
 2885   // catch all for unimplemented encodings
 2886   enc_class enc_unimplemented %{
 2887     __ unimplemented("C2 catch all");
 2888   %}
 2889 
 2890   // BEGIN Non-volatile memory access
 2891 
 2892   // This encoding class is generated automatically from ad_encode.m4.
 2893   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2894   enc_class aarch64_enc_ldrsbw(iRegI dst, memory1 mem) %{
 2895     Register dst_reg = as_Register($dst$$reg);
 2896     loadStore(masm, &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
 2897                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2898   %}
 2899 
 2900   // This encoding class is generated automatically from ad_encode.m4.
 2901   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2902   enc_class aarch64_enc_ldrsb(iRegI dst, memory1 mem) %{
 2903     Register dst_reg = as_Register($dst$$reg);
 2904     loadStore(masm, &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
 2905                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2906   %}
 2907 
 2908   // This encoding class is generated automatically from ad_encode.m4.
 2909   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2910   enc_class aarch64_enc_ldrb(iRegI dst, memory1 mem) %{
 2911     Register dst_reg = as_Register($dst$$reg);
 2912     loadStore(masm, &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
 2913                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2914   %}
 2915 
 2916   // This encoding class is generated automatically from ad_encode.m4.
 2917   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2918   enc_class aarch64_enc_ldrb(iRegL dst, memory1 mem) %{
 2919     Register dst_reg = as_Register($dst$$reg);
 2920     loadStore(masm, &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
 2921                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2922   %}
 2923 
 2924   // This encoding class is generated automatically from ad_encode.m4.
 2925   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2926   enc_class aarch64_enc_ldrshw(iRegI dst, memory2 mem) %{
 2927     Register dst_reg = as_Register($dst$$reg);
 2928     loadStore(masm, &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
 2929                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2930   %}
 2931 
 2932   // This encoding class is generated automatically from ad_encode.m4.
 2933   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2934   enc_class aarch64_enc_ldrsh(iRegI dst, memory2 mem) %{
 2935     Register dst_reg = as_Register($dst$$reg);
 2936     loadStore(masm, &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
 2937                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2938   %}
 2939 
 2940   // This encoding class is generated automatically from ad_encode.m4.
 2941   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2942   enc_class aarch64_enc_ldrh(iRegI dst, memory2 mem) %{
 2943     Register dst_reg = as_Register($dst$$reg);
 2944     loadStore(masm, &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
 2945                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2946   %}
 2947 
 2948   // This encoding class is generated automatically from ad_encode.m4.
 2949   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2950   enc_class aarch64_enc_ldrh(iRegL dst, memory2 mem) %{
 2951     Register dst_reg = as_Register($dst$$reg);
 2952     loadStore(masm, &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
 2953                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2954   %}
 2955 
 2956   // This encoding class is generated automatically from ad_encode.m4.
 2957   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2958   enc_class aarch64_enc_ldrw(iRegI dst, memory4 mem) %{
 2959     Register dst_reg = as_Register($dst$$reg);
 2960     loadStore(masm, &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
 2961                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2962   %}
 2963 
 2964   // This encoding class is generated automatically from ad_encode.m4.
 2965   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2966   enc_class aarch64_enc_ldrw(iRegL dst, memory4 mem) %{
 2967     Register dst_reg = as_Register($dst$$reg);
 2968     loadStore(masm, &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
 2969                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2970   %}
 2971 
 2972   // This encoding class is generated automatically from ad_encode.m4.
 2973   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2974   enc_class aarch64_enc_ldrsw(iRegL dst, memory4 mem) %{
 2975     Register dst_reg = as_Register($dst$$reg);
 2976     loadStore(masm, &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
 2977                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2978   %}
 2979 
 2980   // This encoding class is generated automatically from ad_encode.m4.
 2981   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2982   enc_class aarch64_enc_ldr(iRegL dst, memory8 mem) %{
 2983     Register dst_reg = as_Register($dst$$reg);
 2984     loadStore(masm, &MacroAssembler::ldr, dst_reg, $mem->opcode(),
 2985                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 2986   %}
 2987 
 2988   // This encoding class is generated automatically from ad_encode.m4.
 2989   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2990   enc_class aarch64_enc_ldrs(vRegF dst, memory4 mem) %{
 2991     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
 2992     loadStore(masm, &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
 2993                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2994   %}
 2995 
 2996   // This encoding class is generated automatically from ad_encode.m4.
 2997   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2998   enc_class aarch64_enc_ldrd(vRegD dst, memory8 mem) %{
 2999     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
 3000     loadStore(masm, &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
 3001                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 3002   %}
 3003 
 3004   // This encoding class is generated automatically from ad_encode.m4.
 3005   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3006   enc_class aarch64_enc_strb(iRegI src, memory1 mem) %{
 3007     Register src_reg = as_Register($src$$reg);
 3008     loadStore(masm, &MacroAssembler::strb, src_reg, $mem->opcode(),
 3009                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 3010   %}
 3011 
 3012   // This encoding class is generated automatically from ad_encode.m4.
 3013   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3014   enc_class aarch64_enc_strb0(memory1 mem) %{
 3015     loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(),
 3016                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 3017   %}
 3018 
 3019   // This encoding class is generated automatically from ad_encode.m4.
 3020   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3021   enc_class aarch64_enc_strh(iRegI src, memory2 mem) %{
 3022     Register src_reg = as_Register($src$$reg);
 3023     loadStore(masm, &MacroAssembler::strh, src_reg, $mem->opcode(),
 3024                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 3025   %}
 3026 
 3027   // This encoding class is generated automatically from ad_encode.m4.
 3028   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3029   enc_class aarch64_enc_strh0(memory2 mem) %{
 3030     loadStore(masm, &MacroAssembler::strh, zr, $mem->opcode(),
 3031                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 3032   %}
 3033 
 3034   // This encoding class is generated automatically from ad_encode.m4.
 3035   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3036   enc_class aarch64_enc_strw(iRegI src, memory4 mem) %{
 3037     Register src_reg = as_Register($src$$reg);
 3038     loadStore(masm, &MacroAssembler::strw, src_reg, $mem->opcode(),
 3039                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 3040   %}
 3041 
 3042   // This encoding class is generated automatically from ad_encode.m4.
 3043   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3044   enc_class aarch64_enc_strw0(memory4 mem) %{
 3045     loadStore(masm, &MacroAssembler::strw, zr, $mem->opcode(),
 3046                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 3047   %}
 3048 
 3049   // This encoding class is generated automatically from ad_encode.m4.
 3050   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3051   enc_class aarch64_enc_str(iRegL src, memory8 mem) %{
 3052     Register src_reg = as_Register($src$$reg);
 3053     // we sometimes get asked to store the stack pointer into the
 3054     // current thread -- we cannot do that directly on AArch64
 3055     if (src_reg == r31_sp) {
 3056       assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
 3057       __ mov(rscratch2, sp);
 3058       src_reg = rscratch2;
 3059     }
 3060     loadStore(masm, &MacroAssembler::str, src_reg, $mem->opcode(),
 3061                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 3062   %}
 3063 
 3064   // This encoding class is generated automatically from ad_encode.m4.
 3065   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3066   enc_class aarch64_enc_str0(memory8 mem) %{
 3067     loadStore(masm, &MacroAssembler::str, zr, $mem->opcode(),
 3068                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 3069   %}
 3070 
 3071   // This encoding class is generated automatically from ad_encode.m4.
 3072   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3073   enc_class aarch64_enc_strs(vRegF src, memory4 mem) %{
 3074     FloatRegister src_reg = as_FloatRegister($src$$reg);
 3075     loadStore(masm, &MacroAssembler::strs, src_reg, $mem->opcode(),
 3076                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 3077   %}
 3078 
 3079   // This encoding class is generated automatically from ad_encode.m4.
 3080   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3081   enc_class aarch64_enc_strd(vRegD src, memory8 mem) %{
 3082     FloatRegister src_reg = as_FloatRegister($src$$reg);
 3083     loadStore(masm, &MacroAssembler::strd, src_reg, $mem->opcode(),
 3084                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 3085   %}
 3086 
 3087   // This encoding class is generated automatically from ad_encode.m4.
 3088   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3089   enc_class aarch64_enc_strb0_ordered(memory4 mem) %{
 3090       __ membar(Assembler::StoreStore);
 3091       loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(),
 3092                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 3093   %}
 3094 
 3095   // END Non-volatile memory access
 3096 
  // Vector loads and stores
  // One encoding per SIMD register variant; loadStore() picks the
  // addressing mode while the variant (H/S/D/Q) selects the access
  // width passed to MacroAssembler::ldr/str.
  enc_class aarch64_enc_ldrvH(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvS(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvH(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvS(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
 3145 
  // volatile loads and stores
  // Store-release (stlr*) encodings; the *0 variants store the zero
  // register.  MOV_VOLATILE restricts these to register-indirect
  // addressing only.

  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrb0(memory mem) %{
    MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrh0(memory mem) %{
    MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  enc_class aarch64_enc_stlrw0(memory mem) %{
    MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}
 3177 
  // Load-acquire (ldar*) encodings.  Sub-word signed loads have no
  // acquiring form, so the signed variants perform an unsigned
  // acquiring load followed by an explicit sign-extension.
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // Float/double acquiring loads go via an integer scratch register
  // and an fmov, since ldar only targets general-purpose registers.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
 3252 
  // store-release doubleword
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      // copy sp through rscratch2 since sp cannot be a store source here
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // store-release doubleword of zero
  enc_class aarch64_enc_stlr0(memory mem) %{
    MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // store-release float: move the FP bits to rscratch2, then stlrw
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // store-release double: move the FP bits to rscratch2, then stlr
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
 3288 
 3289   // synchronized read/update encodings
 3290 
  // load-acquire exclusive doubleword.  The exclusive instructions only
  // accept a plain base register, so any displacement or index is first
  // folded into rscratch1 with lea.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory8 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // combine disp and scaled index in two lea steps
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}

  // store-release exclusive doubleword.  rscratch1 receives the stlxr
  // status word (0 on success); rscratch2 holds the computed address.
  // The trailing cmpw publishes success/failure in the condition flags
  // for the matching instruct to branch on.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory8 mem) %{
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    __ cmpw(rscratch1, zr);
  %}
 3347 
 3348   // prefetch encodings
 3349 
 3350   enc_class aarch64_enc_prefetchw(memory mem) %{
 3351     Register base = as_Register($mem$$base);
 3352     int index = $mem$$index;
 3353     int scale = $mem$$scale;
 3354     int disp = $mem$$disp;
 3355     if (index == -1) {
 3356       // Fix up any out-of-range offsets.
 3357       assert_different_registers(rscratch1, base);
 3358       Address addr = Address(base, disp);
 3359       addr = __ legitimize_address(addr, 8, rscratch1);
 3360       __ prfm(addr, PSTL1KEEP);
 3361     } else {
 3362       Register index_reg = as_Register(index);
 3363       if (disp == 0) {
 3364         __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
 3365       } else {
 3366         __ lea(rscratch1, Address(base, disp));
 3367 	__ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
 3368       }
 3369     }
 3370   %}
 3371 
 3372   // mov encodings
 3373 
  // move a 32 bit immediate into an integer register
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    uint32_t con = (uint32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      // use the zero register rather than materializing zero
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // move a 64 bit immediate into a long register
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    Register dst_reg = as_Register($dst$$reg);
    uint64_t con = (uint64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}

  // move a pointer constant into a register, applying oop/metadata
  // relocation when required
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == nullptr || con == (address)1) {
      // null and 1 are handled by the dedicated enc_classes below
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (! __ is_valid_AArch64_address(con) ||
            con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          // pc-relative adrp/add pair; is_valid_AArch64_address presumably
          // guarantees adrp reachability -- confirm against its definition
          uint64_t offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}

  // move null pointer constant (zero)
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // move pointer constant 1
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (uint64_t)1);
  %}

  // move a narrow-oop constant into a register (oop relocation required)
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == nullptr) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // move narrow-oop zero
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // move a narrow-klass constant into a register (metadata relocation required)
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == nullptr) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
 3457 
 3458   // arithmetic encodings
 3459 
  // 32 bit add/subtract of an immediate; the same enc_class serves both
  // opcodes, distinguished by the instruct's primary opcode value.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    // pick whichever of add/sub keeps the encoded immediate non-negative
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // 64 bit add/subtract of an immediate (same primary-opcode dispatch)
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}

  // 32 bit signed divide.  The boolean argument presumably selects
  // remainder (true) vs quotient (false) -- confirm against the
  // MacroAssembler definition of corrected_idivl.
  enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 64 bit signed divide.
  // NOTE(review): declared with iRegI operands yet emits the 64 bit
  // corrected_idivq; enc_class parameter types are presumably not
  // checked by ADLC -- confirm the matching instruct supplies long regs.
  enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 32 bit signed remainder
  enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}

  // 64 bit signed remainder (same iRegI-vs-long note as aarch64_enc_div)
  enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}
 3513 
 3514   // compare instruction encodings
 3515 
  // 32 bit register-register compare
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32 bit compare against an add/sub-range immediate: write flags only
  // (destination is zr), flipping to adds for a negative constant
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // 32 bit compare against an arbitrary immediate, materialized in rscratch1
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    Register reg1 = as_Register($src1$$reg);
    uint32_t val = (uint32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}

  // 64 bit register-register compare
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64 bit compare against a 12-bit-range immediate (flags only)
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      __ orr(rscratch1, zr, (uint64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // 64 bit compare against an arbitrary immediate, materialized in rscratch1
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    Register reg1 = as_Register($src1$$reg);
    uint64_t val = (uint64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}

  // pointer compare
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // narrow-oop compare (32 bit)
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // pointer test against null
  enc_class aarch64_enc_testp(iRegP src) %{
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // narrow-oop test against null
  enc_class aarch64_enc_testn(iRegN src) %{
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}

  // unconditional branch to label
  enc_class aarch64_enc_b(label lbl) %{
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // conditional branch, signed condition code from the cmpOp operand
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // conditional branch, unsigned condition code from the cmpOpU operand
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}
 3602 
  // Slow-path subtype check.  On a miss control branches to 'miss' past
  // the zeroing; on a hit it falls through, so with $primary set the
  // result register is zeroed on the hit path -- callers presumably read
  // result == 0 as "is a subtype" (confirm against the matching instructs).
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     nullptr, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
 3619 
  // Emit a compiled static (or optimized-virtual) Java call.  When
  // _method is null this is a call to a runtime wrapper instead.  Any
  // path that fails to allocate code-cache space records a bailout and
  // returns without emitting further code.
  enc_class aarch64_enc_java_static_call(method meth) %{
    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type));
      if (call == nullptr) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else if (_method->intrinsic_id() == vmIntrinsicID::_ensureMaterializedForStackWalk) {
      // The NOP here is purely to ensure that eliding a call to
      // JVM_EnsureMaterializedForStackWalk doesn't change the code size.
      __ nop();
      __ block_comment("call JVM_EnsureMaterializedForStackWalk (elided)");
    } else {
      int method_index = resolved_method_index(masm);
      // choose the relocation matching how the call site will be patched
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec));
      if (call == nullptr) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
      if (CodeBuffer::supports_shared_stubs() && _method->can_be_statically_bound()) {
        // Calls of the same statically bound method can share
        // a stub to the interpreter.
        __ code()->shared_stub_to_interp_for(_method, call - __ begin());
      } else {
        // Emit stub for static call
        address stub = CompiledDirectCall::emit_to_interp_stub(masm, call);
        if (stub == nullptr) {
          ciEnv::current()->record_failure("CodeCache is full");
          return;
        }
      }
    }

    __ post_call_nop();

    // Only non uncommon_trap calls need to reinitialize ptrue.
    if (Compile::current()->max_vector_size() > 0 && uncommon_trap_request() == 0) {
      __ reinitialize_ptrue();
    }
  %}
 3665 
  // Emit an inline-cache based virtual/interface Java call; bails out on
  // a full code cache like the static-call encoding above.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    int method_index = resolved_method_index(masm);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
    __ post_call_nop();
    if (Compile::current()->max_vector_size() > 0) {
      __ reinitialize_ptrue();
    }
  %}

  // optional stack-depth verification after a call
  enc_class aarch64_enc_call_epilog() %{
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}

  enc_class aarch64_enc_java_to_runtime(method meth) %{
    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blr
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // target lives in the code cache: reachable via trampoline call
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == nullptr) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
      __ post_call_nop();
    } else {
      Label retaddr;
      // Make the anchor frame walkable
      __ adr(rscratch2, retaddr);
      __ str(rscratch2, Address(rthread, JavaThread::last_Java_pc_offset()));
      __ lea(rscratch1, RuntimeAddress(entry));
      __ blr(rscratch1);
      __ bind(retaddr);
      __ post_call_nop();
    }
    if (Compile::current()->max_vector_size() > 0) {
      __ reinitialize_ptrue();
    }
  %}

  // jump to the rethrow stub; far_jump copes with out-of-range targets
  enc_class aarch64_enc_rethrow() %{
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}

  // method return
  enc_class aarch64_enc_ret() %{
#ifdef ASSERT
    if (Compile::current()->max_vector_size() > 0) {
      __ verify_ptrue();
    }
#endif
    __ ret(lr);
  %}

  // indirect tail call through a register
  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}

  // tail jump used for exception forwarding
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
 3741 
 3742 %}
 3743 
 3744 //----------FRAME--------------------------------------------------------------
 3745 // Definition of frame structure and management information.
 3746 //
 3747 //  S T A C K   L A Y O U T    Allocators stack-slot number
 3748 //                             |   (to get allocators register number
 3749 //  G  Owned by    |        |  v    add OptoReg::stack0())
 3750 //  r   CALLER     |        |
 3751 //  o     |        +--------+      pad to even-align allocators stack-slot
 3752 //  w     V        |  pad0  |        numbers; owned by CALLER
 3753 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
 3754 //  h     ^        |   in   |  5
 3755 //        |        |  args  |  4   Holes in incoming args owned by SELF
 3756 //  |     |        |        |  3
 3757 //  |     |        +--------+
 3758 //  V     |        | old out|      Empty on Intel, window on Sparc
 3759 //        |    old |preserve|      Must be even aligned.
 3760 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
 3761 //        |        |   in   |  3   area for Intel ret address
 3762 //     Owned by    |preserve|      Empty on Sparc.
 3763 //       SELF      +--------+
 3764 //        |        |  pad2  |  2   pad to align old SP
 3765 //        |        +--------+  1
 3766 //        |        | locks  |  0
 3767 //        |        +--------+----> OptoReg::stack0(), even aligned
 3768 //        |        |  pad1  | 11   pad to align new SP
 3769 //        |        +--------+
 3770 //        |        |        | 10
 3771 //        |        | spills |  9   spills
 3772 //        V        |        |  8   (pad0 slot for callee)
 3773 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
 3774 //        ^        |  out   |  7
 3775 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
 3776 //     Owned by    +--------+
 3777 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
 3778 //        |    new |preserve|      Must be even-aligned.
 3779 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
 3780 //        |        |        |
 3781 //
 3782 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
 3783 //         known from SELF's arguments and the Java calling convention.
 3784 //         Region 6-7 is determined per call site.
 3785 // Note 2: If the calling convention leaves holes in the incoming argument
 3786 //         area, those holes are owned by SELF.  Holes in the outgoing area
 3787 //         are owned by the CALLEE.  Holes should not be necessary in the
 3788 //         incoming area, as the Java calling convention is completely under
 3789 //         the control of the AD file.  Doubles can be sorted and packed to
 3790 //         avoid holes.  Holes in the outgoing arguments may be necessary for
 3791 //         varargs C calling conventions.
 3792 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
 3793 //         even aligned with pad0 as needed.
 3794 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
 3795 //           (the latter is true on Intel but is it false on AArch64?)
 3796 //         region 6-11 is even aligned; it may be padded out more so that
 3797 //         the region from SP to FP meets the minimum stack alignment.
 3798 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
 3799 //         alignment.  Region 11, pad1, may be dynamically extended so that
 3800 //         SP meets the minimum alignment.
 3801 
frame %{
  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or Method for I2C.
  inline_cache_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Tables are indexed by the ideal register opcode (Op_Node..Op_RegL);
    // lo holds the primary return register, hi the upper half (or Bad
    // for single-slot values).
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                      // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
 3873 
 3874 //----------ATTRIBUTES---------------------------------------------------------
 3875 //----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
// (INSN_COST is a base-cost constant defined earlier in this file --
// confirm its value there.)
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction

// Whether this node is expanded during code emission into a sequence of
// instructions and the first instruction can perform an implicit null check.
ins_attrib ins_is_late_expanded_null_check_candidate(false);
 3895 
 3896 //----------OPERANDS-----------------------------------------------------------
 3897 // Operand definitions must precede instruction definitions for correct parsing
 3898 // in the ADLC because operands constitute user defined types which are used in
 3899 // instruction definitions.
 3900 
 3901 //----------Simple Operands----------------------------------------------------
 3902 
 3903 // Integer operands 32 bit
 3904 // 32 bit immediate
 3905 operand immI()
 3906 %{
 3907   match(ConI);
 3908 
 3909   op_cost(0);
 3910   format %{ %}
 3911   interface(CONST_INTER);
 3912 %}
 3913 
 3914 // 32 bit zero
 3915 operand immI0()
 3916 %{
 3917   predicate(n->get_int() == 0);
 3918   match(ConI);
 3919 
 3920   op_cost(0);
 3921   format %{ %}
 3922   interface(CONST_INTER);
 3923 %}
 3924 
 3925 // 32 bit unit increment
 3926 operand immI_1()
 3927 %{
 3928   predicate(n->get_int() == 1);
 3929   match(ConI);
 3930 
 3931   op_cost(0);
 3932   format %{ %}
 3933   interface(CONST_INTER);
 3934 %}
 3935 
// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift values for add/sub extension shift
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant strictly greater than 1
operand immI_gt_1()
%{
  predicate(n->get_int() > 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant less than or equal to 4
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 255 (0xFF, a low-byte mask)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 65535 (0xFFFF, a low-halfword mask)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Strictly positive 32 bit integer constant
operand immI_positive()
%{
  predicate(n->get_int() > 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// BoolTest condition for signed compare
operand immI_cmp_cond()
%{
  predicate(!Matcher::is_unsigned_booltest_pred(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// BoolTest condition for unsigned compare
operand immI_cmpU_cond()
%{
  predicate(Matcher::is_unsigned_booltest_pred(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4079 
// 64 bit constant 255 (0xFF, a low-byte mask)
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 65535 (0xFFFF, a low-halfword mask)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 0xFFFFFFFF (a low 32-bit mask)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Non-zero long of the form 2^k - 1 with the top two bits clear,
// i.e. a contiguous low-order bitmask of at most 62 bits.
operand immL_bitmask()
%{
  predicate((n->get_long() != 0)
            && ((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Non-zero int of the form 2^k - 1 with the top two bits clear,
// i.e. a contiguous low-order bitmask of at most 30 bits.
operand immI_bitmask()
%{
  predicate((n->get_int() != 0)
            && ((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Non-zero long of the form 2^k - 1 that fits below 0x80000000,
// i.e. a bitmask that is also a positive 32-bit int.
operand immL_positive_bitmaskI()
%{
  predicate((n->get_long() != 0)
            && ((julong)n->get_long() < 0x80000000ULL)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4145 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 5 bit signed integer
operand immI5()
%{
  predicate(Assembler::is_simm(n->get_int(), 5));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 7 bit unsigned integer
operand immIU7()
%{
  predicate(Assembler::is_uimm(n->get_int(), 7));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 1 byte immediate load/store (scale shift 0)
operand immIOffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 2 byte immediate load/store (scale shift 1)
operand immIOffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 4 byte immediate load/store (scale shift 2)
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an 8 byte immediate load/store (scale shift 3)
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 16 byte immediate load/store (scale shift 4)
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4239 
// Long offset in the signed range [-256, 65520], covering both the
// unscaled (simm9) and scaled (uimm12) immediate addressing ranges.
operand immLOffset()
%{
  predicate(n->get_long() >= -256 && n->get_long() <= 65520);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 1 byte immediate load/store (scale shift 0)
operand immLoffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 2 byte immediate load/store (scale shift 1)
operand immLoffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 4 byte immediate load/store (scale shift 2)
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for an 8 byte immediate load/store (scale shift 3)
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 16 byte immediate load/store (scale shift 4)
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 5 bit signed long integer
operand immL5()
%{
  predicate(Assembler::is_simm(n->get_long(), 5));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 7 bit unsigned long integer
operand immLU7()
%{
  predicate(Assembler::is_uimm(n->get_long(), 7));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4321 
// 8 bit signed value.
operand immI8()
%{
  predicate(n->get_int() <= 127 && n->get_int() >= -128);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// int valid for an SVE DUP immediate:
// 8 bit signed value (simm8), or #simm8 LSL 8.
operand immIDupV()
%{
  predicate(Assembler::operand_valid_for_sve_dup_immediate((int64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// long valid for an SVE DUP immediate:
// 8 bit signed value (simm8), or #simm8 LSL 8.
operand immLDupV()
%{
  predicate(Assembler::operand_valid_for_sve_dup_immediate(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// half-float (FP16) constant valid for an SVE DUP immediate:
// 8 bit signed value (simm8), or #simm8 LSL 8.
operand immHDupV()
%{
  predicate(Assembler::operand_valid_for_sve_dup_immediate((int64_t)n->geth()));
  match(ConH);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4365 
// 8 bit integer valid for vector add sub immediate
operand immBAddSubV()
%{
  predicate(n->get_int() <= 255 && n->get_int() >= -255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((int64_t)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for vector add sub immediate
operand immIAddSubV()
%{
  predicate(Assembler::operand_valid_for_sve_add_sub_immediate((int64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate

// int valid as an SVE logical immediate at byte element width
operand immBLog()
%{
  predicate(Assembler::operand_valid_for_sve_logical_immediate(BitsPerByte, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// int valid as an SVE logical immediate at short (halfword) element width
operand immSLog()
%{
  predicate(Assembler::operand_valid_for_sve_logical_immediate(BitsPerShort, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// int valid as a 32-bit logical (bitmask) immediate
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4429 
// Integer operands 64 bit
// 64 bit immediate
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for addv subv immediate
operand immLAddSubV()
%{
  predicate(Assembler::operand_valid_for_sve_add_sub_immediate(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (uint64_t)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4503 
// Pointer operands
// Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// nullptr Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4537 
// Float and Double operands
// Double Immediate
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: encodable as a valid floating-point immediate
// (see Assembler::operand_valid_for_float_immediate).
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Half Float (FP16) Immediate
operand immH()
%{
  match(ConH);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: encodable as a valid floating-point immediate
// (see Assembler::operand_valid_for_float_immediate).
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4607 
// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow nullptr Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4638 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4715 
// Pointer 64 bit Register not Special, excluding rfp.
// This operand is not allowed to use rfp even if
// rfp is not used to hold the frame pointer.
operand iRegPNoSpNoRfp()
%{
  constraint(ALLOC_IN_RC(no_special_no_rfp_ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4811 
// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Integer 32 bit Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4878 
 4879 
// Narrow Pointer Register Operands
// Narrow Pointer Register (32 bit encoding of a compressed oop)
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4901 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Generic vector class. This will be used for
// all vector operands, including NEON and SVE.
operand vReg()
%{
  constraint(ALLOC_IN_RC(dynamic));
  match(VecA);
  match(VecD);
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4939 
// Vector register V10 only
operand vReg_V10()
%{
  constraint(ALLOC_IN_RC(v10_veca_reg));
  match(vReg);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register V11 only
operand vReg_V11()
%{
  constraint(ALLOC_IN_RC(v11_veca_reg));
  match(vReg);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register V12 only
operand vReg_V12()
%{
  constraint(ALLOC_IN_RC(v12_veca_reg));
  match(vReg);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register V13 only
operand vReg_V13()
%{
  constraint(ALLOC_IN_RC(v13_veca_reg));
  match(vReg);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register V17 only
operand vReg_V17()
%{
  constraint(ALLOC_IN_RC(v17_veca_reg));
  match(vReg);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register V18 only
operand vReg_V18()
%{
  constraint(ALLOC_IN_RC(v18_veca_reg));
  match(vReg);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register V23 only
operand vReg_V23()
%{
  constraint(ALLOC_IN_RC(v23_veca_reg));
  match(vReg);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register V24 only
operand vReg_V24()
%{
  constraint(ALLOC_IN_RC(v24_veca_reg));
  match(vReg);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5019 
// Scalable vector register operand (VecA)
operand vecA()
%{
  constraint(ALLOC_IN_RC(vectora_reg));
  match(VecA);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64 bit vector register operand (VecD)
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128 bit vector register operand (VecX)
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5049 
// Double register V0 only
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V1 only
operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V2 only
operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V3 only
operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V4 only
operand vRegD_V4()
%{
  constraint(ALLOC_IN_RC(v4_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V5 only
operand vRegD_V5()
%{
  constraint(ALLOC_IN_RC(v5_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V6 only
operand vRegD_V6()
%{
  constraint(ALLOC_IN_RC(v6_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V7 only
operand vRegD_V7()
%{
  constraint(ALLOC_IN_RC(v7_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V12 only
operand vRegD_V12()
%{
  constraint(ALLOC_IN_RC(v12_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V13 only
operand vRegD_V13()
%{
  constraint(ALLOC_IN_RC(v13_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5139 
// SVE predicate register (pr_reg class)
operand pReg()
%{
  constraint(ALLOC_IN_RC(pr_reg));
  match(RegVectMask);
  match(pRegGov);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// SVE governing predicate register (gov_pr class)
operand pRegGov()
%{
  constraint(ALLOC_IN_RC(gov_pr));
  match(RegVectMask);
  match(pReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// SVE governing predicate register P0 only
operand pRegGov_P0()
%{
  constraint(ALLOC_IN_RC(p0_reg));
  match(RegVectMask);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// SVE governing predicate register P1 only
operand pRegGov_P1()
%{
  constraint(ALLOC_IN_RC(p1_reg));
  match(RegVectMask);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5177 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// for floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}

// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5240 
//----------Memory Operands----------------------------------------------------

// Indirect access through a pointer register, no offset: [reg]
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// Base register plus a sign-extended (I2L) 32 bit index shifted by a
// scale constant: [reg, ireg, sxtw scale]
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// Base register plus a 64 bit index shifted by a scale constant:
// [reg, lreg, lsl scale]
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// Base register plus an unscaled, sign-extended (I2L) 32 bit index:
// [reg, ireg, sxtw]
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Base register plus an unscaled 64 bit index register: [reg, lreg]
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
 5314 
// Base register plus an int offset valid for a 1 byte access: [reg, off]
operand indOffI1(iRegP reg, immIOffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Base register plus an int offset valid for a 2 byte access: [reg, off]
operand indOffI2(iRegP reg, immIOffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Base register plus an int offset valid for a 4 byte access: [reg, off]
operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Base register plus an int offset valid for an 8 byte access: [reg, off]
operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Base register plus an int offset valid for a 16 byte access: [reg, off]
operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5384 
// Base register plus a long offset valid for a 1 byte access: [reg, off]
operand indOffL1(iRegP reg, immLoffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Base register plus a long offset valid for a 2 byte access: [reg, off]
operand indOffL2(iRegP reg, immLoffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Base register plus a long offset valid for a 4 byte access: [reg, off]
operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Base register plus a long offset valid for an 8 byte access: [reg, off]
operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Base register plus a long offset valid for a 16 byte access: [reg, off]
operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5454 
// Memory addressed by a raw long value reinterpreted as a pointer
// (CastX2P), e.g. for off-heap accesses.
operand indirectX2P(iRegL reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(CastX2P reg);
  op_cost(0);
  format %{ "[$reg]\t# long -> ptr" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// As indirectX2P, with an additional long immediate offset.
operand indOffX2P(iRegL reg, immLOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (CastX2P reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# long -> ptr" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5482 
// Narrow-oop (compressed pointer) addressing modes. Every operand below is
// guarded by CompressedOops::shift() == 0, i.e. they only apply when a
// narrow oop does not need to be shifted to form an address, so the
// DecodeN input can be used directly as the base register.
operand indirectN(iRegN reg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow base + (int index << scale); the extra size_fits_all_mem_uses
// predicate checks the scaled offset is encodable for every memory user
// of this AddP.
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// Narrow base + (long index << scale), with the same encodability check.
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// Narrow base + sign-extended int index, no scaling.
operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow base + long index, no scaling.
operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow base + int immediate offset.
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Narrow base + long immediate offset.
operand indOffLN(iRegN reg, immLOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5587 
 5588 
 5589 //----------Special Memory Operands--------------------------------------------
 5590 // Stack Slot Operand - This operand is used for loading and storing temporary
 5591 //                      values on the stack where a match requires a value to
 5592 //                      flow through memory.
// Pointer-sized stack slot.
// n.b. unlike the other stackSlot operands below, this one carries an
// explicit op_cost.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Int stack slot.
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Float stack slot.
operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Double stack slot.
operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Long stack slot.
operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
 5663 
 5664 // Operands for expressing Control Flow
 5665 // NOTE: Label is a predefined operand which should not be redefined in
 5666 //       the AD file. It is generically handled within the ADLC.
 5667 
 5668 //----------Conditional Branch Operands----------------------------------------
 5669 // Comparison Op  - This is the operation of the comparison, and is limited to
 5670 //                  the following set of codes:
 5671 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
 5672 //
 5673 // Other attributes of the comparison, such as unsignedness, are specified
 5674 // by the comparison instruction that sets a condition code flags register.
 5675 // That result is represented by a flags operand whose subtype is appropriate
 5676 // to the unsignedness (etc.) of the comparison.
 5677 //
 5678 // Later, the instruction which matches both the Comparison Op (a Bool) and
 5679 // the flags (produced by the Cmp) specifies the coding of the comparison op
 5680 // by matching a specific subtype of Bool operand below, such as cmpOpU.
 5681 
 5682 // used for signed integral comparisons and fp comparisons
 5683 
// The hex values in each COND_INTER entry are the AArch64 condition-code
// encodings (eq=0x0, ne=0x1, hs=0x2, lo=0x3, vs=0x6, vc=0x7, hi=0x8,
// ls=0x9, ge=0xa, lt=0xb, gt=0xc, le=0xd).
operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons
// (same as cmpOp but lt/ge/le/gt map to the unsigned conditions
// lo/hs/ls/hi)

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions
// (restricted by predicate to eq/ne tests only)

operand cmpOpEqNe()
%{
  match(Bool);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions
// (restricted by predicate to lt/ge tests only)

operand cmpOpLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions
// (restricted by predicate to eq/ne/le/gt; uses unsigned condition codes)

operand cmpOpUEqNeLeGt()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq ||
            n->as_Bool()->_test._test == BoolTest::ne ||
            n->as_Bool()->_test._test == BoolTest::le ||
            n->as_Bool()->_test._test == BoolTest::gt);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 5792 
 5793 // Special operand allowing long args to int ops to be truncated for free
 5794 
// A long register whose value is implicitly truncated to int (ConvL2I);
// lets 32-bit instructions consume the low 32 bits for free.
operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}

// A long register whose value is reinterpreted as a pointer (CastX2P)
// without any extra instruction.
operand iRegL2P(iRegL reg) %{

  op_cost(0);

  match(CastX2P reg);

  format %{ "l2p($reg)" %}

  interface(REG_INTER)
%}
 5816 
// vmem<n>: memory operand classes for vector accesses of <n> bytes —
// register-indirect, register+register, or immediate offsets sized for
// an <n>-byte access.
opclass vmem2(indirect, indIndex, indOffI2, indOffL2);
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
 5821 
 5822 //----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
 5824 // instruction definitions by not requiring the AD writer to specify
 5825 // separate instructions for every form of operand when the
 5826 // instruction accepts multiple operand types with the same basic
 5827 // encoding and format. The classic case of this is memory operands.
 5828 
 5829 // memory is used to define read/write location for load/store
 5830 // instruction defs. we can turn a memory op into an Address
 5831 
// memory<n>: operand class for <n>-byte scalar accesses; the immediate
// offset members (indOffI<n>/indOffL<n>) are sized to match the access.
// n.b. memory1 and memory2 omit the narrow-oop immediate-offset forms
// (indOffIN/indOffLN), which memory4 and memory8 include.
opclass memory1(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI1, indOffL1,
                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indirectX2P, indOffX2P);

opclass memory2(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI2, indOffL2,
                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indirectX2P, indOffX2P);

opclass memory4(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI4, indOffL4,
                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P);

opclass memory8(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI8, indOffL8,
                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P);

// All of the memory operands. For the pipeline description.
opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex,
               indOffI1, indOffL1, indOffI2, indOffL2, indOffI4, indOffL4, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P);
 5848 
 5849 
 5850 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
 5851 // operations. it allows the src to be either an iRegI or a (ConvL2I
 5852 // iRegL). in the latter case the l2i normally planted for a ConvL2I
 5853 // can be elided because the 32-bit instruction will just employ the
 5854 // lower 32 bits anyway.
 5855 //
 5856 // n.b. this does not elide all L2I conversions. if the truncated
 5857 // value is consumed by more than one operation then the ConvL2I
 5858 // cannot be bundled into the consuming nodes so an l2i gets planted
 5859 // (actually a movw $dst $src) and the downstream instructions consume
 5860 // the result of the l2i as an iRegI input. That's a shame since the
 5861 // movw is actually redundant but its not too costly.
 5862 
opclass iRegIorL2I(iRegI, iRegL2I);
// analogous to iRegIorL2I: a pointer source that is either an iRegP or a
// long reinterpreted as a pointer (CastX2P) with the cast elided.
opclass iRegPorL2P(iRegP, iRegL2P);
 5865 
 5866 //----------PIPELINE-----------------------------------------------------------
 5867 // Rules which define the behavior of the target architectures pipeline.
 5868 
 5869 // For specific pipelines, eg A53, define the stages of that pipeline
 5870 //pipe_desc(ISS, EX1, EX2, WR);
// Map the A53-style stage names (issue, execute 1/2, writeback) onto the
// generic six-stage pipe_desc stages defined below.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
 5875 
 5876 // Integer ALU reg operation
 5877 pipeline %{
 5878 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 4;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes
%}
 5888 
 5889 // We don't use an actual pipeline model so don't care about resources
 5890 // or description. we do use pipeline classes to introduce fixed
 5891 // latencies
 5892 
 5893 //----------RESOURCES----------------------------------------------------------
 5894 // Resources are the functional units available to the machine
 5895 
// INS0/INS1 are the two issue slots (INS01 = either slot);
// ALU0/ALU1 the two integer ALUs; MAC the multiply-accumulate unit;
// DIV the divider; BRANCH the branch unit; LDST the load/store unit;
// NEON_FP the SIMD/FP unit.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);
 5909 
 5910 //----------PIPELINE CLASSES---------------------------------------------------
 5911 // Pipeline Classes describe the stages in which input and output are
 5912 // referenced by the hardware pipeline.
 5913 
// Floating point / NEON pipeline classes. Sources are read early
// (S1/S2) and results written late (S3..S5) to model FP latency.
// Most classes can issue in either slot (INS01); the divides below can
// only issue in slot 0 (INS0).

// FP dyadic op, single precision.
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP dyadic op, double precision.
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, single precision.
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, double precision.
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> float conversion.
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> double conversion.
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> int conversion.
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> long conversion.
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> float conversion.
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> float conversion.
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> int conversion.
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> long conversion.
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> double conversion.
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> double conversion.
// NOTE(review): src is iRegIorL2I here while fp_l2f above uses iRegL —
// confirm the asymmetry is intended.
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP divide, single precision; issues only in slot 0.
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP divide, double precision; issues only in slot 0.
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP conditional select, single precision (reads the flags).
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP conditional select, double precision (reads the flags).
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP immediate move, single precision.
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP immediate move, double precision.
pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP constant load, single precision.
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

// FP constant load, double precision.
pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
 6115 
 6116 //------- Integer ALU operations --------------------------
 6117 
 6118 // Integer ALU reg-reg operation
 6119 // Operands needed in EX1, result generated in EX2
 6120 // Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
// NOTE(review): dst is written in EX2 but the ALU resource is booked in
// EX1 (unlike the other EX2-writing classes) — confirm this is intended.
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
 6213 
 6214 //------- Compare operation -------------------------------
 6215 
 6216 // Compare reg-reg
 6217 // Eg.  CMP     x0, x1
// Compare two registers; flags written in EX2.
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare register with immediate
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
 6240 
 6241 //------- Conditional instructions ------------------------
 6242 
 6243 // Conditional no operands
 6244 // Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand (single register source)
// EG.  CSNEG   X0, X1, X1, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
 6278 
 6279 //------- Multiply pipeline operations --------------------
 6280 
 6281 // Multiply reg-reg
 6282 // Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// 64-bit multiply reg-reg
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// 64-bit multiply accumulate
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
 6331 
 6332 //------- Divide pipeline operations --------------------
 6333 
 6334 // Eg.  SDIV    w0, w1, w2
// 32-bit divide; occupies the (single) divider, issues only in slot 0.
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// 64-bit divide
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
 6357 
 6358 //------- Load pipeline operations ------------------------
 6359 
 6360 // Load - prefetch
 6361 // Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg (address register read at issue)
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
 6391 
 6392 //------- Store pipeline operations -----------------------
 6393 
 6394 // Store - zr, mem
 6395 // Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem (data register read late, in EX2)
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// n.b. here "dst" is the address register (read at issue) and "src" the
// data register (read in EX2).
// Eg. STR      x0, [sp, x1]
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
 6425 
//------- Branch pipeline operations ----------------------
 6427 
 6428 // Branch
// Unconditional branch.
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch (reads the flags).
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch (reads a register rather than the flags)
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
 6454 
 6455 //------- Synchronisation operations ----------------------
 6456 
 6457 // Any operation requiring serialization.
 6458 // EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
 6478 
 6479 // Empty pipeline class
 6480 pipe_class pipe_class_empty()
 6481 %{
 6482   single_instruction;
 6483   fixed_latency(0);
 6484 %}
 6485 
// Default pipeline class.
// Fallback for instructions without a more specific pipe class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
// Calls are given a large latency so the scheduler treats them as barriers.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}

// Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty;
%}
 6518 
 6519 %}
 6520 //----------INSTRUCTIONS-------------------------------------------------------
 6521 //
 6522 // match      -- States which machine-independent subtree may be replaced
 6523 //               by this instruction.
 6524 // ins_cost   -- The estimated cost of this instruction is used by instruction
 6525 //               selection to identify a minimum cost tree of machine
 6526 //               instructions that matches a tree of machine-independent
 6527 //               instructions.
 6528 // format     -- A string providing the disassembly for this instruction.
 6529 //               The value of an instruction's operand may be inserted
 6530 //               by referring to it with a '$' prefix.
 6531 // opcode     -- Three instruction opcodes may be provided.  These are referred
 6532 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
 6534 //               indicate the type of machine instruction, while secondary
 6535 //               and tertiary are often used for prefix options or addressing
 6536 //               modes.
 6537 // ins_encode -- A list of encode classes with parameters. The encode class
 6538 //               name must have been defined in an 'enc_class' specification
 6539 //               in the encode section of the architecture description.
 6540 
 6541 // ============================================================================
 6542 // Memory (Load/Store) Instructions
 6543 
 6544 // Load Instructions
 6545 
// Load Byte (8 bit signed)
instruct loadB(iRegINoSp dst, memory1 mem)
%{
  match(Set dst (LoadB mem));
  // Plain load only; acquiring (volatile) loads match the ldar forms below.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L(iRegLNoSp dst, memory1 mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  // n->in(1) is the LoadB node underneath the ConvI2L.
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory1 mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L(iRegLNoSp dst, memory1 mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6601 
// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory2 mem)
%{
  match(Set dst (LoadS mem));
  // Plain load only; acquiring (volatile) loads match the ldar forms below.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory2 mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  // n->in(1) is the LoadS node underneath the ConvI2L.
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory2 mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory2 mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6657 
// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory4 mem)
%{
  match(Set dst (LoadI mem));
  // Plain load only; acquiring (volatile) loads match the ldar forms below.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long
instruct loadI2L(iRegLNoSp dst, memory4 mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  // n->in(1) is the LoadI node underneath the ConvI2L.
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
// The AndL with a 32-bit mask zero-extends, so a plain ldrw suffices.
instruct loadUI2L(iRegLNoSp dst, memory4 mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  // Walk down through AndL and ConvI2L to the underlying LoadI.
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Long (64 bit signed)
instruct loadL(iRegLNoSp dst, memory8 mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6713 
// Load Range
// Loads an array length; never volatile, so no acquiring predicate.
instruct loadRange(iRegINoSp dst, memory4 mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
instruct loadP(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadP mem));
  // Loads that need a GC barrier (barrier_data != 0) are handled elsewhere.
  predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n) && n->as_Load()->barrier_data() == 0);

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6754 
// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
// Plain form; with compact object headers the shifted variant below is used.
instruct loadNKlass(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n) && !UseCompactObjectHeaders);

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// With compact headers the narrow klass is stored shifted inside the mark
// word, so the load is followed by a logical shift right to extract it.
instruct loadNKlassCompactHeaders(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n) && UseCompactObjectHeaders);

  ins_cost(4 * INSN_COST);
  format %{
    "ldrw  $dst, $mem\t# compressed class ptr, shifted\n\t"
    "lsrw  $dst, $dst, markWord::klass_shift_at_offset"
  %}
  ins_encode %{
    // inlined aarch64_enc_ldrw
    loadStore(masm, &MacroAssembler::ldrw, $dst$$Register, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ lsrw($dst$$Register, $dst$$Register, markWord::klass_shift_at_offset);
  %}
  ins_pipe(iload_reg_mem);
%}
 6801 
// Load Float
instruct loadF(vRegF dst, memory4 mem)
%{
  match(Set dst (LoadF mem));
  // Plain load only; volatile FP loads match the ldars/ldard forms below.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory8 mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
 6829 
 6830 
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}
 6856 
// Load Pointer Constant
// General pointer constants may need a multi-instruction materialization,
// hence the higher cost than the special-cased null/one forms below.

instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant

instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# nullptr ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
 6886 
 6887 // Load Pointer Constant One
 6888 
 6889 instruct loadConP1(iRegPNoSp dst, immP_1 con)
 6890 %{
 6891   match(Set dst con);
 6892 
 6893   ins_cost(INSN_COST);
 6894   format %{ "mov  $dst, $con\t# nullptr ptr" %}
 6895 
 6896   ins_encode(aarch64_enc_mov_p1(dst, con));
 6897 
 6898   ins_pipe(ialu_imm);
 6899 %}
 6900 
// Load Narrow Pointer Constant

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant
// Cheaper than the general narrow-constant form above.

instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed nullptr ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant

instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
 6942 
// Load Packed Float Constant
// "Packed" constants are encodable directly in an fmov immediate, avoiding
// a constant-table load.

instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    // fmovs takes its immediate as a double.
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}

// Load Float Constant
// Non-packed constants are loaded from the constant table.

instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}

// Load Packed Double Constant

instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
 6986 
 6987 // Load Double Constant
 6988 
 6989 instruct loadConD(vRegD dst, immD con) %{
 6990   match(Set dst con);
 6991 
 6992   ins_cost(INSN_COST * 5);
 6993   format %{
 6994     "ldrd $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
 6995   %}
 6996 
 6997   ins_encode %{
 6998     __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
 6999   %}
 7000 
 7001   ins_pipe(fp_load_constant_d);
 7002 %}
 7003 
// Load Half Float Constant
// Materializes the 16-bit half-float bit pattern via a GP scratch register,
// then moves it into the FP register with fmov.
instruct loadConH(vRegF dst, immH con) %{
  match(Set dst con);
  format %{ "mov    rscratch1, $con\n\t"
            "fmov   $dst, rscratch1"
         %}
  ins_encode %{
    __ movw(rscratch1, (uint32_t)$con$$constant);
    __ fmovs($dst$$FloatRegister, rscratch1);
  %}
  ins_pipe(pipe_class_default);
%}
 7016 
// Store Instructions

// Store Byte
instruct storeB(iRegIorL2I src, memory1 mem)
%{
  match(Set mem (StoreB mem src));
  // Plain store only; releasing (volatile) stores match the stlr forms below.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7032 
 7033 
 7034 instruct storeimmB0(immI0 zero, memory1 mem)
 7035 %{
 7036   match(Set mem (StoreB mem zero));
 7037   predicate(!needs_releasing_store(n));
 7038 
 7039   ins_cost(INSN_COST);
 7040   format %{ "strb rscractch2, $mem\t# byte" %}
 7041 
 7042   ins_encode(aarch64_enc_strb0(mem));
 7043 
 7044   ins_pipe(istore_mem);
 7045 %}
 7046 
// Store Char/Short
instruct storeC(iRegIorL2I src, memory2 mem)
%{
  match(Set mem (StoreC mem src));
  // Plain store only; releasing (volatile) stores match the stlr forms below.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Zero store uses the zero register directly.
instruct storeimmC0(immI0 zero, memory2 mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}
 7073 
// Store Integer

instruct storeI(iRegIorL2I src, memory4 mem)
%{
  match(Set mem(StoreI mem src));
  // Plain store only; releasing (volatile) stores match the stlr forms below.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Zero store uses the zero register directly.
instruct storeimmI0(immI0 zero, memory4 mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
 7101 
// Store Long (64 bit signed)
instruct storeL(iRegL src, memory8 mem)
%{
  match(Set mem (StoreL mem src));
  // Plain store only; releasing (volatile) stores match the stlr forms below.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Long (64 bit signed)
// Zero store uses the zero register directly.
instruct storeimmL0(immL0 zero, memory8 mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
 7129 
// Store Pointer
instruct storeP(iRegP src, memory8 mem)
%{
  match(Set mem (StoreP mem src));
  // Stores needing a GC barrier (barrier_data != 0) are handled elsewhere.
  predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Pointer
// Null store uses the zero register directly.
instruct storeimmP0(immP0 zero, memory8 mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
 7157 
// Store Compressed Pointer
instruct storeN(iRegN src, memory4 mem)
%{
  match(Set mem (StoreN mem src));
  // Stores needing a GC barrier (barrier_data != 0) are handled elsewhere.
  predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Compressed null store uses the zero register directly.
instruct storeImmN0(immN0 zero, memory4 mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
 7184 
// Store Float
instruct storeF(vRegF src, memory4 mem)
%{
  match(Set mem (StoreF mem src));
  // Plain store only; releasing (volatile) FP stores match the stlr forms below.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD(vRegD src, memory8 mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 7215 
// Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory4 mem)
%{
  // Plain store only; no barrier variants exist for klass stores here.
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7229 
 7230 // TODO
 7231 // implement storeImmD0 and storeDImmPacked
 7232 
// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

instruct prefetchalloc( memory8 mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
 7246 
 7247 //  ---------------- volatile loads and stores ----------------
 7248 
// Load Byte (8 bit signed)
// Acquiring (volatile) loads use load-acquire instructions and only support
// a simple base-register (indirect) addressing mode.
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}
 7300 
// Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Char (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
 7338 
 7339 // Load Short/Char (16 bit signed) into long
 7340 instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7341 %{
 7342   match(Set dst (ConvI2L (LoadS mem)));
 7343 
 7344   ins_cost(VOLATILE_REF_COST);
 7345   format %{ "ldarh  $dst, $mem\t# short" %}
 7346 
 7347   ins_encode(aarch64_enc_ldarsh(dst, mem));
 7348 
 7349   ins_pipe(pipe_serial);
 7350 %}
 7351 
// Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
// The AndL with a 32-bit mask zero-extends, so a plain ldarw suffices.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Long (64 bit signed)
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
 7390 
// Load Pointer
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));
  // Loads needing a GC barrier (barrier_data != 0) are handled elsewhere.
  predicate(n->as_Load()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));
  predicate(n->as_Load()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
 7444 
// Store Byte
// Releasing (volatile) stores use store-release instructions and only support
// a simple base-register (indirect) addressing mode.
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Zero store uses the zero register directly.
instruct storeimmB0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb0(mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}

instruct storeimmC0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh0(mem));

  ins_pipe(pipe_class_memory);
%}
 7494 
// Store Integer

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Zero store uses the zero register directly.
instruct storeimmI0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw0(mem));

  ins_pipe(pipe_class_memory);
%}

// Store Long (64 bit signed)
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

instruct storeimmL0_volatile(immL0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_stlr0(mem));

  ins_pipe(pipe_class_memory);
%}
 7545 
// Store Pointer
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));
  // Stores needing a GC barrier (barrier_data != 0) are handled elsewhere.
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

instruct storeimmP0_volatile(immP0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr0(mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

instruct storeimmN0_volatile(immN0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  zr, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw0(mem));

  ins_pipe(pipe_class_memory);
%}
 7599 
// Store Float
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 7628 
 7629 //  ---------------- end of volatile loads and stores ----------------
 7630 
// Write back (flush) one data cache line containing addr. Used to
// implement persistent-memory cache-line writeback intrinsics.
instruct cacheWB(indirect addr)
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWB addr);

  ins_cost(100);
  format %{"cache wb $addr" %}
  ins_encode %{
    // The matched address must be a plain base register: no index, no offset.
    assert($addr->index_position() < 0, "should be");
    assert($addr$$disp == 0, "should be");
    __ cache_wb(Address($addr$$base$$Register, 0));
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Ordering barrier emitted before a sequence of cache-line writebacks.
instruct cacheWBPreSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPreSync);

  ins_cost(100);
  format %{"cache wb presync" %}
  ins_encode %{
    __ cache_wbsync(true);
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Ordering barrier emitted after a sequence of cache-line writebacks.
instruct cacheWBPostSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPostSync);

  ins_cost(100);
  format %{"cache wb postsync" %}
  ins_encode %{
    __ cache_wbsync(false);
  %}
  ins_pipe(pipe_slow); // XXX
%}
 7671 
 7672 // ============================================================================
 7673 // BSWAP Instructions
 7674 
 7675 instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
 7676   match(Set dst (ReverseBytesI src));
 7677 
 7678   ins_cost(INSN_COST);
 7679   format %{ "revw  $dst, $src" %}
 7680 
 7681   ins_encode %{
 7682     __ revw(as_Register($dst$$reg), as_Register($src$$reg));
 7683   %}
 7684 
 7685   ins_pipe(ialu_reg);
 7686 %}
 7687 
 7688 instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
 7689   match(Set dst (ReverseBytesL src));
 7690 
 7691   ins_cost(INSN_COST);
 7692   format %{ "rev  $dst, $src" %}
 7693 
 7694   ins_encode %{
 7695     __ rev(as_Register($dst$$reg), as_Register($src$$reg));
 7696   %}
 7697 
 7698   ins_pipe(ialu_reg);
 7699 %}
 7700 
 7701 instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
 7702   match(Set dst (ReverseBytesUS src));
 7703 
 7704   ins_cost(INSN_COST);
 7705   format %{ "rev16w  $dst, $src" %}
 7706 
 7707   ins_encode %{
 7708     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
 7709   %}
 7710 
 7711   ins_pipe(ialu_reg);
 7712 %}
 7713 
 7714 instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
 7715   match(Set dst (ReverseBytesS src));
 7716 
 7717   ins_cost(INSN_COST);
 7718   format %{ "rev16w  $dst, $src\n\t"
 7719             "sbfmw $dst, $dst, #0, #15" %}
 7720 
 7721   ins_encode %{
 7722     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
 7723     __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
 7724   %}
 7725 
 7726   ins_pipe(ialu_reg);
 7727 %}
 7728 
 7729 // ============================================================================
 7730 // Zero Count Instructions
 7731 
 7732 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
 7733   match(Set dst (CountLeadingZerosI src));
 7734 
 7735   ins_cost(INSN_COST);
 7736   format %{ "clzw  $dst, $src" %}
 7737   ins_encode %{
 7738     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
 7739   %}
 7740 
 7741   ins_pipe(ialu_reg);
 7742 %}
 7743 
 7744 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
 7745   match(Set dst (CountLeadingZerosL src));
 7746 
 7747   ins_cost(INSN_COST);
 7748   format %{ "clz   $dst, $src" %}
 7749   ins_encode %{
 7750     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
 7751   %}
 7752 
 7753   ins_pipe(ialu_reg);
 7754 %}
 7755 
 7756 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
 7757   match(Set dst (CountTrailingZerosI src));
 7758 
 7759   ins_cost(INSN_COST * 2);
 7760   format %{ "rbitw  $dst, $src\n\t"
 7761             "clzw   $dst, $dst" %}
 7762   ins_encode %{
 7763     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
 7764     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
 7765   %}
 7766 
 7767   ins_pipe(ialu_reg);
 7768 %}
 7769 
 7770 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
 7771   match(Set dst (CountTrailingZerosL src));
 7772 
 7773   ins_cost(INSN_COST * 2);
 7774   format %{ "rbit   $dst, $src\n\t"
 7775             "clz    $dst, $dst" %}
 7776   ins_encode %{
 7777     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
 7778     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
 7779   %}
 7780 
 7781   ins_pipe(ialu_reg);
 7782 %}
 7783 
//---------- Population Count Instructions -------------------------------------
//
// Population count has no general-register instruction on AArch64; it is
// done in the vector unit: move to an FP/SIMD register, per-byte cnt,
// horizontal addv, then move the result back.

instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "fmovs  $tmp, $src\t# vector (1S)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ fmovs($tmp$$FloatRegister, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Memory variant: load the int straight into the vector register,
// skipping the general-register fmov.
instruct popCountI_mem(iRegINoSp dst, memory4 mem, vRegF tmp) %{
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(masm, &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Memory variant: load the long straight into the vector register.
instruct popCountL_mem(iRegINoSp dst, memory8 mem, vRegD tmp) %{
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(masm, &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
 7867 
 7868 // ============================================================================
 7869 // VerifyVectorAlignment Instruction
 7870 
 7871 instruct verify_vector_alignment(iRegP addr, immL_positive_bitmaskI mask, rFlagsReg cr) %{
 7872   match(Set addr (VerifyVectorAlignment addr mask));
 7873   effect(KILL cr);
 7874   format %{ "verify_vector_alignment $addr $mask \t! verify alignment" %}
 7875   ins_encode %{
 7876     Label Lskip;
 7877     // check if masked bits of addr are zero
 7878     __ tst($addr$$Register, $mask$$constant);
 7879     __ br(Assembler::EQ, Lskip);
 7880     __ stop("verify_vector_alignment found a misaligned vector memory access");
 7881     __ bind(Lskip);
 7882   %}
 7883   ins_pipe(pipe_slow);
 7884 %}
 7885 
 7886 // ============================================================================
 7887 // MemBar Instruction
 7888 
 7889 instruct load_fence() %{
 7890   match(LoadFence);
 7891   ins_cost(VOLATILE_REF_COST);
 7892 
 7893   format %{ "load_fence" %}
 7894 
 7895   ins_encode %{
 7896     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
 7897   %}
 7898   ins_pipe(pipe_serial);
 7899 %}
 7900 
 7901 instruct unnecessary_membar_acquire() %{
 7902   predicate(unnecessary_acquire(n));
 7903   match(MemBarAcquire);
 7904   ins_cost(0);
 7905 
 7906   format %{ "membar_acquire (elided)" %}
 7907 
 7908   ins_encode %{
 7909     __ block_comment("membar_acquire (elided)");
 7910   %}
 7911 
 7912   ins_pipe(pipe_class_empty);
 7913 %}
 7914 
 7915 instruct membar_acquire() %{
 7916   match(MemBarAcquire);
 7917   ins_cost(VOLATILE_REF_COST);
 7918 
 7919   format %{ "membar_acquire\n\t"
 7920             "dmb ishld" %}
 7921 
 7922   ins_encode %{
 7923     __ block_comment("membar_acquire");
 7924     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
 7925   %}
 7926 
 7927   ins_pipe(pipe_serial);
 7928 %}
 7929 
 7930 
 7931 instruct membar_acquire_lock() %{
 7932   match(MemBarAcquireLock);
 7933   ins_cost(VOLATILE_REF_COST);
 7934 
 7935   format %{ "membar_acquire_lock (elided)" %}
 7936 
 7937   ins_encode %{
 7938     __ block_comment("membar_acquire_lock (elided)");
 7939   %}
 7940 
 7941   ins_pipe(pipe_serial);
 7942 %}
 7943 
 7944 instruct store_fence() %{
 7945   match(StoreFence);
 7946   ins_cost(VOLATILE_REF_COST);
 7947 
 7948   format %{ "store_fence" %}
 7949 
 7950   ins_encode %{
 7951     __ membar(Assembler::LoadStore|Assembler::StoreStore);
 7952   %}
 7953   ins_pipe(pipe_serial);
 7954 %}
 7955 
 7956 instruct unnecessary_membar_release() %{
 7957   predicate(unnecessary_release(n));
 7958   match(MemBarRelease);
 7959   ins_cost(0);
 7960 
 7961   format %{ "membar_release (elided)" %}
 7962 
 7963   ins_encode %{
 7964     __ block_comment("membar_release (elided)");
 7965   %}
 7966   ins_pipe(pipe_serial);
 7967 %}
 7968 
 7969 instruct membar_release() %{
 7970   match(MemBarRelease);
 7971   ins_cost(VOLATILE_REF_COST);
 7972 
 7973   format %{ "membar_release\n\t"
 7974             "dmb ishst\n\tdmb ishld" %}
 7975 
 7976   ins_encode %{
 7977     __ block_comment("membar_release");
 7978     // These will be merged if AlwaysMergeDMB is enabled.
 7979     __ membar(Assembler::StoreStore);
 7980     __ membar(Assembler::LoadStore);
 7981   %}
 7982   ins_pipe(pipe_serial);
 7983 %}
 7984 
 7985 instruct membar_storestore() %{
 7986   match(MemBarStoreStore);
 7987   match(StoreStoreFence);
 7988   ins_cost(VOLATILE_REF_COST);
 7989 
 7990   format %{ "MEMBAR-store-store" %}
 7991 
 7992   ins_encode %{
 7993     __ membar(Assembler::StoreStore);
 7994   %}
 7995   ins_pipe(pipe_serial);
 7996 %}
 7997 
 7998 instruct membar_release_lock() %{
 7999   match(MemBarReleaseLock);
 8000   ins_cost(VOLATILE_REF_COST);
 8001 
 8002   format %{ "membar_release_lock (elided)" %}
 8003 
 8004   ins_encode %{
 8005     __ block_comment("membar_release_lock (elided)");
 8006   %}
 8007 
 8008   ins_pipe(pipe_serial);
 8009 %}
 8010 
 8011 instruct unnecessary_membar_volatile() %{
 8012   predicate(unnecessary_volatile(n));
 8013   match(MemBarVolatile);
 8014   ins_cost(0);
 8015 
 8016   format %{ "membar_volatile (elided)" %}
 8017 
 8018   ins_encode %{
 8019     __ block_comment("membar_volatile (elided)");
 8020   %}
 8021 
 8022   ins_pipe(pipe_serial);
 8023 %}
 8024 
 8025 instruct membar_volatile() %{
 8026   match(MemBarVolatile);
 8027   ins_cost(VOLATILE_REF_COST*100);
 8028 
 8029   format %{ "membar_volatile\n\t"
 8030              "dmb ish"%}
 8031 
 8032   ins_encode %{
 8033     __ block_comment("membar_volatile");
 8034     __ membar(Assembler::StoreLoad);
 8035   %}
 8036 
 8037   ins_pipe(pipe_serial);
 8038 %}
 8039 
 8040 // ============================================================================
 8041 // Cast/Convert Instructions
 8042 
 8043 instruct castX2P(iRegPNoSp dst, iRegL src) %{
 8044   match(Set dst (CastX2P src));
 8045 
 8046   ins_cost(INSN_COST);
 8047   format %{ "mov $dst, $src\t# long -> ptr" %}
 8048 
 8049   ins_encode %{
 8050     if ($dst$$reg != $src$$reg) {
 8051       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 8052     }
 8053   %}
 8054 
 8055   ins_pipe(ialu_reg);
 8056 %}
 8057 
 8058 instruct castP2X(iRegLNoSp dst, iRegP src) %{
 8059   match(Set dst (CastP2X src));
 8060 
 8061   ins_cost(INSN_COST);
 8062   format %{ "mov $dst, $src\t# ptr -> long" %}
 8063 
 8064   ins_encode %{
 8065     if ($dst$$reg != $src$$reg) {
 8066       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 8067     }
 8068   %}
 8069 
 8070   ins_pipe(ialu_reg);
 8071 %}
 8072 
 8073 // Convert oop into int for vectors alignment masking
 8074 instruct convP2I(iRegINoSp dst, iRegP src) %{
 8075   match(Set dst (ConvL2I (CastP2X src)));
 8076 
 8077   ins_cost(INSN_COST);
 8078   format %{ "movw $dst, $src\t# ptr -> int" %}
 8079   ins_encode %{
 8080     __ movw($dst$$Register, $src$$Register);
 8081   %}
 8082 
 8083   ins_pipe(ialu_reg);
 8084 %}
 8085 
 8086 // Convert compressed oop into int for vectors alignment masking
 8087 // in case of 32bit oops (heap < 4Gb).
 8088 instruct convN2I(iRegINoSp dst, iRegN src)
 8089 %{
 8090   predicate(CompressedOops::shift() == 0);
 8091   match(Set dst (ConvL2I (CastP2X (DecodeN src))));
 8092 
 8093   ins_cost(INSN_COST);
 8094   format %{ "mov dst, $src\t# compressed ptr -> int" %}
 8095   ins_encode %{
 8096     __ movw($dst$$Register, $src$$Register);
 8097   %}
 8098 
 8099   ins_pipe(ialu_reg);
 8100 %}
 8101 


// Convert oop pointer into compressed form
// Used when the oop may be null; the full encode sequence can clobber flags.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Faster encode for an oop statically known to be non-null (no null check,
// no flags clobbered).
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop that may be null.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Faster decode for a narrow oop statically known to be non-null.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
 8156 
// n.b. AArch64 implementations of encode_klass_not_null and
// decode_klass_not_null do not modify the flags register so, unlike
// Intel, we don't kill CR as a side effect here

// Compress a klass pointer (always non-null).
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// Decompress a narrow klass pointer (always non-null). The macro assembler
// has a distinct in-place variant for the dst == src case.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
 8194 
// The cast nodes below exist only to carry type information for the
// compiler; they emit no machine code (size(0), empty encoding).

instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// Zero-size form, used unless range verification is requested.
instruct castII(iRegI dst)
%{
  predicate(VerifyConstraintCasts == 0);
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// Debug form: verifies at runtime that the value lies in the node's
// declared int range (clobbers flags and rscratch1).
instruct castII_checked(iRegI dst, rFlagsReg cr)
%{
  predicate(VerifyConstraintCasts > 0);
  match(Set dst (CastII dst));
  effect(KILL cr);

  format %{ "# castII_checked of $dst" %}
  ins_encode %{
    __ verify_int_in_range(_idx, bottom_type()->is_int(), $dst$$Register, rscratch1);
  %}
  ins_pipe(pipe_slow);
%}

// Zero-size form, used unless range verification is requested.
instruct castLL(iRegL dst)
%{
  predicate(VerifyConstraintCasts == 0);
  match(Set dst (CastLL dst));

  size(0);
  format %{ "# castLL of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// Debug form: verifies at runtime that the value lies in the node's
// declared long range (clobbers flags and rscratch1).
instruct castLL_checked(iRegL dst, rFlagsReg cr)
%{
  predicate(VerifyConstraintCasts > 0);
  match(Set dst (CastLL dst));
  effect(KILL cr);

  format %{ "# castLL_checked of $dst" %}
  ins_encode %{
    __ verify_long_in_range(_idx, bottom_type()->is_long(), $dst$$Register, rscratch1);
  %}
  ins_pipe(pipe_slow);
%}

instruct castHH(vRegF dst)
%{
  match(Set dst (CastHH dst));
  size(0);
  format %{ "# castHH of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castFF(vRegF dst)
%{
  match(Set dst (CastFF dst));

  size(0);
  format %{ "# castFF of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castDD(vRegD dst)
%{
  match(Set dst (CastDD dst));

  size(0);
  format %{ "# castDD of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castVV(vReg dst)
%{
  match(Set dst (CastVV dst));

  size(0);
  format %{ "# castVV of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// Same as castVV but for values held in SVE predicate (governing) registers.
instruct castVVMask(pRegGov dst)
%{
  match(Set dst (CastVV dst));

  size(0);
  format %{ "# castVV of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
 8318 
// Manifest a CmpU result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Pattern: csetw sets dst to 1 when the values differ, then cnegw negates
// it when src1 is unsigned-lower (LO), yielding -1/0/1.
instruct cmpU3_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg flags)
%{
  match(Set dst (CmpU3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmpw $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpU3(reg)"
  %}
  ins_encode %{
    __ cmpw($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}

// As above with an add/sub-encodable immediate: compare via subsw into zr.
instruct cmpU3_reg_imm(iRegINoSp dst, iRegI src1, immIAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpU3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subsw zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpU3(imm)"
  %}
  ins_encode %{
    __ subsw(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}

// Manifest a CmpUL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// 64-bit unsigned compare; same cset/cneg idiom with condition LO.
instruct cmpUL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpUL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmp $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpUL3(reg)"
  %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}

// Immediate variant of the 64-bit unsigned three-way compare.
instruct cmpUL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpUL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subs zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpUL3(imm)"
  %}
  ins_encode %{
    __ subs(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}

// Manifest a CmpL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Signed 64-bit compare; negation condition is LT instead of LO.
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmp $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lt\t# CmpL3(reg)"
  %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}

// Immediate variant of the signed 64-bit three-way compare.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subs zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lt\t# CmpL3(imm)"
  %}
  ins_encode %{
    __ subs(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
 8444 
 8445 // ============================================================================
 8446 // Conditional Move Instructions
 8447 
// n.b. we have identical rules for both a signed compare op (cmpOp)
// and an unsigned compare op (cmpOpU). it would be nice if we could
// define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
// of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
 8457 
// Conditional select for int: dst = cmp ? src2 : src1 (note the operand
// order — src2 is selected when the condition holds).
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare flavour of the rule above (see the note preceding
// these rules for why both flavours are required).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 8489 
// special cases where one arg is zero

// n.b. this is selected in preference to the rule above because it
// avoids loading constant 0 into a source register

// TODO
// we ought only to be able to cull one of these variants as the ideal
// transforms ought always to order the zero consistently (to left/right?)

// dst = cmp ? src : 0, using zr as the false operand.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of cmovI_zero_reg.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// dst = cmp ? 0 : src, using zr as the true operand.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of cmovI_reg_zero.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 8562 
// special case for creating a boolean 0 or 1

// n.b. this is selected in preference to the rule above because it
// avoids loading constants 0 and 1 into a source register

// dst = cmp ? 1 : 0 via csincw zr, zr (increments zr's zero to 1 when the
// condition does not hold — see the inverted-condition note below).
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// Unsigned-compare flavour of cmovI_reg_zero_one.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
 8605 
 8606 instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
 8607   match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));
 8608 
 8609   ins_cost(INSN_COST * 2);
 8610   format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}
 8611 
 8612   ins_encode %{
 8613     __ csel(as_Register($dst$$reg),
 8614             as_Register($src2$$reg),
 8615             as_Register($src1$$reg),
 8616             (Assembler::Condition)$cmp$$cmpcode);
 8617   %}
 8618 
 8619   ins_pipe(icond_reg_reg);
 8620 %}
 8621 
// long CMove, unsigned compare: dst = cond ? $src2 : $src1.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 8637 
 8638 // special cases where one arg is zero
 8639 
// long CMove with zero second arg, signed compare:
// dst = cond ? 0 : $src (zr sourced directly, no constant load).
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 8655 
// long CMove with zero second arg, unsigned compare: dst = cond ? 0 : $src.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 8671 
// long CMove with zero first arg, signed compare: dst = cond ? $src : 0.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 8687 
// long CMove with zero first arg, unsigned compare: dst = cond ? $src : 0.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 8703 
// pointer CMove, signed compare: dst = cond ? $src2 : $src1 (64-bit csel).
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 8719 
// pointer CMove, unsigned compare: dst = cond ? $src2 : $src1.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 8735 
 8736 // special cases where one arg is zero
 8737 
// pointer CMove with null second arg, signed compare: dst = cond ? null : $src.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 8753 
// pointer CMove with null second arg, unsigned compare: dst = cond ? null : $src.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 8769 
// pointer CMove with null first arg, signed compare: dst = cond ? $src : null.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 8785 
// pointer CMove with null first arg, unsigned compare: dst = cond ? $src : null.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 8801 
// compressed-oop CMove, signed compare: 32-bit cselw since narrow oops
// occupy the low word; dst = cond ? $src2 : $src1.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 8817 
 8818 instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
 8819   match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));
 8820 
 8821   ins_cost(INSN_COST * 2);
 8822   format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}
 8823 
 8824   ins_encode %{
 8825     __ cselw(as_Register($dst$$reg),
 8826              as_Register($src2$$reg),
 8827              as_Register($src1$$reg),
 8828              (Assembler::Condition)$cmp$$cmpcode);
 8829   %}
 8830 
 8831   ins_pipe(icond_reg_reg);
 8832 %}
 8833 
 8834 // special cases where one arg is zero
 8835 
// compressed-oop CMove with null second arg, signed compare:
// dst = cond ? 0 : $src.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 8851 
// compressed-oop CMove with null second arg, unsigned compare:
// dst = cond ? 0 : $src.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 8867 
// compressed-oop CMove with null first arg, signed compare:
// dst = cond ? $src : 0.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 8883 
// compressed-oop CMove with null first arg, unsigned compare:
// dst = cond ? $src : 0.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 8899 
// float CMove, signed compare: fcsel Sd, Sn, Sm, cond sets Sd = cond ? Sn : Sm,
// so dst = cond ? $src2 : $src1 (operands swapped in the emit).
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
 8917 
// float CMove, unsigned compare: dst = cond ? $src2 : $src1.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
 8935 
 8936 instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
 8937 %{
 8938   match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));
 8939 
 8940   ins_cost(INSN_COST * 3);
 8941 
 8942   format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
 8943   ins_encode %{
 8944     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
 8945     __ fcseld(as_FloatRegister($dst$$reg),
 8946               as_FloatRegister($src2$$reg),
 8947               as_FloatRegister($src1$$reg),
 8948               cond);
 8949   %}
 8950 
 8951   ins_pipe(fp_cond_reg_reg_d);
 8952 %}
 8953 
 8954 instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
 8955 %{
 8956   match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));
 8957 
 8958   ins_cost(INSN_COST * 3);
 8959 
 8960   format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
 8961   ins_encode %{
 8962     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
 8963     __ fcseld(as_FloatRegister($dst$$reg),
 8964               as_FloatRegister($src2$$reg),
 8965               as_FloatRegister($src1$$reg),
 8966               cond);
 8967   %}
 8968 
 8969   ins_pipe(fp_cond_reg_reg_d);
 8970 %}
 8971 
 8972 // ============================================================================
 8973 // Arithmetic Instructions
 8974 //
 8975 
 8976 // Integer Addition
 8977 
 8978 // TODO
 8979 // these currently employ operations which do not set CR and hence are
 8980 // not flagged as killing CR but we would like to isolate the cases
 8981 // where we want to set flags from those where we don't. need to work
 8982 // out how to do that.
 8983 
// int add, register-register: addw (32-bit form).
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
 8998 
// int add, register-immediate; immIAddSub limits $src2 to encodable
// add/sub immediates so no constant materialization is needed.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
 9012 
// int add of a narrowed long and an immediate; addw's implicit 32-bit
// truncation makes the explicit ConvL2I redundant.
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
 9026 
 9027 // Pointer Addition
// pointer add, register-register (64-bit add).
instruct addP_reg_reg(iRegPNoSp dst, iRegPorL2P src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
 9042 
// pointer add of a sign-extended int offset; folds the ConvI2L into the
// add's sxtw extend so no separate widening instruction is emitted.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegPorL2P src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}
 9057 
// pointer add of a shifted long index; folds base + (index << scale)
// into a single lea-style address computation.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegPorL2P src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
 9072 
// pointer add of a sign-extended, shifted int index; folds
// base + ((long)index << scale) into one lea with sxtw extend.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegPorL2P src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
 9087 
// (long)(int << scale): a single sbfiz performs the sign-extension and
// shift together; width is capped at 32 since only the low 32 source
// bits are significant after ConvI2L.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN2(32, (int)((-$scale$$constant) & 63)));
  %}

  ins_pipe(ialu_reg_shift);
%}
 9102 
 9103 // Pointer Immediate Addition
 9104 // n.b. this needs to be more expensive than using an indirect memory
 9105 // operand
// pointer add, register-immediate (encodable add/sub immediate only).
instruct addP_reg_imm(iRegPNoSp dst, iRegPorL2P src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
 9119 
 9120 // Long Addition
// long add, register-register (64-bit add).
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
 9136 
// Long Immediate Addition.
// No constant pool entries required.
// long add, register-immediate (encodable add/sub immediate only).
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
 9151 
 9152 // Integer Subtraction
// int subtract, register-register: subw (32-bit form).
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
 9167 
 9168 // Immediate Subtraction
// int subtract, register-immediate (shared add/sub encoder, opcode 0x1 = sub).
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
 9182 
 9183 // Long Subtraction
// long subtract, register-register (64-bit sub).
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
 9199 
// Long Immediate Subtraction.
// No constant pool entries required.
 9201 instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
 9202   match(Set dst (SubL src1 src2));
 9203 
 9204   ins_cost(INSN_COST);
 9205   format %{ "sub$dst, $src1, $src2" %}
 9206 
 9207   // use opcode to indicate that this is a sub not an add
 9208   opcode(0x1);
 9209 
 9210   ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );
 9211 
 9212   ins_pipe(ialu_reg_imm);
 9213 %}
 9214 
 9215 // Integer Negation (special case for sub)
 9216 
// int negate: matches 0 - src and emits negw.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
 9230 
 9231 // Long Negation
 9232 
// long negate: matches 0 - src and emits neg.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
 9246 
 9247 // Integer Multiply
 9248 
// int multiply: mulw (32-bit form).
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
 9263 
// widening int multiply: (long)a * (long)b matched directly to smull,
// avoiding two explicit sign extensions plus a 64-bit mul.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
 9278 
 9279 // Long Multiply
 9280 
// long multiply (low 64 bits of the product).
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
 9295 
// signed 64x64->128 multiply, high 64 bits: smulh.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
 9311 
// unsigned 64x64->128 multiply, high 64 bits: umulh.
instruct umulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (UMulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "umulh   $dst, $src1, $src2\t# umulhi" %}

  ins_encode %{
    __ umulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
 9327 
 9328 // Combined Integer Multiply & Add/Sub
 9329 
// fused int multiply-add: dst = src3 + src1 * src2, via maddw.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
 9345 
// fused int multiply-subtract: dst = src3 - src1 * src2, via msubw.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
 9361 
 9362 // Combined Integer Multiply & Neg
 9363 
// fused int multiply-negate: dst = -(src1 * src2), matched from
// (0 - src1) * src2 and emitted as a single mnegw.
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
  match(Set dst (MulI (SubI zero src1) src2));

  ins_cost(INSN_COST * 3);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mnegw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
 9378 
 9379 // Combined Long Multiply & Add/Sub
 9380 
// fused long multiply-add: dst = src3 + src1 * src2, via madd.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
 9396 
// fused long multiply-subtract: dst = src3 - src1 * src2, via msub.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
 9412 
 9413 // Combined Long Multiply & Neg
 9414 
// fused long multiply-negate: dst = -(src1 * src2), matched from
// (0 - src1) * src2 and emitted as a single mneg.
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mneg(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
 9429 
 9430 // Combine Integer Signed Multiply & Add/Sub/Neg Long
 9431 
// widening multiply-add: dst = src3 + (long)src1 * (long)src2, via smaddl.
instruct smaddL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (AddL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smaddl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smaddl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
 9447 
// widening multiply-subtract: dst = src3 - (long)src1 * (long)src2, via smsubl.
instruct smsubL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (SubL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smsubl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smsubl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
 9463 
// widening multiply-negate: dst = -((long)src1 * (long)src2), via smnegl.
instruct smnegL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero (ConvI2L src1)) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smnegl  $dst, $src1, $src2" %}

  ins_encode %{
    __ smnegl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
 9478 
 9479 // Combined Multiply-Add Shorts into Integer (dst = src1 * src2 + src3 * src4)
 9480 
// dst = src1 * src2 + src3 * src4 in two instructions; the first product
// goes through rscratch1, the second is fused with the add via maddw.
instruct muladdS2I(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3, iRegIorL2I src4) %{
  match(Set dst (MulAddS2I (Binary src1 src2) (Binary src3 src4)));

  ins_cost(INSN_COST * 5);
  format %{ "mulw  rscratch1, $src1, $src2\n\t"
            "maddw $dst, $src3, $src4, rscratch1" %}

  ins_encode %{
    __ mulw(rscratch1, as_Register($src1$$reg), as_Register($src2$$reg));
    __ maddw(as_Register($dst$$reg), as_Register($src3$$reg), as_Register($src4$$reg), rscratch1); %}

  ins_pipe(imac_reg_reg);
%}
 9494 
 9495 // Integer Divide
 9496 
// signed int divide: sdivw via the shared divw encoder.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
 9506 
 9507 // Long Divide
 9508 
// signed long divide: sdiv via the shared div encoder.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
 9518 
 9519 // Integer Remainder
 9520 
// signed int remainder: AArch64 has no modulo instruction, so this is
// sdivw then msubw (dst = src1 - (src1 / src2) * src2) via the encoder.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
 9531 
 9532 // Long Remainder
 9533 
// signed long remainder: sdiv then msub, as in the int case.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  format %{ "sdiv   rscratch1, $src1, $src2\n"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
 9544 
 9545 // Unsigned Integer Divide
 9546 
// unsigned int divide: udivw.
instruct UdivI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (UDivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "udivw  $dst, $src1, $src2" %}

  ins_encode %{
    __ udivw($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(idiv_reg_reg);
%}
 9559 
 9560 //  Unsigned Long Divide
 9561 
// unsigned long divide: udiv.
instruct UdivL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (UDivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "udiv   $dst, $src1, $src2" %}

  ins_encode %{
    __ udiv($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(ldiv_reg_reg);
%}
 9574 
 9575 // Unsigned Integer Remainder
 9576 
// unsigned int remainder: udivw then msubw
// (dst = src1 - (src1 u/ src2) * src2) through rscratch1.
instruct UmodI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (UModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "udivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode %{
    __ udivw(rscratch1, $src1$$Register, $src2$$Register);
    __ msubw($dst$$Register, rscratch1, $src2$$Register, $src1$$Register);
  %}

  ins_pipe(idiv_reg_reg);
%}
 9591 
 9592 // Unsigned Long Remainder
 9593 
// unsigned long remainder: udiv then msub through rscratch1.
instruct UModL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (UModL src1 src2));

  ins_cost(INSN_COST * 38);
  format %{ "udiv   rscratch1, $src1, $src2\n"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode %{
    __ udiv(rscratch1, $src1$$Register, $src2$$Register);
    __ msub($dst$$Register, rscratch1, $src2$$Register, $src1$$Register);
  %}

  ins_pipe(ldiv_reg_reg);
%}
 9608 
 9609 // Integer Shifts
 9610 
 9611 // Shift Left Register
// int shift left by register amount: lslvw.
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
 9626 
 9627 // Shift Left Immediate
// int shift left by constant; amount masked to 0..31 to match
// Java's int shift semantics.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
 9642 
 9643 // Shift Right Logical Register
 9644 instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 9645   match(Set dst (URShiftI src1 src2));
 9646 
 9647   ins_cost(INSN_COST * 2);
 9648   format %{ "lsrvw  $dst, $src1, $src2" %}
 9649 
 9650   ins_encode %{
 9651     __ lsrvw(as_Register($dst$$reg),
 9652              as_Register($src1$$reg),
 9653              as_Register($src2$$reg));
 9654   %}
 9655 
 9656   ins_pipe(ialu_reg_reg_vshift);
 9657 %}
 9658 
 9659 // Shift Right Logical Immediate
 9660 instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
 9661   match(Set dst (URShiftI src1 src2));
 9662 
 9663   ins_cost(INSN_COST);
 9664   format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}
 9665 
 9666   ins_encode %{
 9667     __ lsrw(as_Register($dst$$reg),
 9668             as_Register($src1$$reg),
 9669             $src2$$constant & 0x1f);
 9670   %}
 9671 
 9672   ins_pipe(ialu_reg_shift);
 9673 %}
 9674 
 9675 // Shift Right Arithmetic Register
 9676 instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 9677   match(Set dst (RShiftI src1 src2));
 9678 
 9679   ins_cost(INSN_COST * 2);
 9680   format %{ "asrvw  $dst, $src1, $src2" %}
 9681 
 9682   ins_encode %{
 9683     __ asrvw(as_Register($dst$$reg),
 9684              as_Register($src1$$reg),
 9685              as_Register($src2$$reg));
 9686   %}
 9687 
 9688   ins_pipe(ialu_reg_reg_vshift);
 9689 %}
 9690 
 9691 // Shift Right Arithmetic Immediate
 9692 instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
 9693   match(Set dst (RShiftI src1 src2));
 9694 
 9695   ins_cost(INSN_COST);
 9696   format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}
 9697 
 9698   ins_encode %{
 9699     __ asrw(as_Register($dst$$reg),
 9700             as_Register($src1$$reg),
 9701             $src2$$constant & 0x1f);
 9702   %}
 9703 
 9704   ins_pipe(ialu_reg_shift);
 9705 %}
 9706 
 9707 // Combined Int Mask and Right Shift (using UBFM)
 9708 // TODO
 9709 
 9710 // Long Shifts
 9711 
 9712 // Shift Left Register
 9713 instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
 9714   match(Set dst (LShiftL src1 src2));
 9715 
 9716   ins_cost(INSN_COST * 2);
 9717   format %{ "lslv  $dst, $src1, $src2" %}
 9718 
 9719   ins_encode %{
 9720     __ lslv(as_Register($dst$$reg),
 9721             as_Register($src1$$reg),
 9722             as_Register($src2$$reg));
 9723   %}
 9724 
 9725   ins_pipe(ialu_reg_reg_vshift);
 9726 %}
 9727 
 9728 // Shift Left Immediate
 9729 instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
 9730   match(Set dst (LShiftL src1 src2));
 9731 
 9732   ins_cost(INSN_COST);
 9733   format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}
 9734 
 9735   ins_encode %{
 9736     __ lsl(as_Register($dst$$reg),
 9737             as_Register($src1$$reg),
 9738             $src2$$constant & 0x3f);
 9739   %}
 9740 
 9741   ins_pipe(ialu_reg_shift);
 9742 %}
 9743 
 9744 // Shift Right Logical Register
 9745 instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
 9746   match(Set dst (URShiftL src1 src2));
 9747 
 9748   ins_cost(INSN_COST * 2);
 9749   format %{ "lsrv  $dst, $src1, $src2" %}
 9750 
 9751   ins_encode %{
 9752     __ lsrv(as_Register($dst$$reg),
 9753             as_Register($src1$$reg),
 9754             as_Register($src2$$reg));
 9755   %}
 9756 
 9757   ins_pipe(ialu_reg_reg_vshift);
 9758 %}
 9759 
 9760 // Shift Right Logical Immediate
 9761 instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
 9762   match(Set dst (URShiftL src1 src2));
 9763 
 9764   ins_cost(INSN_COST);
 9765   format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}
 9766 
 9767   ins_encode %{
 9768     __ lsr(as_Register($dst$$reg),
 9769            as_Register($src1$$reg),
 9770            $src2$$constant & 0x3f);
 9771   %}
 9772 
 9773   ins_pipe(ialu_reg_shift);
 9774 %}
 9775 
 9776 // A special-case pattern for card table stores.
 9777 instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
 9778   match(Set dst (URShiftL (CastP2X src1) src2));
 9779 
 9780   ins_cost(INSN_COST);
 9781   format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}
 9782 
 9783   ins_encode %{
 9784     __ lsr(as_Register($dst$$reg),
 9785            as_Register($src1$$reg),
 9786            $src2$$constant & 0x3f);
 9787   %}
 9788 
 9789   ins_pipe(ialu_reg_shift);
 9790 %}
 9791 
 9792 // Shift Right Arithmetic Register
 9793 instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
 9794   match(Set dst (RShiftL src1 src2));
 9795 
 9796   ins_cost(INSN_COST * 2);
 9797   format %{ "asrv  $dst, $src1, $src2" %}
 9798 
 9799   ins_encode %{
 9800     __ asrv(as_Register($dst$$reg),
 9801             as_Register($src1$$reg),
 9802             as_Register($src2$$reg));
 9803   %}
 9804 
 9805   ins_pipe(ialu_reg_reg_vshift);
 9806 %}
 9807 
 9808 // Shift Right Arithmetic Immediate
 9809 instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
 9810   match(Set dst (RShiftL src1 src2));
 9811 
 9812   ins_cost(INSN_COST);
 9813   format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}
 9814 
 9815   ins_encode %{
 9816     __ asr(as_Register($dst$$reg),
 9817            as_Register($src1$$reg),
 9818            $src2$$constant & 0x3f);
 9819   %}
 9820 
 9821   ins_pipe(ialu_reg_shift);
 9822 %}
 9823 
 9824 // BEGIN This section of the file is automatically generated. Do not edit --------------
 9825 // This section is generated from aarch64_ad.m4
 9826 
 9827 // This pattern is automatically generated from aarch64_ad.m4
 9828 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  // dst = ~src1: XOR with all-ones is bitwise NOT, encoded as EON with zr.
  // NOTE(review): operand 'cr' is declared but unused -- appears to be an
  // m4 template artifact; confirm against aarch64_ad.m4 before changing.
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
 9845 
 9846 // This pattern is automatically generated from aarch64_ad.m4
 9847 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  // dst = ~src1: XOR with all-ones is bitwise NOT, encoded as EONW with zr.
  // NOTE(review): operand 'cr' is declared but unused -- appears to be an
  // m4 template artifact; confirm against aarch64_ad.m4 before changing.
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
 9864 
 9865 // This pattern is automatically generated from aarch64_ad.m4
 9866 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct NegI_reg_URShift_reg(iRegINoSp dst,
                              immI0 zero, iRegIorL2I src1, immI src2) %{
  // dst = 0 - (src1 >>> src2): negate-of-shift fused into NEGW with an
  // LSR-shifted operand (shift amount masked to 5 bits for int).
  match(Set dst (SubI zero (URShiftI src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "negw  $dst, $src1, LSR $src2" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSR, $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
 9881 
 9882 // This pattern is automatically generated from aarch64_ad.m4
 9883 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct NegI_reg_RShift_reg(iRegINoSp dst,
                              immI0 zero, iRegIorL2I src1, immI src2) %{
  // dst = 0 - (src1 >> src2): negate-of-shift fused into NEGW with an
  // ASR-shifted operand (shift amount masked to 5 bits for int).
  match(Set dst (SubI zero (RShiftI src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "negw  $dst, $src1, ASR $src2" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::ASR, $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
 9898 
 9899 // This pattern is automatically generated from aarch64_ad.m4
 9900 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct NegI_reg_LShift_reg(iRegINoSp dst,
                              immI0 zero, iRegIorL2I src1, immI src2) %{
  // dst = 0 - (src1 << src2): negate-of-shift fused into NEGW with an
  // LSL-shifted operand (shift amount masked to 5 bits for int).
  match(Set dst (SubI zero (LShiftI src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "negw  $dst, $src1, LSL $src2" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSL, $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
 9915 
 9916 // This pattern is automatically generated from aarch64_ad.m4
 9917 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct NegL_reg_URShift_reg(iRegLNoSp dst,
                              immL0 zero, iRegL src1, immI src2) %{
  // dst = 0 - (src1 >>> src2): negate-of-shift fused into NEG with an
  // LSR-shifted operand (shift amount masked to 6 bits for long).
  match(Set dst (SubL zero (URShiftL src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "neg  $dst, $src1, LSR $src2" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSR, $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
 9932 
 9933 // This pattern is automatically generated from aarch64_ad.m4
 9934 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct NegL_reg_RShift_reg(iRegLNoSp dst,
                              immL0 zero, iRegL src1, immI src2) %{
  // dst = 0 - (src1 >> src2): negate-of-shift fused into NEG with an
  // ASR-shifted operand (shift amount masked to 6 bits for long).
  match(Set dst (SubL zero (RShiftL src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "neg  $dst, $src1, ASR $src2" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::ASR, $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
 9949 
 9950 // This pattern is automatically generated from aarch64_ad.m4
 9951 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct NegL_reg_LShift_reg(iRegLNoSp dst,
                              immL0 zero, iRegL src1, immI src2) %{
  // dst = 0 - (src1 << src2): negate-of-shift fused into NEG with an
  // LSL-shifted operand (shift amount masked to 6 bits for long).
  match(Set dst (SubL zero (LShiftL src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "neg  $dst, $src1, LSL $src2" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSL, $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
 9966 
 9967 // This pattern is automatically generated from aarch64_ad.m4
 9968 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
  // dst = src1 & ~src2 (src2 ^ -1 == ~src2), fused into a single BICW.
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
 9984 
 9985 // This pattern is automatically generated from aarch64_ad.m4
 9986 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1) %{
  // dst = src1 & ~src2 (src2 ^ -1 == ~src2), fused into a single BIC.
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10002 
10003 // This pattern is automatically generated from aarch64_ad.m4
10004 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
  // dst = src1 | ~src2 (src2 ^ -1 == ~src2), fused into a single ORNW.
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10020 
10021 // This pattern is automatically generated from aarch64_ad.m4
10022 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1) %{
  // dst = src1 | ~src2 (src2 ^ -1 == ~src2), fused into a single ORN.
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10038 
10039 // This pattern is automatically generated from aarch64_ad.m4
10040 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
  // dst = -1 ^ (src2 ^ src1) == src1 ^ ~src2, fused into a single EONW.
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10056 
10057 // This pattern is automatically generated from aarch64_ad.m4
10058 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1) %{
  // dst = -1 ^ (src2 ^ src1) == src1 ^ ~src2, fused into a single EON.
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10074 
10075 // This pattern is automatically generated from aarch64_ad.m4
10076 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10077 // val & (-1 ^ (val >>> shift)) ==> bicw
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // dst = src1 & ~(src2 >>> src3): shift, NOT and AND fused into one
  // BICW with an LSR-shifted second operand.
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10095 
10096 // This pattern is automatically generated from aarch64_ad.m4
10097 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10098 // val & (-1 ^ (val >>> shift)) ==> bic
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // dst = src1 & ~(src2 >>> src3): shift, NOT and AND fused into one
  // BIC with an LSR-shifted second operand.
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10116 
10117 // This pattern is automatically generated from aarch64_ad.m4
10118 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10119 // val & (-1 ^ (val >> shift)) ==> bicw
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // dst = src1 & ~(src2 >> src3): shift, NOT and AND fused into one
  // BICW with an ASR-shifted second operand.
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10137 
10138 // This pattern is automatically generated from aarch64_ad.m4
10139 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10140 // val & (-1 ^ (val >> shift)) ==> bic
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // dst = src1 & ~(src2 >> src3): shift, NOT and AND fused into one
  // BIC with an ASR-shifted second operand.
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10158 
10159 // This pattern is automatically generated from aarch64_ad.m4
10160 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10161 // val & (-1 ^ (val ror shift)) ==> bicw
instruct AndI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // dst = src1 & ~(src2 ror src3): rotate, NOT and AND fused into one
  // BICW with a ROR-shifted second operand.
  match(Set dst (AndI src1 (XorI(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10179 
10180 // This pattern is automatically generated from aarch64_ad.m4
10181 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10182 // val & (-1 ^ (val ror shift)) ==> bic
instruct AndL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // dst = src1 & ~(src2 ror src3): rotate, NOT and AND fused into one
  // BIC with a ROR-shifted second operand.
  match(Set dst (AndL src1 (XorL(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10200 
10201 // This pattern is automatically generated from aarch64_ad.m4
10202 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10203 // val & (-1 ^ (val << shift)) ==> bicw
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // dst = src1 & ~(src2 << src3): shift, NOT and AND fused into one
  // BICW with an LSL-shifted second operand.
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10221 
10222 // This pattern is automatically generated from aarch64_ad.m4
10223 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10224 // val & (-1 ^ (val << shift)) ==> bic
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // dst = src1 & ~(src2 << src3): shift, NOT and AND fused into one
  // BIC with an LSL-shifted second operand.
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10242 
10243 // This pattern is automatically generated from aarch64_ad.m4
10244 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10245 // val ^ (-1 ^ (val >>> shift)) ==> eonw
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // dst = -1 ^ ((src2 >>> src3) ^ src1) == src1 ^ ~(src2 >>> src3),
  // fused into one EONW with an LSR-shifted second operand.
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10263 
10264 // This pattern is automatically generated from aarch64_ad.m4
10265 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10266 // val ^ (-1 ^ (val >>> shift)) ==> eon
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // dst = -1 ^ ((src2 >>> src3) ^ src1) == src1 ^ ~(src2 >>> src3),
  // fused into one EON with an LSR-shifted second operand.
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10284 
10285 // This pattern is automatically generated from aarch64_ad.m4
10286 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10287 // val ^ (-1 ^ (val >> shift)) ==> eonw
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // dst = -1 ^ ((src2 >> src3) ^ src1) == src1 ^ ~(src2 >> src3),
  // fused into one EONW with an ASR-shifted second operand.
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10305 
10306 // This pattern is automatically generated from aarch64_ad.m4
10307 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10308 // val ^ (-1 ^ (val >> shift)) ==> eon
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // dst = -1 ^ ((src2 >> src3) ^ src1) == src1 ^ ~(src2 >> src3),
  // fused into one EON with an ASR-shifted second operand.
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10326 
10327 // This pattern is automatically generated from aarch64_ad.m4
10328 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10329 // val ^ (-1 ^ (val ror shift)) ==> eonw
instruct XorI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // dst = -1 ^ ((src2 ror src3) ^ src1) == src1 ^ ~(src2 ror src3),
  // fused into one EONW with a ROR-shifted second operand.
  match(Set dst (XorI src4 (XorI(RotateRight src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10347 
10348 // This pattern is automatically generated from aarch64_ad.m4
10349 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10350 // val ^ (-1 ^ (val ror shift)) ==> eon
instruct XorL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // dst = -1 ^ ((src2 ror src3) ^ src1) == src1 ^ ~(src2 ror src3),
  // fused into one EON with a ROR-shifted second operand.
  match(Set dst (XorL src4 (XorL(RotateRight src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10368 
10369 // This pattern is automatically generated from aarch64_ad.m4
10370 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10371 // val ^ (-1 ^ (val << shift)) ==> eonw
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // dst = -1 ^ ((src2 << src3) ^ src1) == src1 ^ ~(src2 << src3),
  // fused into one EONW with an LSL-shifted second operand.
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10389 
10390 // This pattern is automatically generated from aarch64_ad.m4
10391 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10392 // val ^ (-1 ^ (val << shift)) ==> eon
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // dst = -1 ^ ((src2 << src3) ^ src1) == src1 ^ ~(src2 << src3),
  // fused into one EON with an LSL-shifted second operand.
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10410 
10411 // This pattern is automatically generated from aarch64_ad.m4
10412 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10413 // val | (-1 ^ (val >>> shift)) ==> ornw
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // dst = src1 | ~(src2 >>> src3): shift, NOT and OR fused into one
  // ORNW with an LSR-shifted second operand.
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10431 
10432 // This pattern is automatically generated from aarch64_ad.m4
10433 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10434 // val | (-1 ^ (val >>> shift)) ==> orn
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // dst = src1 | ~(src2 >>> src3): shift, NOT and OR fused into one
  // ORN with an LSR-shifted second operand.
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10452 
10453 // This pattern is automatically generated from aarch64_ad.m4
10454 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10455 // val | (-1 ^ (val >> shift)) ==> ornw
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // dst = src1 | ~(src2 >> src3): shift, NOT and OR fused into one
  // ORNW with an ASR-shifted second operand.
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10473 
10474 // This pattern is automatically generated from aarch64_ad.m4
10475 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10476 // val | (-1 ^ (val >> shift)) ==> orn
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // dst = src1 | ~(src2 >> src3): shift, NOT and OR fused into one
  // ORN with an ASR-shifted second operand.
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10494 
10495 // This pattern is automatically generated from aarch64_ad.m4
10496 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10497 // val | (-1 ^ (val ror shift)) ==> ornw
instruct OrI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  // dst = src1 | ~(src2 ror src3): rotate, NOT and OR fused into one
  // ORNW with a ROR-shifted second operand.
  match(Set dst (OrI src1 (XorI(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10515 
10516 // This pattern is automatically generated from aarch64_ad.m4
10517 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10518 // val | (-1 ^ (val ror shift)) ==> orn
instruct OrL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  // dst = src1 | ~(src2 ror src3): rotate, NOT and OR fused into one
  // ORN with a ROR-shifted second operand.
  match(Set dst (OrL src1 (XorL(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10536 
10537 // This pattern is automatically generated from aarch64_ad.m4
10538 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10539 // val | (-1 ^ (val << shift)) ==> ornw
// src4 == -1 (immI_M1) turns the XorI into a bitwise NOT, so the whole tree
// collapses to a single ORNW with an LSL-shifted second operand.
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f); // int shift count is mod 32, per Java semantics
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10557 
10558 // This pattern is automatically generated from aarch64_ad.m4
10559 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
10560 // val | (-1 ^ (val << shift)) ==> orn
// src4 == -1 (immL_M1) turns the XorL into a bitwise NOT, so the whole tree
// collapses to a single ORN with an LSL-shifted second operand.
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f); // long shift count is mod 64, per Java semantics
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10578 
10579 // This pattern is automatically generated from aarch64_ad.m4
10580 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 & (src2 >>> (src3 & 31)): fused into one ANDW with an LSR-shifted operand.
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10599 
10600 // This pattern is automatically generated from aarch64_ad.m4
10601 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 & (src2 >>> (src3 & 63)): fused into one ANDR with an LSR-shifted operand.
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10620 
10621 // This pattern is automatically generated from aarch64_ad.m4
10622 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 & (src2 >> (src3 & 31)): fused into one ANDW with an ASR-shifted operand.
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10641 
10642 // This pattern is automatically generated from aarch64_ad.m4
10643 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 & (src2 >> (src3 & 63)): fused into one ANDR with an ASR-shifted operand.
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10662 
10663 // This pattern is automatically generated from aarch64_ad.m4
10664 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 & (src2 << (src3 & 31)): fused into one ANDW with an LSL-shifted operand.
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10683 
10684 // This pattern is automatically generated from aarch64_ad.m4
10685 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 & (src2 << (src3 & 63)): fused into one ANDR with an LSL-shifted operand.
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10704 
10705 // This pattern is automatically generated from aarch64_ad.m4
10706 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 & rotate_right(src2, src3 & 31): fused into one ANDW with a ROR-shifted operand.
instruct AndI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10725 
10726 // This pattern is automatically generated from aarch64_ad.m4
10727 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 & rotate_right(src2, src3 & 63): fused into one ANDR with a ROR-shifted operand.
instruct AndL_reg_RotateRight_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10746 
10747 // This pattern is automatically generated from aarch64_ad.m4
10748 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 ^ (src2 >>> (src3 & 31)): fused into one EORW with an LSR-shifted operand.
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10767 
10768 // This pattern is automatically generated from aarch64_ad.m4
10769 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 ^ (src2 >>> (src3 & 63)): fused into one EOR with an LSR-shifted operand.
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10788 
10789 // This pattern is automatically generated from aarch64_ad.m4
10790 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 ^ (src2 >> (src3 & 31)): fused into one EORW with an ASR-shifted operand.
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10809 
10810 // This pattern is automatically generated from aarch64_ad.m4
10811 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 ^ (src2 >> (src3 & 63)): fused into one EOR with an ASR-shifted operand.
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10830 
10831 // This pattern is automatically generated from aarch64_ad.m4
10832 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 ^ (src2 << (src3 & 31)): fused into one EORW with an LSL-shifted operand.
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10851 
10852 // This pattern is automatically generated from aarch64_ad.m4
10853 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 ^ (src2 << (src3 & 63)): fused into one EOR with an LSL-shifted operand.
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10872 
10873 // This pattern is automatically generated from aarch64_ad.m4
10874 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 ^ rotate_right(src2, src3 & 31): fused into one EORW with a ROR-shifted operand.
instruct XorI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10893 
10894 // This pattern is automatically generated from aarch64_ad.m4
10895 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 ^ rotate_right(src2, src3 & 63): fused into one EOR with a ROR-shifted operand.
instruct XorL_reg_RotateRight_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10914 
10915 // This pattern is automatically generated from aarch64_ad.m4
10916 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 | (src2 >>> (src3 & 31)): fused into one ORRW with an LSR-shifted operand.
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10935 
10936 // This pattern is automatically generated from aarch64_ad.m4
10937 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 | (src2 >>> (src3 & 63)): fused into one ORR with an LSR-shifted operand.
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10956 
10957 // This pattern is automatically generated from aarch64_ad.m4
10958 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 | (src2 >> (src3 & 31)): fused into one ORRW with an ASR-shifted operand.
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10977 
10978 // This pattern is automatically generated from aarch64_ad.m4
10979 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 | (src2 >> (src3 & 63)): fused into one ORR with an ASR-shifted operand.
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10998 
10999 // This pattern is automatically generated from aarch64_ad.m4
11000 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 | (src2 << (src3 & 31)): fused into one ORRW with an LSL-shifted operand.
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11019 
11020 // This pattern is automatically generated from aarch64_ad.m4
11021 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 | (src2 << (src3 & 63)): fused into one ORR with an LSL-shifted operand.
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11040 
11041 // This pattern is automatically generated from aarch64_ad.m4
11042 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 | rotate_right(src2, src3 & 31): fused into one ORRW with a ROR-shifted operand.
instruct OrI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11061 
11062 // This pattern is automatically generated from aarch64_ad.m4
11063 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 | rotate_right(src2, src3 & 63): fused into one ORR with a ROR-shifted operand.
instruct OrL_reg_RotateRight_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11082 
11083 // This pattern is automatically generated from aarch64_ad.m4
11084 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 + (src2 >>> (src3 & 31)): fused into one ADDW with an LSR-shifted operand.
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11103 
11104 // This pattern is automatically generated from aarch64_ad.m4
11105 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 + (src2 >>> (src3 & 63)): fused into one ADD with an LSR-shifted operand.
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11124 
11125 // This pattern is automatically generated from aarch64_ad.m4
11126 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 + (src2 >> (src3 & 31)): fused into one ADDW with an ASR-shifted operand.
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11145 
11146 // This pattern is automatically generated from aarch64_ad.m4
11147 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 + (src2 >> (src3 & 63)): fused into one ADD with an ASR-shifted operand.
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11166 
11167 // This pattern is automatically generated from aarch64_ad.m4
11168 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 + (src2 << (src3 & 31)): fused into one ADDW with an LSL-shifted operand.
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11187 
11188 // This pattern is automatically generated from aarch64_ad.m4
11189 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 + (src2 << (src3 & 63)): fused into one ADD with an LSL-shifted operand.
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11208 
11209 // This pattern is automatically generated from aarch64_ad.m4
11210 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (src2 >>> (src3 & 31)): fused into one SUBW with an LSR-shifted operand.
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11229 
11230 // This pattern is automatically generated from aarch64_ad.m4
11231 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (src2 >>> (src3 & 63)): fused into one SUB with an LSR-shifted operand.
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11250 
11251 // This pattern is automatically generated from aarch64_ad.m4
11252 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (src2 >> (src3 & 31)): fused into one SUBW with an ASR-shifted operand.
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11271 
11272 // This pattern is automatically generated from aarch64_ad.m4
11273 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (src2 >> (src3 & 63)): fused into one SUB with an ASR-shifted operand.
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11292 
11293 // This pattern is automatically generated from aarch64_ad.m4
11294 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (src2 << (src3 & 31)): fused into one SUBW with an LSL-shifted operand.
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11313 
11314 // This pattern is automatically generated from aarch64_ad.m4
11315 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (src2 << (src3 & 63)): fused into one SUB with an LSL-shifted operand.
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11334 
11335 // This pattern is automatically generated from aarch64_ad.m4
11336 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11337 
11338 // Shift Left followed by Shift Right.
11339 // This idiom is used by the compiler for the i2b bytecode etc.
// (src << lshift) >> rshift collapsed into one SBFM (signed bitfield move).
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;             // highest source bit kept after the left shift
    int r = (rshift - lshift) & 63;  // net rotation equivalent to the combined shifts
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
11357 
11358 // This pattern is automatically generated from aarch64_ad.m4
11359 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11360 
11361 // Shift Left followed by Shift Right.
11362 // This idiom is used by the compiler for the i2b bytecode etc.
// (src << lshift) >> rshift collapsed into one SBFMW (signed bitfield move, 32-bit).
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;             // highest source bit kept after the left shift
    int r = (rshift - lshift) & 31;  // net rotation equivalent to the combined shifts
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
11380 
11381 // This pattern is automatically generated from aarch64_ad.m4
11382 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11383 
11384 // Shift Left followed by Shift Right.
11385 // This idiom is used by the compiler for the i2b bytecode etc.
// (src << lshift) >>> rshift collapsed into one UBFM (unsigned bitfield move).
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;             // highest source bit kept after the left shift
    int r = (rshift - lshift) & 63;  // net rotation equivalent to the combined shifts
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
11403 
11404 // This pattern is automatically generated from aarch64_ad.m4
11405 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11406 
11407 // Shift Left followed by Shift Right.
11408 // This idiom is used by the compiler for the i2b bytecode etc.
11409 instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
11410 %{
11411   match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
11412   ins_cost(INSN_COST * 2);
11413   format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
11414   ins_encode %{
11415     int lshift = $lshift_count$$constant & 31;
11416     int rshift = $rshift_count$$constant & 31;
11417     int s = 31 - lshift;
11418     int r = (rshift - lshift) & 31;
11419     __ ubfmw(as_Register($dst$$reg),
11420             as_Register($src$$reg),
11421             r, s);
11422   %}
11423 
11424   ins_pipe(ialu_reg_shift);
11425 %}
11426 
// Bitfield extract with shift & mask

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Matches (src >>> rshift) & mask on int, where mask is a contiguous
// low-bit mask (immI_bitmask), and emits a single UBFXW extracting
// log2(mask+1) bits starting at bit 'rshift'.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));
  // Make sure we are not going to exceed what ubfxw can do.
  // i.e. the extracted field (rshift + width) must fit within 32 bits.
  predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    // mask + 1 is a power of two (guaranteed by immI_bitmask), so the
    // field width is its exact log2.
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit counterpart of ubfxwI: (src >>> rshift) & mask on long → UBFX.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));
  // Make sure we are not going to exceed what ubfx can do.
  // i.e. the extracted field (rshift + width) must fit within 64 bits.
  predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11468 
11469 
11470 // This pattern is automatically generated from aarch64_ad.m4
11471 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11472 
11473 // We can use ubfx when extending an And with a mask when we know mask
11474 // is positive.  We know that because immI_bitmask guarantees it.
11475 instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
11476 %{
11477   match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
11478   // Make sure we are not going to exceed what ubfxw can do.
11479   predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));
11480 
11481   ins_cost(INSN_COST * 2);
11482   format %{ "ubfx $dst, $src, $rshift, $mask" %}
11483   ins_encode %{
11484     int rshift = $rshift$$constant & 31;
11485     intptr_t mask = $mask$$constant;
11486     int width = exact_log2(mask+1);
11487     __ ubfx(as_Register($dst$$reg),
11488             as_Register($src$$reg), rshift, width);
11489   %}
11490   ins_pipe(ialu_reg_shift);
11491 %}
11492 
11493 
11494 // This pattern is automatically generated from aarch64_ad.m4
11495 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11496 
11497 // We can use ubfiz when masking by a positive number and then left shifting the result.
11498 // We know that the mask is positive because immI_bitmask guarantees it.
11499 instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
11500 %{
11501   match(Set dst (LShiftI (AndI src mask) lshift));
11502   predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 31)) <= (31 + 1));
11503 
11504   ins_cost(INSN_COST);
11505   format %{ "ubfizw $dst, $src, $lshift, $mask" %}
11506   ins_encode %{
11507     int lshift = $lshift$$constant & 31;
11508     intptr_t mask = $mask$$constant;
11509     int width = exact_log2(mask+1);
11510     __ ubfizw(as_Register($dst$$reg),
11511           as_Register($src$$reg), lshift, width);
11512   %}
11513   ins_pipe(ialu_reg_shift);
11514 %}
11515 
11516 // This pattern is automatically generated from aarch64_ad.m4
11517 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11518 
11519 // We can use ubfiz when masking by a positive number and then left shifting the result.
11520 // We know that the mask is positive because immL_bitmask guarantees it.
11521 instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
11522 %{
11523   match(Set dst (LShiftL (AndL src mask) lshift));
11524   predicate((exact_log2_long(n->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));
11525 
11526   ins_cost(INSN_COST);
11527   format %{ "ubfiz $dst, $src, $lshift, $mask" %}
11528   ins_encode %{
11529     int lshift = $lshift$$constant & 63;
11530     intptr_t mask = $mask$$constant;
11531     int width = exact_log2_long(mask+1);
11532     __ ubfiz(as_Register($dst$$reg),
11533           as_Register($src$$reg), lshift, width);
11534   %}
11535   ins_pipe(ialu_reg_shift);
11536 %}
11537 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// As ubfizwI, but with an int-to-long conversion on top of the result.
instruct ubfizwIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (LShiftI (AndI src mask) lshift)));
  // NOTE(review): bound is <= 31 (not 31 + 1) — presumably so the int sign
  // bit stays clear, making UBFIZW's zero-extension agree with the
  // sign-extending ConvI2L; confirm against aarch64_ad.m4.
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// Long mask+shift narrowed to int: the <= 31 bound keeps the whole
// inserted field within the low 32 bits, so the ConvL2I truncation is free.
instruct ubfizLConvL2I(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
%{
  match(Set dst (ConvL2I (LShiftL (AndL src mask) lshift)));
  predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11581 
11582 
11583 // This pattern is automatically generated from aarch64_ad.m4
11584 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11585 
11586 // If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
11587 instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
11588 %{
11589   match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
11590   predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));
11591 
11592   ins_cost(INSN_COST);
11593   format %{ "ubfiz $dst, $src, $lshift, $mask" %}
11594   ins_encode %{
11595     int lshift = $lshift$$constant & 63;
11596     intptr_t mask = $mask$$constant;
11597     int width = exact_log2(mask+1);
11598     __ ubfiz(as_Register($dst$$reg),
11599              as_Register($src$$reg), lshift, width);
11600   %}
11601   ins_pipe(ialu_reg_shift);
11602 %}
11603 
11604 // This pattern is automatically generated from aarch64_ad.m4
11605 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11606 
11607 // If there is a convert L to I block between and AndL and a LShiftI, we can also match ubfiz
11608 instruct ubfizLConvL2Ix(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
11609 %{
11610   match(Set dst (LShiftI (ConvL2I (AndL src mask)) lshift));
11611   predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 31)) <= 31);
11612 
11613   ins_cost(INSN_COST);
11614   format %{ "ubfiz $dst, $src, $lshift, $mask" %}
11615   ins_encode %{
11616     int lshift = $lshift$$constant & 31;
11617     intptr_t mask = $mask$$constant;
11618     int width = exact_log2(mask+1);
11619     __ ubfiz(as_Register($dst$$reg),
11620              as_Register($src$$reg), lshift, width);
11621   %}
11622   ins_pipe(ialu_reg_shift);
11623 %}
11624 
11625 // This pattern is automatically generated from aarch64_ad.m4
11626 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11627 
11628 // Can skip int2long conversions after AND with small bitmask
11629 instruct ubfizIConvI2LAndI(iRegLNoSp dst, iRegI src, immI_bitmask msk)
11630 %{
11631   match(Set dst (ConvI2L (AndI src msk)));
11632   ins_cost(INSN_COST);
11633   format %{ "ubfiz $dst, $src, 0, exact_log2($msk + 1) " %}
11634   ins_encode %{
11635     __ ubfiz(as_Register($dst$$reg), as_Register($src$$reg), 0, exact_log2($msk$$constant + 1));
11636   %}
11637   ins_pipe(ialu_reg_shift);
11638 %}
11639 
11640 
// Rotations

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src1 << lshift) | (src2 >>> rshift) where lshift + rshift == 64:
// a single EXTR extracts the rotated 64-bit result.  (With src1 == src2
// this is a rotate right.)
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  // Shift counts (mod 64) must sum to 0 mod 64 for EXTR to apply.
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit variant of extrOrL; emits EXTRW.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Same rotate idiom expressed with AddL instead of OrL: since the shifted
// pieces occupy disjoint bit ranges, add and or are equivalent here.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit variant of extrAddL; emits EXTRW.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11713 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate right by immediate (int): EXTRW with both sources equal to src
// implements ror.
instruct rorI_imm(iRegINoSp dst, iRegI src, immI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ extrw(as_Register($dst$$reg), as_Register($src$$reg), as_Register($src$$reg),
               $shift$$constant & 0x1f);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate right by immediate (long): EXTR with both sources equal to src.
instruct rorL_imm(iRegLNoSp dst, iRegL src, immI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ extr(as_Register($dst$$reg), as_Register($src$$reg), as_Register($src$$reg),
               $shift$$constant & 0x3f);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate right by register amount (int): RORVW.
instruct rorI_reg(iRegINoSp dst, iRegI src, iRegI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg), as_Register($shift$$reg));
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate right by register amount (long): RORV.
instruct rorL_reg(iRegLNoSp dst, iRegL src, iRegI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ rorv(as_Register($dst$$reg), as_Register($src$$reg), as_Register($shift$$reg));
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
11775 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate left by register amount (int).  AArch64 has no rol instruction,
// so rol(x, n) is implemented as ror(x, -n): negate the count into
// rscratch1 and use RORVW.
instruct rolI_reg(iRegINoSp dst, iRegI src, iRegI shift)
%{
  match(Set dst (RotateLeft src shift));

  ins_cost(INSN_COST);
  format %{ "rol    $dst, $src, $shift" %}

  ins_encode %{
     __ subw(rscratch1, zr, as_Register($shift$$reg));
     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg), rscratch1);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long variant of rolI_reg: rol(x, n) == ror(x, -n) via RORV.
instruct rolL_reg(iRegLNoSp dst, iRegL src, iRegI shift)
%{
  match(Set dst (RotateLeft src shift));

  ins_cost(INSN_COST);
  format %{ "rol    $dst, $src, $shift" %}

  ins_encode %{
     __ subw(rscratch1, zr, as_Register($shift$$reg));
     __ rorv(as_Register($dst$$reg), as_Register($src$$reg), rscratch1);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
11807 
11808 
// Add/subtract (extended)

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 + (long)src2: folds the i2l conversion into ADD's sxtw extended
// operand form.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 - (long)src2: folds the i2l conversion into SUB's sxtw extended
// operand form.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
11840 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// The patterns below recognize narrowing-extension idioms written as a
// shift pair, e.g. (x << 16) >> 16 == (short)x, and fold them into the
// add/sub extended-register forms (sxtb/sxth/sxtw/uxtb).

// src1 + (short)src2: (src2 << 16) >> 16 is a sign-extend from 16 bits.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 + (byte)src2: shift pair of 24 sign-extends from 8 bits.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Unsigned variant: logical right shift zero-extends from 8 bits (uxtb).
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long forms: shift pairs of 48/32/56 bits sign-extend from 16/32/8 bits.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Unsigned long variant: zero-extend from 8 bits (uxtb).
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
11945 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// The patterns below recognize zero-extension written as a mask,
// e.g. x & 0xff == zero-extend from 8 bits, and fold it into the
// add/sub extended-register forms (uxtb/uxth/uxtw).

// src1 + (src2 & 0xff) → addw with uxtb-extended src2.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 + (src2 & 0xffff) → addw with uxth-extended src2.
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long forms: masks 0xff / 0xffff / 0xffffffff → uxtb / uxth / uxtw.
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtraction counterparts of the patterns above.
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12095 
12096 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// The patterns below fold an extend (via shift pair) PLUS an extra left
// shift into the add/sub extended-register form with a shift amount,
// e.g. src1 + (((src2 << 56) >> 56) << lshift2) → add dst, src1, src2,
// sxtb #lshift2.  lshift2 is an immIExt — presumably restricted to the
// 0..4 shift range the extended-operand encoding allows; confirm against
// the immIExt operand definition.
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtraction counterparts (long).
instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit (int) counterparts: addw/subw with extended, shifted operand.
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12216 
12217 // This pattern is automatically generated from aarch64_ad.m4
12218 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12219 instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
12220 %{
12221   match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
12222   ins_cost(1.9 * INSN_COST);
12223   format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}
12224 
12225    ins_encode %{
12226      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12227             as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
12228    %}
12229   ins_pipe(ialu_reg_reg_shift);
12230 %}
12231 
12232 // This pattern is automatically generated from aarch64_ad.m4
12233 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12234 instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
12235 %{
12236   match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
12237   ins_cost(1.9 * INSN_COST);
12238   format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}
12239 
12240    ins_encode %{
12241      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12242             as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
12243    %}
12244   ins_pipe(ialu_reg_reg_shift);
12245 %}
12246 
12247 // This pattern is automatically generated from aarch64_ad.m4
12248 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12249 instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
12250 %{
12251   match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
12252   ins_cost(1.9 * INSN_COST);
12253   format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}
12254 
12255    ins_encode %{
12256      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12257             as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
12258    %}
12259   ins_pipe(ialu_reg_reg_shift);
12260 %}
12261 
12262 // This pattern is automatically generated from aarch64_ad.m4
12263 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12264 instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
12265 %{
12266   match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
12267   ins_cost(1.9 * INSN_COST);
12268   format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}
12269 
12270    ins_encode %{
12271      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12272             as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
12273    %}
12274   ins_pipe(ialu_reg_reg_shift);
12275 %}
12276 
// The patterns below match "(src2 & mask) << lshift" where the mask keeps the
// low 8/16/32 bits (i.e. a zero-extension), and fuse it with the surrounding
// add/sub into a single ADD/SUB (extended register) instruction using
// uxtb/uxth/uxtw with a left shift of 0..4 (immIExt).

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 + (uxtb(src2) << lshift): mask 0xFF keeps the low byte.
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 + (uxth(src2) << lshift): mask 0xFFFF keeps the low half-word.
instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 + (uxtw(src2) << lshift): mask 0xFFFFFFFF keeps the low word.
instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (uxtb(src2) << lshift).
instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (uxth(src2) << lshift).
instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = src1 - (uxtw(src2) << lshift).
instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: dst = src1 + (uxtb(src2) << lshift).
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: dst = src1 + (uxth(src2) << lshift).
instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: dst = src1 - (uxtb(src2) << lshift).
instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit: dst = src1 - (uxth(src2) << lshift).
instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12426 
// Conditional-select building blocks. None of these has a match rule, so the
// matcher never selects them directly; they exist only to be instantiated by
// the expand rules of the MinI/MaxI patterns that follow.

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (flags: lt) ? src1 : src2
instruct cmovI_reg_reg_lt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE src2, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 lt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             $src2$$Register,
             Assembler::LT);
  %}
  ins_pipe(icond_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (flags: gt) ? src1 : src2
instruct cmovI_reg_reg_gt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE src2, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 gt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             $src2$$Register,
             Assembler::GT);
  %}
  ins_pipe(icond_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (flags: lt) ? src1 : 0  (zr as the false operand)
instruct cmovI_reg_imm0_lt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, zr lt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (flags: gt) ? src1 : 0
instruct cmovI_reg_imm0_gt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, zr gt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (flags: le) ? src1 : 1  (csinc with zr yields zr+1 = 1 on the false path)
instruct cmovI_reg_imm1_le(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, $src1, zr le\t"  %}

  ins_encode %{
    __ csincw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LE);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (flags: gt) ? src1 : 1
instruct cmovI_reg_imm1_gt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, $src1, zr gt\t"  %}

  ins_encode %{
    __ csincw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (flags: lt) ? src1 : -1  (csinv with zr yields ~0 = -1 on the false path)
instruct cmovI_reg_immM1_lt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csinvw $dst, $src1, zr lt\t"  %}

  ins_encode %{
    __ csinvw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LT);
  %}
  ins_pipe(icond_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// dst = (flags: ge) ? src1 : -1
instruct cmovI_reg_immM1_ge(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csinvw $dst, $src1, zr ge\t"  %}

  ins_encode %{
    __ csinvw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GE);
  %}
  ins_pipe(icond_reg);
%}
12562 
// MinI/MaxI against the constants 0, 1 and -1 expand to a compare against
// zero followed by one of the conditional-select building blocks above
// (csel / csinc / csinv), avoiding a materialized constant. Each pattern has
// a mirror for the commuted operand order of the Min/Max node.

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(src, 0): cmp src, 0; dst = (src < 0) ? src : 0.
instruct minI_reg_imm0(iRegINoSp dst, iRegIorL2I src, immI0 imm)
%{
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct minI_imm0_reg(iRegINoSp dst, immI0 imm, iRegIorL2I src)
%{
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(src, 1): cmp src, 0; dst = (src <= 0) ? src : 1 (csinc provides the 1).
instruct minI_reg_imm1(iRegINoSp dst, iRegIorL2I src, immI_1 imm)
%{
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_le(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct minI_imm1_reg(iRegINoSp dst, immI_1 imm, iRegIorL2I src)
%{
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_le(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// min(src, -1): cmp src, 0; dst = (src < 0) ? src : -1 (csinv provides the -1).
instruct minI_reg_immM1(iRegINoSp dst, iRegIorL2I src, immI_M1 imm)
%{
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct minI_immM1_reg(iRegINoSp dst, immI_M1 imm, iRegIorL2I src)
%{
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_lt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(src, 0): cmp src, 0; dst = (src > 0) ? src : 0.
instruct maxI_reg_imm0(iRegINoSp dst, iRegIorL2I src, immI0 imm)
%{
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct maxI_imm0_reg(iRegINoSp dst, immI0 imm, iRegIorL2I src)
%{
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(src, 1): cmp src, 0; dst = (src > 0) ? src : 1 (csinc provides the 1).
instruct maxI_reg_imm1(iRegINoSp dst, iRegIorL2I src, immI_1 imm)
%{
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct maxI_imm1_reg(iRegINoSp dst, immI_1 imm, iRegIorL2I src)
%{
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_gt(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// max(src, -1): cmp src, 0; dst = (src >= 0) ? src : -1 (csinv provides the -1).
instruct maxI_reg_immM1(iRegINoSp dst, iRegIorL2I src, immI_M1 imm)
%{
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_ge(dst, src, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct maxI_immM1_reg(iRegINoSp dst, immI_M1 imm, iRegIorL2I src)
%{
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_ge(dst, src, cr);
  %}
%}
12718 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Reverse the bit order of a 32-bit value (RBIT, W-register form).
instruct bits_reverse_I(iRegINoSp dst, iRegIorL2I src)
%{
  match(Set dst (ReverseI src));
  ins_cost(INSN_COST);
  format %{ "rbitw  $dst, $src" %}
  ins_encode %{
    __ rbitw($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Reverse the bit order of a 64-bit value (RBIT, X-register form).
instruct bits_reverse_L(iRegLNoSp dst, iRegL src)
%{
  match(Set dst (ReverseL src));
  ins_cost(INSN_COST);
  format %{ "rbit  $dst, $src" %}
  ins_encode %{
    __ rbit($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}


// END This section of the file is automatically generated. Do not edit --------------
12747 
12748 
12749 // ============================================================================
12750 // Floating Point Arithmetic Instructions
12751 
12752 instruct addHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
12753   match(Set dst (AddHF src1 src2));
12754   format %{ "faddh $dst, $src1, $src2" %}
12755   ins_encode %{
12756     __ faddh($dst$$FloatRegister,
12757              $src1$$FloatRegister,
12758              $src2$$FloatRegister);
12759   %}
12760   ins_pipe(fp_dop_reg_reg_s);
12761 %}
12762 
12763 instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
12764   match(Set dst (AddF src1 src2));
12765 
12766   ins_cost(INSN_COST * 5);
12767   format %{ "fadds   $dst, $src1, $src2" %}
12768 
12769   ins_encode %{
12770     __ fadds(as_FloatRegister($dst$$reg),
12771              as_FloatRegister($src1$$reg),
12772              as_FloatRegister($src2$$reg));
12773   %}
12774 
12775   ins_pipe(fp_dop_reg_reg_s);
12776 %}
12777 
12778 instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
12779   match(Set dst (AddD src1 src2));
12780 
12781   ins_cost(INSN_COST * 5);
12782   format %{ "faddd   $dst, $src1, $src2" %}
12783 
12784   ins_encode %{
12785     __ faddd(as_FloatRegister($dst$$reg),
12786              as_FloatRegister($src1$$reg),
12787              as_FloatRegister($src2$$reg));
12788   %}
12789 
12790   ins_pipe(fp_dop_reg_reg_d);
12791 %}
12792 
12793 instruct subHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
12794   match(Set dst (SubHF src1 src2));
12795   format %{ "fsubh $dst, $src1, $src2" %}
12796   ins_encode %{
12797     __ fsubh($dst$$FloatRegister,
12798              $src1$$FloatRegister,
12799              $src2$$FloatRegister);
12800   %}
12801   ins_pipe(fp_dop_reg_reg_s);
12802 %}
12803 
12804 instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
12805   match(Set dst (SubF src1 src2));
12806 
12807   ins_cost(INSN_COST * 5);
12808   format %{ "fsubs   $dst, $src1, $src2" %}
12809 
12810   ins_encode %{
12811     __ fsubs(as_FloatRegister($dst$$reg),
12812              as_FloatRegister($src1$$reg),
12813              as_FloatRegister($src2$$reg));
12814   %}
12815 
12816   ins_pipe(fp_dop_reg_reg_s);
12817 %}
12818 
12819 instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
12820   match(Set dst (SubD src1 src2));
12821 
12822   ins_cost(INSN_COST * 5);
12823   format %{ "fsubd   $dst, $src1, $src2" %}
12824 
12825   ins_encode %{
12826     __ fsubd(as_FloatRegister($dst$$reg),
12827              as_FloatRegister($src1$$reg),
12828              as_FloatRegister($src2$$reg));
12829   %}
12830 
12831   ins_pipe(fp_dop_reg_reg_d);
12832 %}
12833 
// dst = src1 * src2 (half-precision float).
instruct mulHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulHF src1 src2));
  format %{ "fmulh $dst, $src1, $src2" %}
  ins_encode %{
    __ fmulh($dst$$FloatRegister,
             $src1$$FloatRegister,
             $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}

// dst = src1 * src2 (single precision).
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// dst = src1 * src2 (double precision).
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12874 
// Fused multiply-add patterns. The Fma* ideal node carries the addend as its
// first input, hence the (FmaX src3 (Binary src1 src2)) match shape.
// Emission is guarded only by assert(UseFMA, ...); the selection predicate,
// if any, is not visible in this section.

// src1 * src2 + src3 (half-precision float)
instruct maddHF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  match(Set dst (FmaHF src3 (Binary src1 src2)));
  format %{ "fmaddh $dst, $src1, $src2, $src3" %}
  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmaddh($dst$$FloatRegister,
              $src1$$FloatRegister,
              $src2$$FloatRegister,
              $src3$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// src1 * src2 + src3
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 + src3
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12922 
// Negated FMA variants. Mapping of ideal shapes to A64 instructions:
//   FmaX src3 (src1 * -src2)          -> FMSUB  (src3 - src1*src2)
//   FmaX -src3 (src1 * -src2)         -> FNMADD (-(src1*src2) - src3)
//   FmaX -src3 (src1 * src2)          -> FNMSUB (src1*src2 - src3)

// src1 * (-src2) + src3
// "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3"
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * (-src2) + src3
// "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3"
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * (-src2) - src3
// "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * (-src2) - src3
// "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
// NOTE(review): the immF0 'zero' operand below is referenced by neither the
// match rule nor the encoding — presumably vestigial; confirm against history.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
// NOTE(review): the immD0 'zero' operand below is likewise unused.
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    // n.b. insn name should be fnmsubd
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13029 
13030 // Math.max(HH)H (half-precision float)
13031 instruct maxHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
13032   match(Set dst (MaxHF src1 src2));
13033   format %{ "fmaxh $dst, $src1, $src2" %}
13034   ins_encode %{
13035     __ fmaxh($dst$$FloatRegister,
13036              $src1$$FloatRegister,
13037              $src2$$FloatRegister);
13038   %}
13039   ins_pipe(fp_dop_reg_reg_s);
13040 %}
13041 
// Math.min(HH)H (half-precision float)
// Maps MinHF straight onto the half-precision FMIN form.
instruct minHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinHF src1 src2));
  format %{ "fminh $dst, $src1, $src2" %}
  ins_encode %{
    __ fminh($dst$$FloatRegister,
             $src1$$FloatRegister,
             $src2$$FloatRegister);
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}
13053 
// Math.max(FF)F
// Single-precision max via FMAXS.
instruct maxF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxF src1 src2));

  format %{ "fmaxs   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
13067 
// Math.min(FF)F
// Single-precision min via FMINS.
instruct minF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinF src1 src2));

  format %{ "fmins   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
13081 
// Math.max(DD)D
// Double-precision max via FMAXD.
instruct maxD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MaxD src1 src2));

  format %{ "fmaxd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13095 
// Math.min(DD)D
// Double-precision min via FMIND.
instruct minD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MinD src1 src2));

  format %{ "fmind   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13109 
// Half-precision float divide.
instruct divHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivHF src1  src2));
  format %{ "fdivh $dst, $src1, $src2" %}
  ins_encode %{
    __ fdivh($dst$$FloatRegister,
             $src1$$FloatRegister,
             $src2$$FloatRegister);
  %}
  ins_pipe(fp_div_s);
%}
13120 
// Single-precision float divide. Costed high: FDIV is iterative hardware.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}
13135 
// Double-precision float divide. Costed higher than the single form.
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
13150 
// Single-precision negate (flips the sign bit via FNEG).
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  format %{ "fneg   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
13164 
// Double-precision negate (flips the sign bit via FNEG).
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13178 
// Math.abs(I)I: compare against zero, then conditionally negate when
// negative. Clobbers the flags register (KILL cr).
instruct absI_reg(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (AbsI src));

  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $src, zr\n\t"
            "cnegw $dst, $src, Assembler::LT\t# int abs"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cnegw(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
13195 
// Math.abs(J)J: 64-bit version of the compare-and-conditional-negate
// sequence above. Clobbers the flags register (KILL cr).
instruct absL_reg(iRegLNoSp dst, iRegL src, rFlagsReg cr)
%{
  match(Set dst (AbsL src));

  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmp  $src, zr\n\t"
            "cneg $dst, $src, Assembler::LT\t# long abs"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cneg(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
13212 
// Math.abs(F)F via FABS (clears the sign bit).
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
13225 
// Math.abs(D)D via FABS (clears the sign bit).
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13238 
// abs(src1 - src2), single-precision: fuses AbsF(SubF) into one FABD.
instruct absdF_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AbsF (SubF src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "fabds   $dst, $src1, $src2" %}
  ins_encode %{
    __ fabds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
13252 
// abs(src1 - src2), double-precision: fuses AbsD(SubD) into one FABD.
instruct absdD_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AbsD (SubD src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "fabdd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fabdd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13266 
13267 instruct sqrtD_reg(vRegD dst, vRegD src) %{
13268   match(Set dst (SqrtD src));
13269 
13270   ins_cost(INSN_COST * 50);
13271   format %{ "fsqrtd  $dst, $src" %}
13272   ins_encode %{
13273     __ fsqrtd(as_FloatRegister($dst$$reg),
13274              as_FloatRegister($src$$reg));
13275   %}
13276 
13277   ins_pipe(fp_div_s);
13278 %}
13279 
13280 instruct sqrtF_reg(vRegF dst, vRegF src) %{
13281   match(Set dst (SqrtF src));
13282 
13283   ins_cost(INSN_COST * 50);
13284   format %{ "fsqrts  $dst, $src" %}
13285   ins_encode %{
13286     __ fsqrts(as_FloatRegister($dst$$reg),
13287              as_FloatRegister($src$$reg));
13288   %}
13289 
13290   ins_pipe(fp_div_d);
13291 %}
13292 
// Half-precision square root via FSQRT.
instruct sqrtHF_reg(vRegF dst, vRegF src) %{
  match(Set dst (SqrtHF src));
  format %{ "fsqrth $dst, $src" %}
  ins_encode %{
    __ fsqrth($dst$$FloatRegister,
              $src$$FloatRegister);
  %}
  ins_pipe(fp_div_s);
%}
13302 
13303 // Math.rint, floor, ceil
13304 instruct roundD_reg(vRegD dst, vRegD src, immI rmode) %{
13305   match(Set dst (RoundDoubleMode src rmode));
13306   format %{ "frint  $dst, $src, $rmode" %}
13307   ins_encode %{
13308     switch ($rmode$$constant) {
13309       case RoundDoubleModeNode::rmode_rint:
13310         __ frintnd(as_FloatRegister($dst$$reg),
13311                    as_FloatRegister($src$$reg));
13312         break;
13313       case RoundDoubleModeNode::rmode_floor:
13314         __ frintmd(as_FloatRegister($dst$$reg),
13315                    as_FloatRegister($src$$reg));
13316         break;
13317       case RoundDoubleModeNode::rmode_ceil:
13318         __ frintpd(as_FloatRegister($dst$$reg),
13319                    as_FloatRegister($src$$reg));
13320         break;
13321     }
13322   %}
13323   ins_pipe(fp_uop_d);
13324 %}
13325 
// Math.copySign(DD)D: build a sign-bit-only mask from -0.0 (fnegd of the
// zero operand), then bit-select the sign from src2 and the rest from src1.
instruct copySignD_reg(vRegD dst, vRegD src1, vRegD src2, vRegD zero) %{
  match(Set dst (CopySignD src1 (Binary src2 zero)));
  effect(TEMP_DEF dst, USE src1, USE src2, USE zero);
  format %{ "CopySignD  $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg),
                  zero = as_FloatRegister($zero$$reg);
    // fnegd(zero) yields -0.0, i.e. a mask with only the sign bit set.
    __ fnegd(dst, zero);
    // For mask bits set in dst take bits from src2 (the sign), else src1.
    __ bsl(dst, __ T8B, src2, src1);
  %}
  ins_pipe(fp_uop_d);
%}
13340 
// Math.copySign(FF)F: materialize a 0x80000000 sign mask with MOVI
// (0x80 shifted left 24), then bit-select the sign from src2.
instruct copySignF_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (CopySignF src1 src2));
  effect(TEMP_DEF dst, USE src1, USE src2);
  format %{ "CopySignF  $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg);
    __ movi(dst, __ T2S, 0x80, 24);
    // For mask bits set in dst take bits from src2 (the sign), else src1.
    __ bsl(dst, __ T8B, src2, src1);
  %}
  ins_pipe(fp_uop_d);
%}
13354 
// Math.signum(D)D: returns +-1.0 for nonzero finite input, and the input
// itself for +-0.0 and NaN, using a compare/shift/bit-select sequence.
instruct signumD_reg(vRegD dst, vRegD src, vRegD zero, vRegD one) %{
  match(Set dst (SignumD src (Binary zero one)));
  effect(TEMP_DEF dst, USE src, USE zero, USE one);
  format %{ "signumD  $dst, $src" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg),
                  dst = as_FloatRegister($dst$$reg),
                  zero = as_FloatRegister($zero$$reg),
                  one = as_FloatRegister($one$$reg);
    __ facgtd(dst, src, zero); // dst=0 for +-0.0 and NaN. 0xFFF..F otherwise
    __ ushrd(dst, dst, 1);     // dst=0 for +-0.0 and NaN. 0x7FF..F otherwise
    // Bit selection instruction gets bit from "one" for each enabled bit in
    // "dst", otherwise gets a bit from "src". For "src" that contains +-0.0 or
    // NaN the whole "src" will be copied because "dst" is zero. For all other
    // "src" values dst is 0x7FF..F, which means only the sign bit is copied
    // from "src", and all other bits are copied from 1.0.
    __ bsl(dst, __ T8B, one, src);
  %}
  ins_pipe(fp_uop_d);
%}
13375 
// Math.signum(F)F: single-precision analogue of signumD above.
instruct signumF_reg(vRegF dst, vRegF src, vRegF zero, vRegF one) %{
  match(Set dst (SignumF src (Binary zero one)));
  effect(TEMP_DEF dst, USE src, USE zero, USE one);
  format %{ "signumF  $dst, $src" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg),
                  dst = as_FloatRegister($dst$$reg),
                  zero = as_FloatRegister($zero$$reg),
                  one = as_FloatRegister($one$$reg);
    __ facgts(dst, src, zero);    // dst=0 for +-0.0 and NaN. 0xFFF..F otherwise
    __ ushr(dst, __ T2S, dst, 1); // dst=0 for +-0.0 and NaN. 0x7FF..F otherwise
    // Bit selection instruction gets bit from "one" for each enabled bit in
    // "dst", otherwise gets a bit from "src". For "src" that contains +-0.0 or
    // NaN the whole "src" will be copied because "dst" is zero. For all other
    // "src" values dst is 0x7FF..F, which means only the sign bit is copied
    // from "src", and all other bits are copied from 1.0.
    __ bsl(dst, __ T8B, one, src);
  %}
  ins_pipe(fp_uop_d);
%}
13396 
// Thread.onSpinWait() intrinsic; the macro assembler emits the configured
// spin-wait hint sequence.
instruct onspinwait() %{
  match(OnSpinWait);
  ins_cost(INSN_COST);

  format %{ "onspinwait" %}

  ins_encode %{
    __ spin_wait();
  %}
  ins_pipe(pipe_class_empty);
%}
13408 
13409 // ============================================================================
13410 // Logical Instructions
13411 
13412 // Integer Logical Instructions
13413 
13414 // And Instructions
13415 
13416 
// Int AND, register-register.
// NOTE(review): the 'cr' operand is not referenced and has no effect()
// clause — presumably a historic leftover; confirm against adlc.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13431 
13432 instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
13433   match(Set dst (AndI src1 src2));
13434 
13435   format %{ "andsw  $dst, $src1, $src2\t# int" %}
13436 
13437   ins_cost(INSN_COST);
13438   ins_encode %{
13439     __ andw(as_Register($dst$$reg),
13440             as_Register($src1$$reg),
13441             (uint64_t)($src2$$constant));
13442   %}
13443 
13444   ins_pipe(ialu_reg_imm);
13445 %}
13446 
13447 // Or Instructions
13448 
// Int OR, register-register.
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13463 
// Int OR with a logical-immediate operand.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13478 
13479 // Xor Instructions
13480 
// Int XOR, register-register.
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13495 
// Int XOR with a logical-immediate operand.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13510 
13511 // Long Logical Instructions
13512 // TODO
13513 
13514 instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
13515   match(Set dst (AndL src1 src2));
13516 
13517   format %{ "and  $dst, $src1, $src2\t# int" %}
13518 
13519   ins_cost(INSN_COST);
13520   ins_encode %{
13521     __ andr(as_Register($dst$$reg),
13522             as_Register($src1$$reg),
13523             as_Register($src2$$reg));
13524   %}
13525 
13526   ins_pipe(ialu_reg_reg);
13527 %}
13528 
13529 instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
13530   match(Set dst (AndL src1 src2));
13531 
13532   format %{ "and  $dst, $src1, $src2\t# int" %}
13533 
13534   ins_cost(INSN_COST);
13535   ins_encode %{
13536     __ andr(as_Register($dst$$reg),
13537             as_Register($src1$$reg),
13538             (uint64_t)($src2$$constant));
13539   %}
13540 
13541   ins_pipe(ialu_reg_imm);
13542 %}
13543 
13544 // Or Instructions
13545 
13546 instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
13547   match(Set dst (OrL src1 src2));
13548 
13549   format %{ "orr  $dst, $src1, $src2\t# int" %}
13550 
13551   ins_cost(INSN_COST);
13552   ins_encode %{
13553     __ orr(as_Register($dst$$reg),
13554            as_Register($src1$$reg),
13555            as_Register($src2$$reg));
13556   %}
13557 
13558   ins_pipe(ialu_reg_reg);
13559 %}
13560 
13561 instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
13562   match(Set dst (OrL src1 src2));
13563 
13564   format %{ "orr  $dst, $src1, $src2\t# int" %}
13565 
13566   ins_cost(INSN_COST);
13567   ins_encode %{
13568     __ orr(as_Register($dst$$reg),
13569            as_Register($src1$$reg),
13570            (uint64_t)($src2$$constant));
13571   %}
13572 
13573   ins_pipe(ialu_reg_imm);
13574 %}
13575 
13576 // Xor Instructions
13577 
13578 instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
13579   match(Set dst (XorL src1 src2));
13580 
13581   format %{ "eor  $dst, $src1, $src2\t# int" %}
13582 
13583   ins_cost(INSN_COST);
13584   ins_encode %{
13585     __ eor(as_Register($dst$$reg),
13586            as_Register($src1$$reg),
13587            as_Register($src2$$reg));
13588   %}
13589 
13590   ins_pipe(ialu_reg_reg);
13591 %}
13592 
13593 instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
13594   match(Set dst (XorL src1 src2));
13595 
13596   ins_cost(INSN_COST);
13597   format %{ "eor  $dst, $src1, $src2\t# int" %}
13598 
13599   ins_encode %{
13600     __ eor(as_Register($dst$$reg),
13601            as_Register($src1$$reg),
13602            (uint64_t)($src2$$constant));
13603   %}
13604 
13605   ins_pipe(ialu_reg_imm);
13606 %}
13607 
// Sign-extend int to long: SBFM with imms=31 is the SXTW alias.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
13619 
// this pattern occurs in bigmath arithmetic
// Zero-extend int to long: (ConvI2L src) & 0xFFFFFFFF collapses to a single
// UBFM (the UXTW form) instead of a sign-extend plus mask.
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
13633 
// Truncate long to int: a 32-bit register move keeps the low word and
// zeroes the upper half.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
13646 
// Narrow double to float via FCVT.
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}
13659 
// Widen float to double via FCVT.
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}
13672 
// float -> int: FCVTZS (convert toward zero, signed, 32-bit result).
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}
13685 
// float -> long: FCVTZS (convert toward zero, signed, 64-bit result).
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}
13698 
// float -> Float16 bits: narrow in an FP temp, then move the 16-bit result
// into a general register (macro routine handles both steps).
instruct convF2HF_reg_reg(iRegINoSp dst, vRegF src, vRegF tmp) %{
  match(Set dst (ConvF2HF src));
  format %{ "fcvt $tmp, $src\t# convert single to half precision\n\t"
            "smov $dst, $tmp\t# move result from $tmp to $dst"
  %}
  effect(TEMP tmp);
  ins_encode %{
      __ flt_to_flt16($dst$$Register, $src$$FloatRegister, $tmp$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
13710 
// Float16 bits -> float: move the 16-bit pattern into an FP temp, then widen
// (macro routine handles both steps).
instruct convHF2F_reg_reg(vRegF dst, iRegINoSp src, vRegF tmp) %{
  match(Set dst (ConvHF2F src));
  format %{ "mov $tmp, $src\t# move source from $src to $tmp\n\t"
            "fcvt $dst, $tmp\t# convert half to single precision"
  %}
  effect(TEMP tmp);
  ins_encode %{
      __ flt16_to_flt($dst$$FloatRegister, $src$$Register, $tmp$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
13722 
// int -> float: SCVTF (signed convert, 32-bit source).
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}
13735 
// long -> float: SCVTF (signed convert, 64-bit source).
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}
13748 
// double -> int: FCVTZS (convert toward zero, signed, 32-bit result).
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}
13761 
// double -> long: FCVTZS (convert toward zero, signed, 64-bit result).
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}
13774 
// int -> double: SCVTF (signed convert, 32-bit source).
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}
13787 
// long -> double: SCVTF (signed convert, 64-bit source).
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
13800 
// Math.round(D)J: delegated to the macro-assembler routine; needs an FP
// temp and clobbers flags.
instruct round_double_reg(iRegLNoSp dst, vRegD src, vRegD ftmp, rFlagsReg cr)
%{
  match(Set dst (RoundD src));
  effect(TEMP_DEF dst, TEMP ftmp, KILL cr);
  format %{ "java_round_double $dst,$src"%}
  ins_encode %{
    __ java_round_double($dst$$Register, as_FloatRegister($src$$reg),
                         as_FloatRegister($ftmp$$reg));
  %}
  ins_pipe(pipe_slow);
%}
13812 
// Math.round(F)I: delegated to the macro-assembler routine; needs an FP
// temp and clobbers flags.
instruct round_float_reg(iRegINoSp dst, vRegF src, vRegF ftmp, rFlagsReg cr)
%{
  match(Set dst (RoundF src));
  effect(TEMP_DEF dst, TEMP ftmp, KILL cr);
  format %{ "java_round_float $dst,$src"%}
  ins_encode %{
    __ java_round_float($dst$$Register, as_FloatRegister($src$$reg),
                        as_FloatRegister($ftmp$$reg));
  %}
  ins_pipe(pipe_slow);
%}
13824 
13825 // stack <-> reg and reg <-> reg shuffles with no conversion
13826 
// Reinterpret a float spilled on the stack as an int register value
// (raw 32-bit load, no conversion).
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}
13844 
// Reinterpret an int spilled on the stack as a float register value
// (raw 32-bit load into an FP register, no conversion).
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13862 
// Reinterpret a double spilled on the stack as a long register value
// (raw 64-bit load, no conversion).
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}
13880 
// Reinterpret a long spilled on the stack as a double register value
// (raw 64-bit load into an FP register, no conversion).
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13898 
// Store the raw bits of a float register to an int stack slot
// (no conversion).
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13916 
// Store the raw bits of an int register to a float stack slot
// (no conversion).
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13934 
13935 instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{
13936 
13937   match(Set dst (MoveD2L src));
13938 
13939   effect(DEF dst, USE src);
13940 
13941   ins_cost(INSN_COST);
13942 
13943   format %{ "strd $dst, $src\t# MoveD2L_reg_stack" %}
13944 
13945   ins_encode %{
13946     __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
13947   %}
13948 
13949   ins_pipe(pipe_class_memory);
13950 
13951 %}
13952 
// Store the raw 64 bits of a long register to a double stack slot
// (no conversion).
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13970 
// Bit-move a float register to an int register via FMOV (no conversion).
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}
13988 
// Bit-move an int register to a float register via FMOV (no conversion).
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}
14006 
// Bit-move a double register to a long register via FMOV (no conversion).
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}
14024 
// Bit-move a long register to a double register via FMOV (no conversion).
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
14042 
14043 // ============================================================================
14044 // clearing of an array
14045 
// Zero 'cnt' words starting at 'base'. Both inputs are pinned to fixed
// registers (R10/R11) and clobbered by the zero_words stub call; zero_words
// returns null when the code cache is full, which must abort compilation.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base, KILL cr);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    address tpc = __ zero_words($base$$Register, $cnt$$Register);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
14064 
// Zero a compile-time-constant number of words, only when the count is
// below the block-zeroing threshold (the predicate reads the constant from
// the ClearArray node's second input). 'temp' is a scratch register.
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, iRegL_R11 temp, Universe dummy, rFlagsReg cr)
%{
  predicate((uint64_t)n->in(2)->get_long()
            < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
  match(Set dummy (ClearArray cnt base));
  effect(TEMP temp, USE_KILL base, KILL cr);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    address tpc = __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
14085 
14086 // ============================================================================
14087 // Overflow Math Instructions
14088 
// Overflow check for int add: CMNW sets flags for op1 + op2 without writing
// a result; the OverflowAddI consumer tests the overflow (V) flag.
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
14101 
// Overflow check for int add with an add/sub-immediate operand.
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
14114 
// Overflow check for long add: CMN sets flags for op1 + op2.
instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
14127 
// Overflow check for long add with an immediate: ADDS to the zero register
// sets flags and discards the sum.
instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "adds  zr, $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ adds(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
14140 
// Flags for an overflow-checked int subtract: CMPW computes op1 - op2
// and sets NZCV; the OverflowSubI consumer tests the V flag.
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Same with an add/sub-encodable immediate right operand.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// 64-bit subtract-overflow check, register operands.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// 64-bit subtract-overflow check with immediate; SUBS to zr keeps only
// the flags.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ subs(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
14192 
// Overflow check for int negation (0 - op1): CMPW zr, op1 sets the
// flags the OverflowSubI consumer reads.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for long negation (0 - op1).
// NOTE(review): `zero` is typed immI0 although OverflowSubL takes a long
// zero operand — confirm immL0 was not intended here.
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
14218 
// Overflow check for int multiply. SMULL forms the full 64-bit product;
// if the product fits in 32 bits its top half equals the sign extension
// of its low half, so comparing the product against its own sxtw yields
// NE exactly on overflow. The movw/cselw/cmpw tail then converts that
// NE/EQ result into the V flag (VS on overflow) that generic cmpOp
// consumers expect: 0x80000000 - 1 sets V, 0 - 1 does not.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
14239 
// Fused int-multiply-overflow + branch. When the If tests only
// overflow/no_overflow (see predicate), we can skip synthesizing the V
// flag and branch directly on the NE/EQ result of the sxtw compare:
// VS maps to NE (overflow), VC maps to EQ.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
14261 
// Overflow check for long multiply. MUL gives product bits 0..63 and
// SMULH bits 64..127; no overflow iff the high half is the pure sign
// extension of the low half (low half shifted ASR #63). As in the int
// case, the movw/cselw/cmpw tail converts the NE/EQ outcome into the
// V flag for generic cmpOp consumers.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
14284 
// Fused long-multiply-overflow + branch: like overflowMulI_reg_branch,
// restricted to overflow/no_overflow tests so the branch can use the
// NE/EQ result of the high-half compare directly (VS -> NE, VC -> EQ).
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
14308 
14309 // ============================================================================
14310 // Compare Instructions
14311 
// Signed int compare, register-register (CMPW via shared encoding).
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed int compare against zero.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare with an add/sub-encodable immediate (one insn).
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare with an arbitrary immediate; costs more because
// the constant may need to be materialized first.
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14367 
14368 // Unsigned compare Instructions; really, same as signed compare
14369 // except it should only be used to feed an If or a CMovI which takes a
14370 // cmpOpU.
14371 
// Unsigned int compare, register-register. Same CMPW encoding as the
// signed form; only the flag register class (rFlagsRegU) differs so
// consumers use unsigned condition codes.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned int compare against zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare with an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare with an arbitrary immediate (may need a
// materialization step, hence the higher cost).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14427 
// Signed long compare, register-register.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed long compare against zero.
// NOTE(review): format prints "tst" but the encoding is the add/sub
// immediate compare — confirm the format text is intentional.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare with an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare with an arbitrary immediate.
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14483 
// Unsigned long compare, register-register: same CMP encoding as the
// signed form, but produces rFlagsRegU so consumers use unsigned
// condition codes.
instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned long compare against zero.
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare with an add/sub-encodable immediate.
instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare with an arbitrary immediate.
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14539 
// Pointer compare (unsigned by nature, hence rFlagsRegU).
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed-pointer (narrow oop) compare.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
14567 
// Pointer null test: compare op1 against zero.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Narrow-oop null test: compare op1 against zero.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
14595 
14596 // FP comparisons
14597 //
14598 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
14599 // using normal cmpOp. See declaration of rFlagsReg for details.
14600 
// Float compare, register-register: FCMPS sets the flags consumed via a
// normal cmpOp (see the FP comparisons note above).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Float compare against the +0.0 immediate form of FCMPS.
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
14629 
// Double compare, register-register (FCMPD).
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Double compare against the +0.0 immediate form of FCMPD.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
14657 
// Three-way float compare: $dst = -1 if src1 < src2 or unordered,
// 0 if equal, +1 if greater. FCMPS sets the flags; CSINV then yields
// 0 (EQ) or -1, and CSNEG keeps -1 for LT (less or unordered) else
// negates it to +1. Kills cr.
// Fixes: balanced the paren in the format string; removed the unused
// `Label done` and its bind, which nothing ever branched to.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
14685 
// Three-way double compare: $dst = -1 if src1 < src2 or unordered,
// 0 if equal, +1 if greater (same CSINV/CSNEG sequence as the float
// form, using FCMPD). Kills cr.
// Fixes: balanced the paren in the format string; removed the unused
// `Label done` and its bind, which nothing ever branched to.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
14712 
// Three-way float compare against 0.0: $dst = -1 if src1 < 0.0 or
// unordered, 0 if equal, +1 if greater. Uses the immediate-zero form
// of FCMPS. Kills cr.
// Fixes: balanced the paren in the format string; removed the unused
// `Label done` and its bind, which nothing ever branched to.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
14739 
// Three-way double compare against 0.0: $dst = -1 if src1 < 0.0 or
// unordered, 0 if equal, +1 if greater. Uses the immediate-zero form
// of FCMPD. Kills cr.
// Fixes: balanced the paren in the format string; removed the unused
// `Label done` and its bind, which nothing ever branched to.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
14765 
// CmpLTMask: $dst = (p < q) ? -1 : 0. CSETW materializes 0/1 from the
// LT condition, then SUBW from zr negates it to 0/-1.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// CmpLTMask against zero collapses to an arithmetic shift: ASRW by 31
// replicates the sign bit, giving -1 for negative src and 0 otherwise.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14802 
14803 // ============================================================================
14804 // Max and Min
14805 
14806 // Like compI_reg_reg or compI_reg_immI0 but without match rule and second zero parameter.
14807 
// Compare-with-zero helper with no match rule: exists only to be used
// from expand blocks (see the min/max rules below).
instruct compI_reg_imm0(rFlagsReg cr, iRegI src)
%{
  effect(DEF cr, USE src);
  ins_cost(INSN_COST);
  format %{ "cmpw $src, 0" %}

  ins_encode %{
    __ cmpw($src$$Register, 0);
  %}
  ins_pipe(icmp_reg_imm);
%}
14819 
// MinI expands to a signed compare followed by a LT conditional move
// (cmovI_reg_reg_lt is defined elsewhere in this file).
instruct minI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2)
%{
  match(Set dst (MinI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_lt(dst, src1, src2, cr);
  %}
%}

// MaxI expands to a signed compare followed by a GT conditional move.
instruct maxI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2)
%{
  match(Set dst (MaxI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_gt(dst, src1, src2, cr);
  %}
%}
14843 
14844 
14845 // ============================================================================
14846 // Branch Instructions
14847 
14848 // Direct Branch.
// Direct Branch.
// Unconditional branch for Goto nodes (plain B instruction).
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
14862 
14863 // Conditional Near Branch
// Conditional Near Branch
// Conditional branch on a signed flag result (B.cond).
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}

// Conditional Near Branch Unsigned
// Same as branchCon but on unsigned flags/condition codes.
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
14904 
14905 // Make use of CBZ and CBNZ.  These instructions, as well as being
14906 // shorter than (cmp; branch), have the additional benefit of not
14907 // killing the flags.
14908 
// int ==/!= 0 branch: EQ selects CBZW, NE selects CBNZW. No flags are
// written (cr is listed only to match the If shape, not killed).
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// long ==/!= 0 branch via CBZ/CBNZ.
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// pointer ==/!= null branch via CBZ/CBNZ.
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// narrow-oop ==/!= null branch via 32-bit CBZW/CBNZW.
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// (DecodeN oop) ==/!= null branch: a narrow oop decodes to null iff the
// compressed value is zero, so test the 32-bit register directly.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14993 
// Unsigned int compare-with-zero branch. For unsigned u: u <= 0 iff
// u == 0 and u > 0 iff u != 0, so EQ/LS map to CBZW and NE/HI to CBNZW.
instruct cmpUI_imm0_branch(cmpOpUEqNeLeGt cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS) {
      __ cbzw($op1$$Register, *L);
    } else {
      assert(cond == Assembler::NE || cond == Assembler::HI, "unexpected condition");
      __ cbnzw($op1$$Register, *L);
    }
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Same mapping for unsigned long against zero, using CBZ/CBNZ.
instruct cmpUL_imm0_branch(cmpOpUEqNeLeGt cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS) {
      __ cbz($op1$$Register, *L);
    } else {
      assert(cond == Assembler::NE || cond == Assembler::HI, "unexpected condition");
      __ cbnz($op1$$Register, *L);
    }
  %}
  ins_pipe(pipe_cmp_branch);
%}
15031 
15032 // Test bit and Branch
15033 
15034 // Patterns for short (< 32KiB) variants
// Patterns for short (< 32KiB) variants
// long < 0 / >= 0 branch via a sign-bit test: LT maps to TBNZ (bit 63
// set, cond NE in tbr terms) and GE to TBZ.
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// int < 0 / >= 0 branch via a test of sign bit 31.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
15066 
// (op1 & single-bit-mask) ==/!= 0 branch on a long: the predicate
// requires op2 to be a power of two, so the test reduces to TBZ/TBNZ
// on that one bit.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Same single-bit test-and-branch for int operands.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
15100 
15101 // And far variants
// And far variants
// Far form of the long sign-bit branch: tbr emits the out-of-range
// sequence when /*far*/true.
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far form of the int sign-bit branch.
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15131 
// Far form of the long single-bit test-and-branch.
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far form of the int single-bit test-and-branch.
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15163 
// Test bits

// Set flags from (op1 & op2) compared against zero, using tst with an
// immediate mask.  Only matches when the mask is encodable as a 64-bit
// logical immediate.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
15178 
// 32-bit variant: set flags from (op1 & op2) compared against zero, using
// tstw with an immediate mask.  Only matches when the mask is encodable as
// a 32-bit logical immediate.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  // "tstw" matches the instruction actually emitted below (and the
  // cmpI_and_reg rule); the old format said "tst".
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
15191 
// Register-register forms of the bit tests above: no encodability
// restriction on the mask, so no predicate is needed.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// 32-bit register-register bit test.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
15213 
15214 
// Conditional Far Branch
// Conditional Far Branch Unsigned
// TODO: fixme

// counted loop end branch near
// Conditional branch closing a counted loop; uses the same conditional
// branch encoding as an ordinary If branch.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}

// counted loop end branch far
// TODO: fixme
15238 
15239 // ============================================================================
15240 // inlined locking and unlocking
15241 
15242 instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
15243 %{
15244   match(Set cr (FastLock object box));
15245   effect(TEMP tmp, TEMP tmp2, TEMP tmp3);
15246 
15247   ins_cost(5 * INSN_COST);
15248   format %{ "fastlock $object,$box\t! kills $tmp,$tmp2,$tmp3" %}
15249 
15250   ins_encode %{
15251     __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
15252   %}
15253 
15254   ins_pipe(pipe_serial);
15255 %}
15256 
15257 instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
15258 %{
15259   match(Set cr (FastUnlock object box));
15260   effect(TEMP tmp, TEMP tmp2, TEMP tmp3);
15261 
15262   ins_cost(5 * INSN_COST);
15263   format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2, $tmp3" %}
15264 
15265   ins_encode %{
15266     __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
15267   %}
15268 
15269   ins_pipe(pipe_serial);
15270 %}
15271 
15272 // ============================================================================
15273 // Safepoint Instructions
15274 
15275 // TODO
15276 // provide a near and far version of this code
15277 
15278 instruct safePoint(rFlagsReg cr, iRegP poll)
15279 %{
15280   match(SafePoint poll);
15281   effect(KILL cr);
15282 
15283   format %{
15284     "ldrw zr, [$poll]\t# Safepoint: poll for GC"
15285   %}
15286   ins_encode %{
15287     __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
15288   %}
15289   ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
15290 %}
15291 
15292 
15293 // ============================================================================
15294 // Procedure Call/Return Instructions
15295 
15296 // Call Java Static Instruction
15297 
15298 instruct CallStaticJavaDirect(method meth)
15299 %{
15300   match(CallStaticJava);
15301 
15302   effect(USE meth);
15303 
15304   ins_cost(CALL_COST);
15305 
15306   format %{ "call,static $meth \t// ==> " %}
15307 
15308   ins_encode(aarch64_enc_java_static_call(meth),
15309              aarch64_enc_call_epilog);
15310 
15311   ins_pipe(pipe_class_call);
15312 %}
15313 
15314 // TO HERE
15315 
15316 // Call Java Dynamic Instruction
15317 instruct CallDynamicJavaDirect(method meth)
15318 %{
15319   match(CallDynamicJava);
15320 
15321   effect(USE meth);
15322 
15323   ins_cost(CALL_COST);
15324 
15325   format %{ "CALL,dynamic $meth \t// ==> " %}
15326 
15327   ins_encode(aarch64_enc_java_dynamic_call(meth),
15328              aarch64_enc_call_epilog);
15329 
15330   ins_pipe(pipe_class_call);
15331 %}
15332 
// Call Runtime Instruction

// Call into the VM runtime (may safepoint).
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction

// Leaf runtime call: no safepoint, no oop-map required.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction without safepoint and with vector arguments
instruct CallLeafDirectVector(method meth)
%{
  match(CallLeafVector);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf vector $meth" %}

  ins_encode(aarch64_enc_java_to_runtime(meth));

  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction

// Leaf runtime call that is known not to use floating-point registers.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15399 
// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
// Don't use rfp for 'jump_target' because a MachEpilogNode has already been
// emitted just above the TailCall which has reset rfp to the caller state.
instruct TailCalljmpInd(iRegPNoSpNoRfp jump_target, inline_cache_RegP method_ptr)
%{
  match(TailCall jump_target method_ptr);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_ptr holds method" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}

// Interprocedural jump carrying an exception oop (in r0) rather than a
// method pointer; used on exception-dispatch paths.
instruct TailjmpInd(iRegPNoSpNoRfp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}

// Forward exception.
// Unconditional far jump to the shared forward-exception stub.
instruct ForwardExceptionjmp()
%{
  match(ForwardException);
  ins_cost(CALL_COST);

  format %{ "b forward_exception_stub" %}
  ins_encode %{
    __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
  %}
  ins_pipe(pipe_class_call);
%}
15444 
// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
// TODO check
// should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  // Purely a register-allocation artifact: the oop is already in r0.
  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}

// Rethrow exception: The exception oop will come in the first
// argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}


// Return Instruction
// epilog node loads ret address into lr as part of frame pop
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}

// Die now.
// Halt node: emit a stop (with the recorded halt reason) only if this point
// is reachable; unreachable Halts emit nothing.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    if (is_reachable()) {
      const char* str = __ code_string(_halt_reason);
      __ stop(str);
    }
  %}

  ins_pipe(pipe_class_default);
%}
15506 
15507 // ============================================================================
15508 // Partial Subtype Check
15509 //
15510 // superklass array for an instance of the superklass.  Set a hidden
15511 // internal cache on a hit (cache is checked with exposed code in
15512 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
15513 // encoding ALSO sets flags.
15514 
15515 instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
15516 %{
15517   match(Set result (PartialSubtypeCheck sub super));
15518   predicate(!UseSecondarySupersTable);
15519   effect(KILL cr, KILL temp);
15520 
15521   ins_cost(20 * INSN_COST);  // slightly larger than the next version
15522   format %{ "partialSubtypeCheck $result, $sub, $super" %}
15523 
15524   ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));
15525 
15526   opcode(0x1); // Force zero of result reg on hit
15527 
15528   ins_pipe(pipe_class_memory);
15529 %}
15530 
15531 // Two versions of partialSubtypeCheck, both used when we need to
15532 // search for a super class in the secondary supers array. The first
15533 // is used when we don't know _a priori_ the class being searched
15534 // for. The second, far more common, is used when we do know: this is
15535 // used for instanceof, checkcast, and any case where C2 can determine
15536 // it by constant propagation.
15537 
15538 instruct partialSubtypeCheckVarSuper(iRegP_R4 sub, iRegP_R0 super, vRegD_V0 vtemp, iRegP_R5 result,
15539                                      iRegP_R1 tempR1, iRegP_R2 tempR2, iRegP_R3 tempR3,
15540                                      rFlagsReg cr)
15541 %{
15542   match(Set result (PartialSubtypeCheck sub super));
15543   predicate(UseSecondarySupersTable);
15544   effect(KILL cr, TEMP tempR1, TEMP tempR2, TEMP tempR3, TEMP vtemp);
15545 
15546   ins_cost(10 * INSN_COST);  // slightly larger than the next version
15547   format %{ "partialSubtypeCheck $result, $sub, $super" %}
15548 
15549   ins_encode %{
15550     __ lookup_secondary_supers_table_var($sub$$Register, $super$$Register,
15551                                          $tempR1$$Register, $tempR2$$Register, $tempR3$$Register,
15552                                          $vtemp$$FloatRegister,
15553                                          $result$$Register, /*L_success*/nullptr);
15554   %}
15555 
15556   ins_pipe(pipe_class_memory);
15557 %}
15558 
15559 instruct partialSubtypeCheckConstSuper(iRegP_R4 sub, iRegP_R0 super_reg, immP super_con, vRegD_V0 vtemp, iRegP_R5 result,
15560                                        iRegP_R1 tempR1, iRegP_R2 tempR2, iRegP_R3 tempR3,
15561                                        rFlagsReg cr)
15562 %{
15563   match(Set result (PartialSubtypeCheck sub (Binary super_reg super_con)));
15564   predicate(UseSecondarySupersTable);
15565   effect(KILL cr, TEMP tempR1, TEMP tempR2, TEMP tempR3, TEMP vtemp);
15566 
15567   ins_cost(5 * INSN_COST);  // smaller than the next version
15568   format %{ "partialSubtypeCheck $result, $sub, $super_reg, $super_con" %}
15569 
15570   ins_encode %{
15571     bool success = false;
15572     u1 super_klass_slot = ((Klass*)$super_con$$constant)->hash_slot();
15573     if (InlineSecondarySupersTest) {
15574       success =
15575         __ lookup_secondary_supers_table_const($sub$$Register, $super_reg$$Register,
15576                                                $tempR1$$Register, $tempR2$$Register, $tempR3$$Register,
15577                                                $vtemp$$FloatRegister,
15578                                                $result$$Register,
15579                                                super_klass_slot);
15580     } else {
15581       address call = __ trampoline_call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_stub(super_klass_slot)));
15582       success = (call != nullptr);
15583     }
15584     if (!success) {
15585       ciEnv::current()->record_failure("CodeCache is full");
15586       return;
15587     }
15588   %}
15589 
15590   ins_pipe(pipe_class_memory);
15591 %}
15592 
// Intrinsics for String.compareTo()

// UTF-16 x UTF-16 string compare (NEON path, UseSVE == 0).
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  // List both killed temps ($tmp2 is killed by the effect above too).
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, pnoreg, pnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
15612 
// Latin-1 x Latin-1 string compare (NEON path, UseSVE == 0).
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  // List both killed temps ($tmp2 is killed by the effect above too).
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, pnoreg, pnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
15629 
// Mixed-encoding compares (UTF-16 vs Latin-1) additionally need vector
// temps for widening the Latin-1 operand.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, pnoreg, pnoreg, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Latin-1 x UTF-16 compare; mirror image of string_compareUL.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, pnoreg, pnoreg, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
15669 
// Note that Z registers alias the corresponding NEON registers, we declare the vector operands of
// these string_compare variants as NEON register type for convenience so that the prototype of
// string_compare can be shared with all variants.

// SVE string compare, Latin-1 x Latin-1 (UseSVE > 0); uses governing
// predicate registers instead of the third NEON temp.
instruct string_compareLL_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// SVE string compare, Latin-1 x UTF-16.
instruct string_compareLU_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}

// SVE string compare, UTF-16 x Latin-1.
instruct string_compareUL_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}

// SVE string compare, UTF-16 x UTF-16.
instruct string_compareUU_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
15765 
// String.indexOf with a variable-length needle (icnt2 == -1 tells
// string_indexof the needle length is in a register), UTF-16 x UTF-16.
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                          iRegINoSp tmp3, iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
         TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

// Variable-length indexOf, Latin-1 x Latin-1.
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
         TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Variable-length indexOf, UTF-16 haystack x Latin-1 needle.
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5,
         TEMP tmp6, TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL) "
            "# KILL $str1 cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15837 
// indexOf with a small compile-time-constant needle length: the constant
// is passed as icnt2 and fewer temps are needed (cnt2 and tmp5/tmp6 become
// zr).  UTF-16 x UTF-16, needle length <= 4.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

// Constant-length indexOf, Latin-1 x Latin-1, needle length <= 4.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Constant-length indexOf, UTF-16 x Latin-1, needle length exactly 1.
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15903 
// String.indexOf(char) intrinsic for UTF-16 strings (U encoding),
// non-SVE path: only selected when UseSVE == 0 (see the *_sve variants below).
instruct string_indexof_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                             iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                             iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  predicate((UseSVE == 0) && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U));
  // All three inputs are consumed; tmp1-tmp3 are scratch; flags clobbered.
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "StringUTF16 IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15922 
// String.indexOf(char) intrinsic for Latin1 strings (L encoding),
// non-SVE path: only selected when UseSVE == 0.
instruct stringL_indexof_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  predicate((UseSVE == 0) && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L));
  // All three inputs are consumed; tmp1-tmp3 are scratch; flags clobbered.
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "StringLatin1 IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ stringL_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                            $result$$Register, $tmp1$$Register, $tmp2$$Register,
                            $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15941 
// SVE variant of String.indexOf(char) for Latin1 strings (L encoding).
// Unlike the non-SVE rules above, the inputs are not clobbered: only the
// vector/predicate temporaries and the flags are killed.
instruct stringL_indexof_char_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                                  iRegI_R0 result, vecA ztmp1, vecA ztmp2,
                                  pRegGov pgtmp, pReg ptmp, rFlagsReg cr) %{
  predicate(UseSVE > 0 && ((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L);
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(TEMP ztmp1, TEMP ztmp2, TEMP pgtmp, TEMP ptmp, KILL cr);
  format %{ "StringLatin1 IndexOf char[] $str1,$cnt1,$ch -> $result # use sve" %}
  ins_encode %{
    // isL = true selects the Latin1 (byte) flavour of the shared SVE stub.
    __ string_indexof_char_sve($str1$$Register, $cnt1$$Register, $ch$$Register,
                               $result$$Register, $ztmp1$$FloatRegister,
                               $ztmp2$$FloatRegister, $pgtmp$$PRegister,
                               $ptmp$$PRegister, true /* isL */);
  %}
  ins_pipe(pipe_class_memory);
%}
15957 
// SVE variant of String.indexOf(char) for UTF-16 strings (U encoding).
// Same register discipline as the Latin1 SVE rule: inputs preserved,
// vector/predicate temps and flags clobbered.
instruct stringU_indexof_char_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                                  iRegI_R0 result, vecA ztmp1, vecA ztmp2,
                                  pRegGov pgtmp, pReg ptmp, rFlagsReg cr) %{
  predicate(UseSVE > 0 && ((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U);
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(TEMP ztmp1, TEMP ztmp2, TEMP pgtmp, TEMP ptmp, KILL cr);
  format %{ "StringUTF16 IndexOf char[] $str1,$cnt1,$ch -> $result # use sve" %}
  ins_encode %{
    // isL = false selects the UTF-16 (halfword) flavour of the shared SVE stub.
    __ string_indexof_char_sve($str1$$Register, $cnt1$$Register, $ch$$Register,
                               $result$$Register, $ztmp1$$FloatRegister,
                               $ztmp2$$FloatRegister, $pgtmp$$PRegister,
                               $ptmp$$PRegister, false /* isL */);
  %}
  ins_pipe(pipe_class_memory);
%}
15973 
// String.equals intrinsic for two Latin1 strings (LL encoding).
// Both string pointers and the count are consumed; flags are clobbered.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15989 
// Arrays.equals intrinsic for byte arrays (AryEq with LL encoding).
// May call out to a generated stub; clobbers R10 (tmp), both array
// pointers, three pointer temps, vector registers V0-V7 and the flags.
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                       vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3,
         TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         TEMP vtmp6, TEMP vtmp7, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result # KILL $ary1 $ary2 $tmp $tmp1 $tmp2 $tmp3 V0-V7 cr" %}
  ins_encode %{
    // Final argument 1 = element size in bytes (byte arrays).
    address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register,
                                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                                   $result$$Register, $tmp$$Register, 1);
    // A null stub address means the helper could not be emitted; bail out
    // of this compile so it can be retried when the code cache has space.
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
16014 
// Arrays.equals intrinsic for char arrays (AryEq with UU encoding).
// Identical register discipline to array_equalsB; only the element size
// passed to the stub differs.
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                       vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3,
         TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         TEMP vtmp6, TEMP vtmp7, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result # KILL $ary1 $ary2 $tmp $tmp1 $tmp2 $tmp3 V0-V7 cr" %}
  ins_encode %{
    // Final argument 2 = element size in bytes (16-bit chars).
    address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register,
                                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                                   $result$$Register, $tmp$$Register, 2);
    // Bail out of the compile if the stub could not be generated.
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
16039 
// Vectorized array hashCode intrinsic. The element type is carried as an
// integer constant (basic_type) and forwarded to the stub as a BasicType.
// result is both an input (initial hash) and the output.
instruct arrays_hashcode(iRegP_R1 ary, iRegI_R2 cnt, iRegI_R0 result, immI basic_type,
                         vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                         vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                         vRegD_V12 vtmp8, vRegD_V13 vtmp9, rFlagsReg cr)
%{
  match(Set result (VectorizedHashCode (Binary ary cnt) (Binary result basic_type)));
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5, TEMP vtmp6,
         TEMP vtmp7, TEMP vtmp8, TEMP vtmp9, USE_KILL ary, USE_KILL cnt, USE basic_type, KILL cr);

  format %{ "Array HashCode array[] $ary,$cnt,$result,$basic_type -> $result   // KILL all" %}
  ins_encode %{
    // NOTE: the first four vector temps are deliberately passed in reverse
    // declaration order (vtmp3..vtmp0); keep this in sync with the stub's
    // expected parameter order.
    address tpc = __ arrays_hashcode($ary$$Register, $cnt$$Register, $result$$Register,
                                     $vtmp3$$FloatRegister, $vtmp2$$FloatRegister,
                                     $vtmp1$$FloatRegister, $vtmp0$$FloatRegister,
                                     $vtmp4$$FloatRegister, $vtmp5$$FloatRegister,
                                     $vtmp6$$FloatRegister, $vtmp7$$FloatRegister,
                                     $vtmp8$$FloatRegister, $vtmp9$$FloatRegister,
                                     (BasicType)$basic_type$$constant);
    // Bail out of the compile if the stub could not be generated.
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
16065 
// CountPositives intrinsic: counts leading non-negative bytes in a byte
// array. Consumes the array pointer and length; clobbers flags.
instruct count_positives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (CountPositives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "count positives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    address tpc = __ count_positives($ary1$$Register, $len$$Register, $result$$Register);
    // Bail out of the compile if the stub could not be generated.
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe( pipe_slow );
%}
16080 
// fast char[] to byte[] compression
// StrCompressedCopy: src/dst pointers are consumed, len is read but
// preserved (USE, not USE_KILL); V0-V5 and the flags are clobbered.
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                         vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         USE_KILL src, USE_KILL dst, USE len, KILL cr);

  format %{ "String Compress $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $result$$Register, $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                           $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                           $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
16100 
// fast byte[] to char[] inflation
// StrInflatedCopy produces no value (Universe dummy); src/dst/len are all
// consumed and V0-V6 plus the flags are clobbered.
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len, iRegP_R3 tmp,
                        vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                        vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3,
         TEMP vtmp4, TEMP vtmp5, TEMP vtmp6, TEMP tmp,
         USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst # KILL $tmp $src $dst $len V0-V6 cr" %}
  ins_encode %{
    // NOTE(review): vtmp3-vtmp6 are reserved as TEMPs but not passed here;
    // presumably the stub clobbers them internally - confirm against
    // MacroAssembler::byte_array_inflate before removing them.
    address tpc = __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                                        $vtmp2$$FloatRegister, $tmp$$Register);
    // Bail out of the compile if the stub could not be generated.
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
16123 
// encode char[] to byte[] in ISO_8859_1
// Paired with encode_ascii_array below; the predicate on is_ascii()
// decides which rule matches, and the boolean passed to the stub follows it.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                          vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                          iRegI_R0 result, rFlagsReg cr)
%{
  predicate(!((EncodeISOArrayNode*)n)->is_ascii());
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE len, KILL vtmp0, KILL vtmp1,
         KILL vtmp2, KILL vtmp3, KILL vtmp4, KILL vtmp5, KILL cr);

  format %{ "Encode ISO array $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    // false = ISO-8859-1 mode (not ASCII-only).
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
                        $result$$Register, false,
                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                        $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                        $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_class_memory);
%}
16145 
// encode char[] to byte[] in ASCII: same shape as encode_iso_array above,
// selected when the node's is_ascii() flag is set.
instruct encode_ascii_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                            vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                            vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                            iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((EncodeISOArrayNode*)n)->is_ascii());
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE len, KILL vtmp0, KILL vtmp1,
         KILL vtmp2, KILL vtmp3, KILL vtmp4, KILL vtmp5, KILL cr);

  format %{ "Encode ASCII array $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    // true = ASCII-only mode.
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
                        $result$$Register, true,
                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                        $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                        $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_class_memory);
%}
16166 
16167 //----------------------------- CompressBits/ExpandBits ------------------------
16168 
// CompressBits (Integer.compress) on a 32-bit register value:
// move src and mask into FP/SIMD S lanes, run sve_bext (bit extract),
// then move the result lane back to a general register.
instruct compressBitsI_reg(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ S, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ S, 0, $mask$$Register);
    __ sve_bext($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
16186 
// CompressBits with the source loaded from memory and a constant mask:
// the source is loaded straight into an FP S lane and the mask comes
// from the constant pool, avoiding the GPR round trip.
instruct compressBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits (LoadI mask) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrs   $tsrc, $mem\n\t"
            "ldrs   $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    loadStore(masm, &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ ldrs($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bext($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
16205 
// CompressBits (Long.compress) on a 64-bit register value: same pattern
// as compressBitsI_reg but operating on D lanes of the vector temps.
instruct compressBitsL_reg(iRegLNoSp dst, iRegL src, iRegL mask,
                           vRegD tdst, vRegD tsrc, vRegD tmask) %{
  match(Set dst (CompressBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ D, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ D, 0, $mask$$Register);
    __ sve_bext($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
16223 
// CompressBits on a long loaded from memory with a constant mask:
// source loaded directly into an FP D lane, mask from the constant pool.
instruct compressBitsL_memcon(iRegLNoSp dst, memory8 mem, immL mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits (LoadL mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrd   $tsrc, $mem\n\t"
            "ldrd   $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    loadStore(masm, &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ ldrd($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bext($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
16242 
// ExpandBits (Integer.expand) on a 32-bit register value: mirror of
// compressBitsI_reg using sve_bdep (bit deposit) instead of bext.
instruct expandBitsI_reg(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask,
                         vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ S, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ S, 0, $mask$$Register);
    __ sve_bdep($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
16260 
// ExpandBits with the int source loaded from memory and a constant mask:
// source loaded directly into an FP S lane, mask from the constant pool.
instruct expandBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask,
                         vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits (LoadI mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrs   $tsrc, $mem\n\t"
            "ldrs   $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    loadStore(masm, &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ ldrs($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bdep($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
16279 
// ExpandBits (Long.expand) on a 64-bit register value: same pattern as
// expandBitsI_reg but operating on D lanes of the vector temps.
instruct expandBitsL_reg(iRegLNoSp dst, iRegL src, iRegL mask,
                         vRegD tdst, vRegD tsrc, vRegD tmask) %{
  match(Set dst (ExpandBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ D, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ D, 0, $mask$$Register);
    __ sve_bdep($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
16297 
16298 
// ExpandBits on a long loaded from memory with a constant mask:
// source loaded directly into an FP D lane, mask from the constant pool.
// FIX: dst was declared iRegINoSp (int register operand) but the match
// rule produces a long value (ExpandBits of LoadL with an immL mask) and
// the encode moves a full 64-bit D lane into $dst. Use iRegLNoSp, matching
// compressBitsL_memcon and expandBitsL_reg above.
instruct expandBitsL_memcon(iRegLNoSp dst, memory8 mem, immL mask,
                         vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits (LoadL mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrd   $tsrc, $mem\n\t"
            "ldrd   $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    loadStore(masm, &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ ldrd($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bdep($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
16317 
//----------------------------- Reinterpret ----------------------------------
// Reinterpret a half-precision float value in a floating point register to a general purpose register
// (bit-for-bit move of the H lane, sign-extended into the GPR via smov).
instruct reinterpretHF2S(iRegINoSp dst, vRegF src) %{
  match(Set dst (ReinterpretHF2S src));
  format %{ "reinterpretHF2S $dst, $src" %}
  ins_encode %{
    __ smov($dst$$Register, $src$$FloatRegister, __ H, 0);
  %}
  ins_pipe(pipe_slow);
%}
16328 
// Reinterpret a half-precision float value in a general purpose register to a floating point register
// (bit-for-bit move of the low 16 bits into the H lane).
instruct reinterpretS2HF(vRegF dst, iRegINoSp src) %{
  match(Set dst (ReinterpretS2HF src));
  format %{ "reinterpretS2HF $dst, $src" %}
  ins_encode %{
    __ mov($dst$$FloatRegister, __ H, 0, $src$$Register);
  %}
  ins_pipe(pipe_slow);
%}
16338 
// Without this optimization, ReinterpretS2HF (ConvF2HF src) would result in the following
// instructions (the first two are for ConvF2HF and the last instruction is for ReinterpretS2HF) -
// fcvt $tmp1_fpr, $src_fpr    // Convert float to half-precision float
// mov  $tmp2_gpr, $tmp1_fpr   // Move half-precision float in FPR to a GPR
// mov  $dst_fpr,  $tmp2_gpr   // Move the result from a GPR to an FPR
// The move from FPR to GPR in ConvF2HF and the move from GPR to FPR in ReinterpretS2HF
// can be omitted in this pattern, resulting in -
// fcvt $dst, $src  // Convert float to half-precision float
instruct convF2HFAndS2HF(vRegF dst, vRegF src)
%{
  // Fused pattern: matches the composite node so both GPR moves disappear.
  match(Set dst (ReinterpretS2HF (ConvF2HF src)));
  format %{ "convF2HFAndS2HF $dst, $src" %}
  ins_encode %{
    __ fcvtsh($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
16356 
// Without this optimization, ConvHF2F (ReinterpretHF2S src) would result in the following
// instructions (the first one is for ReinterpretHF2S and the last two are for ConvHF2F) -
// mov  $tmp1_gpr, $src_fpr  // Move the half-precision float from an FPR to a GPR
// mov  $tmp2_fpr, $tmp1_gpr // Move the same value from GPR to an FPR
// fcvt $dst_fpr,  $tmp2_fpr // Convert the half-precision float to 32-bit float
// The move from FPR to GPR in ReinterpretHF2S and the move from GPR to FPR in ConvHF2F
// can be omitted as the input (src) is already in an FPR required for the fcvths instruction
// resulting in -
// fcvt $dst, $src  // Convert half-precision float to a 32-bit float
instruct convHF2SAndHF2F(vRegF dst, vRegF src)
%{
  // Fused pattern: matches the composite node so both GPR moves disappear.
  match(Set dst (ConvHF2F (ReinterpretHF2S src)));
  format %{ "convHF2SAndHF2F $dst, $src" %}
  ins_encode %{
    __ fcvths($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
16375 
16376 // ============================================================================
16377 // This name is KNOWN by the ADLC and cannot be changed.
16378 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
16379 // for this guy.
16380 instruct tlsLoadP(thread_RegP dst)
16381 %{
16382   match(Set dst (ThreadLocal));
16383 
16384   ins_cost(0);
16385 
16386   format %{ " -- \t// $dst=Thread::current(), empty" %}
16387 
16388   size(0);
16389 
16390   ins_encode( /*empty*/ );
16391 
16392   ins_pipe(pipe_class_empty);
16393 %}
16394 
16395 //----------PEEPHOLE RULES-----------------------------------------------------
16396 // These must follow all instruction definitions as they use the names
16397 // defined in the instructions definitions.
16398 //
16399 // peepmatch ( root_instr_name [preceding_instruction]* );
16400 //
16401 // peepconstraint %{
16402 // (instruction_number.operand_name relational_op instruction_number.operand_name
16403 //  [, ...] );
16404 // // instruction numbers are zero-based using left to right order in peepmatch
16405 //
16406 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
16407 // // provide an instruction_number.operand_name for each operand that appears
16408 // // in the replacement instruction's match rule
16409 //
16410 // ---------VM FLAGS---------------------------------------------------------
16411 //
16412 // All peephole optimizations can be turned off using -XX:-OptoPeephole
16413 //
16414 // Each peephole rule is given an identifying number starting with zero and
16415 // increasing by one in the order seen by the parser.  An individual peephole
16416 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
16417 // on the command-line.
16418 //
16419 // ---------CURRENT LIMITATIONS----------------------------------------------
16420 //
16421 // Only match adjacent instructions in same basic block
16422 // Only equality constraints
16423 // Only constraints between operands, not (0.dest_reg == RAX_enc)
16424 // Only one replacement instruction
16425 //
16426 // ---------EXAMPLE----------------------------------------------------------
16427 //
16428 // // pertinent parts of existing instructions in architecture description
16429 // instruct movI(iRegINoSp dst, iRegI src)
16430 // %{
16431 //   match(Set dst (CopyI src));
16432 // %}
16433 //
16434 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
16435 // %{
16436 //   match(Set dst (AddI dst src));
16437 //   effect(KILL cr);
16438 // %}
16439 //
16440 // // Change (inc mov) to lea
16441 // peephole %{
16442 //   // increment preceded by register-register move
16443 //   peepmatch ( incI_iReg movI );
16444 //   // require that the destination register of the increment
16445 //   // match the destination register of the move
16446 //   peepconstraint ( 0.dst == 1.dst );
16447 //   // construct a replacement instruction that sets
16448 //   // the destination to ( move's source register + one )
16449 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
16450 // %}
16451 //
16452 
16453 // Implementation no longer uses movX instructions since
16454 // machine-independent system no longer uses CopyX nodes.
16455 //
16456 // peephole
16457 // %{
16458 //   peepmatch (incI_iReg movI);
16459 //   peepconstraint (0.dst == 1.dst);
16460 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16461 // %}
16462 
16463 // peephole
16464 // %{
16465 //   peepmatch (decI_iReg movI);
16466 //   peepconstraint (0.dst == 1.dst);
16467 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16468 // %}
16469 
16470 // peephole
16471 // %{
16472 //   peepmatch (addI_iReg_imm movI);
16473 //   peepconstraint (0.dst == 1.dst);
16474 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16475 // %}
16476 
16477 // peephole
16478 // %{
16479 //   peepmatch (incL_iReg movL);
16480 //   peepconstraint (0.dst == 1.dst);
16481 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16482 // %}
16483 
16484 // peephole
16485 // %{
16486 //   peepmatch (decL_iReg movL);
16487 //   peepconstraint (0.dst == 1.dst);
16488 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16489 // %}
16490 
16491 // peephole
16492 // %{
16493 //   peepmatch (addL_iReg_imm movL);
16494 //   peepconstraint (0.dst == 1.dst);
16495 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16496 // %}
16497 
16498 // peephole
16499 // %{
16500 //   peepmatch (addP_iReg_imm movP);
16501 //   peepconstraint (0.dst == 1.dst);
16502 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
16503 // %}
16504 
16505 // // Change load of spilled value to only a spill
16506 // instruct storeI(memory mem, iRegI src)
16507 // %{
16508 //   match(Set mem (StoreI mem src));
16509 // %}
16510 //
16511 // instruct loadI(iRegINoSp dst, memory mem)
16512 // %{
16513 //   match(Set dst (LoadI mem));
16514 // %}
16515 //
16516 
16517 //----------SMARTSPILL RULES---------------------------------------------------
16518 // These must follow all instruction definitions as they use the names
16519 // defined in the instructions definitions.
16520 
16521 // Local Variables:
16522 // mode: c++
16523 // End: