1 //
    2 // Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
    3 // Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
    4 // Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved.
    5 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    6 //
    7 // This code is free software; you can redistribute it and/or modify it
    8 // under the terms of the GNU General Public License version 2 only, as
    9 // published by the Free Software Foundation.
   10 //
   11 // This code is distributed in the hope that it will be useful, but WITHOUT
   12 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   13 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   14 // version 2 for more details (a copy is included in the LICENSE file that
   15 // accompanied this code).
   16 //
   17 // You should have received a copy of the GNU General Public License version
   18 // 2 along with this work; if not, write to the Free Software Foundation,
   19 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   20 //
   21 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   22 // or visit www.oracle.com if you need additional information or have any
   23 // questions.
   24 //
   25 //
   26 
   27 // RISCV Architecture Description File
   28 
   29 //----------REGISTER DEFINITION BLOCK------------------------------------------
   30 // This information is used by the matcher and the register allocator to
   31 // describe individual registers and classes of registers within the target
   32 // architecture.
   33 
   34 register %{
   35 //----------Architecture Description Register Definitions----------------------
   36 // General Registers
   37 // "reg_def"  name ( register save type, C convention save type,
   38 //                   ideal register type, encoding );
   39 // Register Save Types:
   40 //
   41 // NS  = No-Save:       The register allocator assumes that these registers
   42 //                      can be used without saving upon entry to the method, &
   43 //                      that they do not need to be saved at call sites.
   44 //
   45 // SOC = Save-On-Call:  The register allocator assumes that these registers
   46 //                      can be used without saving upon entry to the method,
   47 //                      but that they must be saved at call sites.
   48 //
   49 // SOE = Save-On-Entry: The register allocator assumes that these registers
   50 //                      must be saved before using them upon entry to the
   51 //                      method, but they do not need to be saved at call
   52 //                      sites.
   53 //
   54 // AS  = Always-Save:   The register allocator assumes that these registers
   55 //                      must be saved before using them upon entry to the
   56 //                      method, & that they must be saved at call sites.
   57 //
   58 // Ideal Register Type is used to determine how to save & restore a
   59 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
   60 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
   61 //
   62 // The encoding number is the actual bit-pattern placed into the opcodes.
   63 
   64 // We must define the 64 bit int registers in two 32 bit halves, the
   65 // real lower register and a virtual upper half register. upper halves
   66 // are used by the register allocator but are not actually supplied as
   67 // operands to memory ops.
   68 //
   69 // follow the C1 compiler in making registers
   70 //
   71 //   x7, x9-x17, x27-x31 volatile (caller save)
   72 //   x0-x4, x8, x23 system (no save, no allocate)
   73 //   x5-x6 non-allocatable (so we can use them as temporary regs)
   74 
   75 //
// As regards Java usage, we don't use any callee save registers
   77 // because this makes it difficult to de-optimise a frame (see comment
   78 // in x86 implementation of Deoptimization::unwind_callee_save_values)
   79 //
   80 
   81 // General Registers
   82 
// ABI register names (per the RISC-V psABI) are noted on each first half.
reg_def R0      ( NS,  NS,  Op_RegI, 0,  x0->as_VMReg()         ); // zr
reg_def R0_H    ( NS,  NS,  Op_RegI, 0,  x0->as_VMReg()->next() );
reg_def R1      ( NS,  SOC, Op_RegI, 1,  x1->as_VMReg()         ); // ra
reg_def R1_H    ( NS,  SOC, Op_RegI, 1,  x1->as_VMReg()->next() );
reg_def R2      ( NS,  NS,  Op_RegI, 2,  x2->as_VMReg()         ); // sp
reg_def R2_H    ( NS,  NS,  Op_RegI, 2,  x2->as_VMReg()->next() );
reg_def R3      ( NS,  NS,  Op_RegI, 3,  x3->as_VMReg()         ); // gp
reg_def R3_H    ( NS,  NS,  Op_RegI, 3,  x3->as_VMReg()->next() );
reg_def R4      ( NS,  NS,  Op_RegI, 4,  x4->as_VMReg()         ); // tp
reg_def R4_H    ( NS,  NS,  Op_RegI, 4,  x4->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI, 7,  x7->as_VMReg()         ); // t2
reg_def R7_H    ( SOC, SOC, Op_RegI, 7,  x7->as_VMReg()->next() );
reg_def R8      ( NS,  SOE, Op_RegI, 8,  x8->as_VMReg()         ); // fp
reg_def R8_H    ( NS,  SOE, Op_RegI, 8,  x8->as_VMReg()->next() );
reg_def R9      ( SOC, SOE, Op_RegI, 9,  x9->as_VMReg()         ); // s1
reg_def R9_H    ( SOC, SOE, Op_RegI, 9,  x9->as_VMReg()->next() );
reg_def R10     ( SOC, SOC, Op_RegI, 10, x10->as_VMReg()        ); // a0
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, x10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, x11->as_VMReg()        ); // a1
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, x11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, x12->as_VMReg()        ); // a2
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, x12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, x13->as_VMReg()        ); // a3
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, x13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, x14->as_VMReg()        ); // a4
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, x14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, x15->as_VMReg()        ); // a5
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, x15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, x16->as_VMReg()        ); // a6
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, x16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, x17->as_VMReg()        ); // a7
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, x17->as_VMReg()->next());
reg_def R18     ( SOC, SOE, Op_RegI, 18, x18->as_VMReg()        ); // s2
reg_def R18_H   ( SOC, SOE, Op_RegI, 18, x18->as_VMReg()->next());
reg_def R19     ( SOC, SOE, Op_RegI, 19, x19->as_VMReg()        ); // s3
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, x19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, x20->as_VMReg()        ); // caller esp (s4)
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, x20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, x21->as_VMReg()        ); // s5
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, x21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, x22->as_VMReg()        ); // s6
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, x22->as_VMReg()->next());
reg_def R23     ( NS,  SOE, Op_RegI, 23, x23->as_VMReg()        ); // java thread (s7)
reg_def R23_H   ( NS,  SOE, Op_RegI, 23, x23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, x24->as_VMReg()        ); // s8
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, x24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, x25->as_VMReg()        ); // s9
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, x25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, x26->as_VMReg()        ); // s10
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, x26->as_VMReg()->next());
reg_def R27     ( SOC, SOE, Op_RegI, 27, x27->as_VMReg()        ); // heapbase (s11)
reg_def R27_H   ( SOC, SOE, Op_RegI, 27, x27->as_VMReg()->next());
reg_def R28     ( SOC, SOC, Op_RegI, 28, x28->as_VMReg()        ); // t3
reg_def R28_H   ( SOC, SOC, Op_RegI, 28, x28->as_VMReg()->next());
reg_def R29     ( SOC, SOC, Op_RegI, 29, x29->as_VMReg()        ); // t4
reg_def R29_H   ( SOC, SOC, Op_RegI, 29, x29->as_VMReg()->next());
reg_def R30     ( SOC, SOC, Op_RegI, 30, x30->as_VMReg()        ); // t5
reg_def R30_H   ( SOC, SOC, Op_RegI, 30, x30->as_VMReg()->next());
reg_def R31     ( SOC, SOC, Op_RegI, 31, x31->as_VMReg()        ); // t6
reg_def R31_H   ( SOC, SOC, Op_RegI, 31, x31->as_VMReg()->next());
  143 
  144 // ----------------------------
  145 // Float/Double Registers
  146 // ----------------------------
  147 
  148 // Double Registers
  149 
  150 // The rules of ADL require that double registers be defined in pairs.
  151 // Each pair must be two 32-bit values, but not necessarily a pair of
  152 // single float registers. In each pair, ADLC-assigned register numbers
  153 // must be adjacent, with the lower number even. Finally, when the
  154 // CPU stores such a register pair to memory, the word associated with
  155 // the lower ADLC-assigned number must be stored to the lower address.
  156 
  157 // RISCV has 32 floating-point registers. Each can store a single
  158 // or double precision floating-point value.
  159 
// For Java use, float registers f0-f31 are always save-on-call, whereas
// the platform ABI treats f8-f9 and f18-f27 as callee save. Other
// float registers are SOC as per the platform spec.
  163 
// ABI names (RISC-V psABI): f0-f7 = ft0-ft7 (temporaries), f8-f9 = fs0-fs1
// (callee-saved), f10-f17 = fa0-fa7 (argument/return), f18-f27 = fs2-fs11
// (callee-saved), f28-f31 = ft8-ft11 (temporaries). For Java all are SOC;
// the SOE column reflects the C ABI only.
reg_def F0    ( SOC, SOC, Op_RegF,  0,  f0->as_VMReg()          );
reg_def F0_H  ( SOC, SOC, Op_RegF,  0,  f0->as_VMReg()->next()  );
reg_def F1    ( SOC, SOC, Op_RegF,  1,  f1->as_VMReg()          );
reg_def F1_H  ( SOC, SOC, Op_RegF,  1,  f1->as_VMReg()->next()  );
reg_def F2    ( SOC, SOC, Op_RegF,  2,  f2->as_VMReg()          );
reg_def F2_H  ( SOC, SOC, Op_RegF,  2,  f2->as_VMReg()->next()  );
reg_def F3    ( SOC, SOC, Op_RegF,  3,  f3->as_VMReg()          );
reg_def F3_H  ( SOC, SOC, Op_RegF,  3,  f3->as_VMReg()->next()  );
reg_def F4    ( SOC, SOC, Op_RegF,  4,  f4->as_VMReg()          );
reg_def F4_H  ( SOC, SOC, Op_RegF,  4,  f4->as_VMReg()->next()  );
reg_def F5    ( SOC, SOC, Op_RegF,  5,  f5->as_VMReg()          );
reg_def F5_H  ( SOC, SOC, Op_RegF,  5,  f5->as_VMReg()->next()  );
reg_def F6    ( SOC, SOC, Op_RegF,  6,  f6->as_VMReg()          );
reg_def F6_H  ( SOC, SOC, Op_RegF,  6,  f6->as_VMReg()->next()  );
reg_def F7    ( SOC, SOC, Op_RegF,  7,  f7->as_VMReg()          );
reg_def F7_H  ( SOC, SOC, Op_RegF,  7,  f7->as_VMReg()->next()  );
reg_def F8    ( SOC, SOE, Op_RegF,  8,  f8->as_VMReg()          );
reg_def F8_H  ( SOC, SOE, Op_RegF,  8,  f8->as_VMReg()->next()  );
reg_def F9    ( SOC, SOE, Op_RegF,  9,  f9->as_VMReg()          );
reg_def F9_H  ( SOC, SOE, Op_RegF,  9,  f9->as_VMReg()->next()  );
reg_def F10   ( SOC, SOC, Op_RegF,  10, f10->as_VMReg()         );
reg_def F10_H ( SOC, SOC, Op_RegF,  10, f10->as_VMReg()->next() );
reg_def F11   ( SOC, SOC, Op_RegF,  11, f11->as_VMReg()         );
reg_def F11_H ( SOC, SOC, Op_RegF,  11, f11->as_VMReg()->next() );
reg_def F12   ( SOC, SOC, Op_RegF,  12, f12->as_VMReg()         );
reg_def F12_H ( SOC, SOC, Op_RegF,  12, f12->as_VMReg()->next() );
reg_def F13   ( SOC, SOC, Op_RegF,  13, f13->as_VMReg()         );
reg_def F13_H ( SOC, SOC, Op_RegF,  13, f13->as_VMReg()->next() );
reg_def F14   ( SOC, SOC, Op_RegF,  14, f14->as_VMReg()         );
reg_def F14_H ( SOC, SOC, Op_RegF,  14, f14->as_VMReg()->next() );
reg_def F15   ( SOC, SOC, Op_RegF,  15, f15->as_VMReg()         );
reg_def F15_H ( SOC, SOC, Op_RegF,  15, f15->as_VMReg()->next() );
reg_def F16   ( SOC, SOC, Op_RegF,  16, f16->as_VMReg()         );
reg_def F16_H ( SOC, SOC, Op_RegF,  16, f16->as_VMReg()->next() );
reg_def F17   ( SOC, SOC, Op_RegF,  17, f17->as_VMReg()         );
reg_def F17_H ( SOC, SOC, Op_RegF,  17, f17->as_VMReg()->next() );
reg_def F18   ( SOC, SOE, Op_RegF,  18, f18->as_VMReg()         );
reg_def F18_H ( SOC, SOE, Op_RegF,  18, f18->as_VMReg()->next() );
reg_def F19   ( SOC, SOE, Op_RegF,  19, f19->as_VMReg()         );
reg_def F19_H ( SOC, SOE, Op_RegF,  19, f19->as_VMReg()->next() );
reg_def F20   ( SOC, SOE, Op_RegF,  20, f20->as_VMReg()         );
reg_def F20_H ( SOC, SOE, Op_RegF,  20, f20->as_VMReg()->next() );
reg_def F21   ( SOC, SOE, Op_RegF,  21, f21->as_VMReg()         );
reg_def F21_H ( SOC, SOE, Op_RegF,  21, f21->as_VMReg()->next() );
reg_def F22   ( SOC, SOE, Op_RegF,  22, f22->as_VMReg()         );
reg_def F22_H ( SOC, SOE, Op_RegF,  22, f22->as_VMReg()->next() );
reg_def F23   ( SOC, SOE, Op_RegF,  23, f23->as_VMReg()         );
reg_def F23_H ( SOC, SOE, Op_RegF,  23, f23->as_VMReg()->next() );
reg_def F24   ( SOC, SOE, Op_RegF,  24, f24->as_VMReg()         );
reg_def F24_H ( SOC, SOE, Op_RegF,  24, f24->as_VMReg()->next() );
reg_def F25   ( SOC, SOE, Op_RegF,  25, f25->as_VMReg()         );
reg_def F25_H ( SOC, SOE, Op_RegF,  25, f25->as_VMReg()->next() );
reg_def F26   ( SOC, SOE, Op_RegF,  26, f26->as_VMReg()         );
reg_def F26_H ( SOC, SOE, Op_RegF,  26, f26->as_VMReg()->next() );
reg_def F27   ( SOC, SOE, Op_RegF,  27, f27->as_VMReg()         );
reg_def F27_H ( SOC, SOE, Op_RegF,  27, f27->as_VMReg()->next() );
reg_def F28   ( SOC, SOC, Op_RegF,  28, f28->as_VMReg()         );
reg_def F28_H ( SOC, SOC, Op_RegF,  28, f28->as_VMReg()->next() );
reg_def F29   ( SOC, SOC, Op_RegF,  29, f29->as_VMReg()         );
reg_def F29_H ( SOC, SOC, Op_RegF,  29, f29->as_VMReg()->next() );
reg_def F30   ( SOC, SOC, Op_RegF,  30, f30->as_VMReg()         );
reg_def F30_H ( SOC, SOC, Op_RegF,  30, f30->as_VMReg()->next() );
reg_def F31   ( SOC, SOC, Op_RegF,  31, f31->as_VMReg()         );
reg_def F31_H ( SOC, SOC, Op_RegF,  31, f31->as_VMReg()->next() );
  228 
  229 // ----------------------------
  230 // Vector Registers
  231 // ----------------------------
  232 
  233 // For RVV vector registers, we simply extend vector register size to 4
  234 // 'logical' slots. This is nominally 128 bits but it actually covers
  235 // all possible 'physical' RVV vector register lengths from 128 ~ 1024
  236 // bits. The 'physical' RVV vector register length is detected during
  237 // startup, so the register allocator is able to identify the correct
  238 // number of bytes needed for an RVV spill/unspill.
  239 
// All vector registers are defined caller-save (SOC) for both columns —
// the RVV ABI preserves no vector state across calls. Each register takes
// four 'logical' slots (base, _H, _J, _K), see the comment above.
// NOTE(review): v0 is also the RVV mask register per the V-extension spec;
// instruction patterns that need a mask must pin it explicitly.
reg_def V0    ( SOC, SOC, Op_VecA, 0,  v0->as_VMReg()           );
reg_def V0_H  ( SOC, SOC, Op_VecA, 0,  v0->as_VMReg()->next()   );
reg_def V0_J  ( SOC, SOC, Op_VecA, 0,  v0->as_VMReg()->next(2)  );
reg_def V0_K  ( SOC, SOC, Op_VecA, 0,  v0->as_VMReg()->next(3)  );

reg_def V1    ( SOC, SOC, Op_VecA, 1,  v1->as_VMReg()           );
reg_def V1_H  ( SOC, SOC, Op_VecA, 1,  v1->as_VMReg()->next()   );
reg_def V1_J  ( SOC, SOC, Op_VecA, 1,  v1->as_VMReg()->next(2)  );
reg_def V1_K  ( SOC, SOC, Op_VecA, 1,  v1->as_VMReg()->next(3)  );

reg_def V2    ( SOC, SOC, Op_VecA, 2,  v2->as_VMReg()           );
reg_def V2_H  ( SOC, SOC, Op_VecA, 2,  v2->as_VMReg()->next()   );
reg_def V2_J  ( SOC, SOC, Op_VecA, 2,  v2->as_VMReg()->next(2)  );
reg_def V2_K  ( SOC, SOC, Op_VecA, 2,  v2->as_VMReg()->next(3)  );

reg_def V3    ( SOC, SOC, Op_VecA, 3,  v3->as_VMReg()           );
reg_def V3_H  ( SOC, SOC, Op_VecA, 3,  v3->as_VMReg()->next()   );
reg_def V3_J  ( SOC, SOC, Op_VecA, 3,  v3->as_VMReg()->next(2)  );
reg_def V3_K  ( SOC, SOC, Op_VecA, 3,  v3->as_VMReg()->next(3)  );

reg_def V4    ( SOC, SOC, Op_VecA, 4,  v4->as_VMReg()           );
reg_def V4_H  ( SOC, SOC, Op_VecA, 4,  v4->as_VMReg()->next()   );
reg_def V4_J  ( SOC, SOC, Op_VecA, 4,  v4->as_VMReg()->next(2)  );
reg_def V4_K  ( SOC, SOC, Op_VecA, 4,  v4->as_VMReg()->next(3)  );

reg_def V5    ( SOC, SOC, Op_VecA, 5,  v5->as_VMReg()           );
reg_def V5_H  ( SOC, SOC, Op_VecA, 5,  v5->as_VMReg()->next()   );
reg_def V5_J  ( SOC, SOC, Op_VecA, 5,  v5->as_VMReg()->next(2)  );
reg_def V5_K  ( SOC, SOC, Op_VecA, 5,  v5->as_VMReg()->next(3)  );

reg_def V6    ( SOC, SOC, Op_VecA, 6,  v6->as_VMReg()           );
reg_def V6_H  ( SOC, SOC, Op_VecA, 6,  v6->as_VMReg()->next()   );
reg_def V6_J  ( SOC, SOC, Op_VecA, 6,  v6->as_VMReg()->next(2)  );
reg_def V6_K  ( SOC, SOC, Op_VecA, 6,  v6->as_VMReg()->next(3)  );

reg_def V7    ( SOC, SOC, Op_VecA, 7,  v7->as_VMReg()           );
reg_def V7_H  ( SOC, SOC, Op_VecA, 7,  v7->as_VMReg()->next()   );
reg_def V7_J  ( SOC, SOC, Op_VecA, 7,  v7->as_VMReg()->next(2)  );
reg_def V7_K  ( SOC, SOC, Op_VecA, 7,  v7->as_VMReg()->next(3)  );

reg_def V8    ( SOC, SOC, Op_VecA, 8,  v8->as_VMReg()           );
reg_def V8_H  ( SOC, SOC, Op_VecA, 8,  v8->as_VMReg()->next()   );
reg_def V8_J  ( SOC, SOC, Op_VecA, 8,  v8->as_VMReg()->next(2)  );
reg_def V8_K  ( SOC, SOC, Op_VecA, 8,  v8->as_VMReg()->next(3)  );

reg_def V9    ( SOC, SOC, Op_VecA, 9,  v9->as_VMReg()           );
reg_def V9_H  ( SOC, SOC, Op_VecA, 9,  v9->as_VMReg()->next()   );
reg_def V9_J  ( SOC, SOC, Op_VecA, 9,  v9->as_VMReg()->next(2)  );
reg_def V9_K  ( SOC, SOC, Op_VecA, 9,  v9->as_VMReg()->next(3)  );

reg_def V10   ( SOC, SOC, Op_VecA, 10, v10->as_VMReg()          );
reg_def V10_H ( SOC, SOC, Op_VecA, 10, v10->as_VMReg()->next()  );
reg_def V10_J ( SOC, SOC, Op_VecA, 10, v10->as_VMReg()->next(2) );
reg_def V10_K ( SOC, SOC, Op_VecA, 10, v10->as_VMReg()->next(3) );

reg_def V11   ( SOC, SOC, Op_VecA, 11, v11->as_VMReg()          );
reg_def V11_H ( SOC, SOC, Op_VecA, 11, v11->as_VMReg()->next()  );
reg_def V11_J ( SOC, SOC, Op_VecA, 11, v11->as_VMReg()->next(2) );
reg_def V11_K ( SOC, SOC, Op_VecA, 11, v11->as_VMReg()->next(3) );

reg_def V12   ( SOC, SOC, Op_VecA, 12, v12->as_VMReg()          );
reg_def V12_H ( SOC, SOC, Op_VecA, 12, v12->as_VMReg()->next()  );
reg_def V12_J ( SOC, SOC, Op_VecA, 12, v12->as_VMReg()->next(2) );
reg_def V12_K ( SOC, SOC, Op_VecA, 12, v12->as_VMReg()->next(3) );

reg_def V13   ( SOC, SOC, Op_VecA, 13, v13->as_VMReg()          );
reg_def V13_H ( SOC, SOC, Op_VecA, 13, v13->as_VMReg()->next()  );
reg_def V13_J ( SOC, SOC, Op_VecA, 13, v13->as_VMReg()->next(2) );
reg_def V13_K ( SOC, SOC, Op_VecA, 13, v13->as_VMReg()->next(3) );

reg_def V14   ( SOC, SOC, Op_VecA, 14, v14->as_VMReg()          );
reg_def V14_H ( SOC, SOC, Op_VecA, 14, v14->as_VMReg()->next()  );
reg_def V14_J ( SOC, SOC, Op_VecA, 14, v14->as_VMReg()->next(2) );
reg_def V14_K ( SOC, SOC, Op_VecA, 14, v14->as_VMReg()->next(3) );

reg_def V15   ( SOC, SOC, Op_VecA, 15, v15->as_VMReg()          );
reg_def V15_H ( SOC, SOC, Op_VecA, 15, v15->as_VMReg()->next()  );
reg_def V15_J ( SOC, SOC, Op_VecA, 15, v15->as_VMReg()->next(2) );
reg_def V15_K ( SOC, SOC, Op_VecA, 15, v15->as_VMReg()->next(3) );

reg_def V16   ( SOC, SOC, Op_VecA, 16, v16->as_VMReg()          );
reg_def V16_H ( SOC, SOC, Op_VecA, 16, v16->as_VMReg()->next()  );
reg_def V16_J ( SOC, SOC, Op_VecA, 16, v16->as_VMReg()->next(2) );
reg_def V16_K ( SOC, SOC, Op_VecA, 16, v16->as_VMReg()->next(3) );

reg_def V17   ( SOC, SOC, Op_VecA, 17, v17->as_VMReg()          );
reg_def V17_H ( SOC, SOC, Op_VecA, 17, v17->as_VMReg()->next()  );
reg_def V17_J ( SOC, SOC, Op_VecA, 17, v17->as_VMReg()->next(2) );
reg_def V17_K ( SOC, SOC, Op_VecA, 17, v17->as_VMReg()->next(3) );

reg_def V18   ( SOC, SOC, Op_VecA, 18, v18->as_VMReg()          );
reg_def V18_H ( SOC, SOC, Op_VecA, 18, v18->as_VMReg()->next()  );
reg_def V18_J ( SOC, SOC, Op_VecA, 18, v18->as_VMReg()->next(2) );
reg_def V18_K ( SOC, SOC, Op_VecA, 18, v18->as_VMReg()->next(3) );

reg_def V19   ( SOC, SOC, Op_VecA, 19, v19->as_VMReg()          );
reg_def V19_H ( SOC, SOC, Op_VecA, 19, v19->as_VMReg()->next()  );
reg_def V19_J ( SOC, SOC, Op_VecA, 19, v19->as_VMReg()->next(2) );
reg_def V19_K ( SOC, SOC, Op_VecA, 19, v19->as_VMReg()->next(3) );

reg_def V20   ( SOC, SOC, Op_VecA, 20, v20->as_VMReg()          );
reg_def V20_H ( SOC, SOC, Op_VecA, 20, v20->as_VMReg()->next()  );
reg_def V20_J ( SOC, SOC, Op_VecA, 20, v20->as_VMReg()->next(2) );
reg_def V20_K ( SOC, SOC, Op_VecA, 20, v20->as_VMReg()->next(3) );

reg_def V21   ( SOC, SOC, Op_VecA, 21, v21->as_VMReg()          );
reg_def V21_H ( SOC, SOC, Op_VecA, 21, v21->as_VMReg()->next()  );
reg_def V21_J ( SOC, SOC, Op_VecA, 21, v21->as_VMReg()->next(2) );
reg_def V21_K ( SOC, SOC, Op_VecA, 21, v21->as_VMReg()->next(3) );

reg_def V22   ( SOC, SOC, Op_VecA, 22, v22->as_VMReg()          );
reg_def V22_H ( SOC, SOC, Op_VecA, 22, v22->as_VMReg()->next()  );
reg_def V22_J ( SOC, SOC, Op_VecA, 22, v22->as_VMReg()->next(2) );
reg_def V22_K ( SOC, SOC, Op_VecA, 22, v22->as_VMReg()->next(3) );

reg_def V23   ( SOC, SOC, Op_VecA, 23, v23->as_VMReg()          );
reg_def V23_H ( SOC, SOC, Op_VecA, 23, v23->as_VMReg()->next()  );
reg_def V23_J ( SOC, SOC, Op_VecA, 23, v23->as_VMReg()->next(2) );
reg_def V23_K ( SOC, SOC, Op_VecA, 23, v23->as_VMReg()->next(3) );

reg_def V24   ( SOC, SOC, Op_VecA, 24, v24->as_VMReg()          );
reg_def V24_H ( SOC, SOC, Op_VecA, 24, v24->as_VMReg()->next()  );
reg_def V24_J ( SOC, SOC, Op_VecA, 24, v24->as_VMReg()->next(2) );
reg_def V24_K ( SOC, SOC, Op_VecA, 24, v24->as_VMReg()->next(3) );

reg_def V25   ( SOC, SOC, Op_VecA, 25, v25->as_VMReg()          );
reg_def V25_H ( SOC, SOC, Op_VecA, 25, v25->as_VMReg()->next()  );
reg_def V25_J ( SOC, SOC, Op_VecA, 25, v25->as_VMReg()->next(2) );
reg_def V25_K ( SOC, SOC, Op_VecA, 25, v25->as_VMReg()->next(3) );

reg_def V26   ( SOC, SOC, Op_VecA, 26, v26->as_VMReg()          );
reg_def V26_H ( SOC, SOC, Op_VecA, 26, v26->as_VMReg()->next()  );
reg_def V26_J ( SOC, SOC, Op_VecA, 26, v26->as_VMReg()->next(2) );
reg_def V26_K ( SOC, SOC, Op_VecA, 26, v26->as_VMReg()->next(3) );

reg_def V27   ( SOC, SOC, Op_VecA, 27, v27->as_VMReg()          );
reg_def V27_H ( SOC, SOC, Op_VecA, 27, v27->as_VMReg()->next()  );
reg_def V27_J ( SOC, SOC, Op_VecA, 27, v27->as_VMReg()->next(2) );
reg_def V27_K ( SOC, SOC, Op_VecA, 27, v27->as_VMReg()->next(3) );

reg_def V28   ( SOC, SOC, Op_VecA, 28, v28->as_VMReg()          );
reg_def V28_H ( SOC, SOC, Op_VecA, 28, v28->as_VMReg()->next()  );
reg_def V28_J ( SOC, SOC, Op_VecA, 28, v28->as_VMReg()->next(2) );
reg_def V28_K ( SOC, SOC, Op_VecA, 28, v28->as_VMReg()->next(3) );

reg_def V29   ( SOC, SOC, Op_VecA, 29, v29->as_VMReg()          );
reg_def V29_H ( SOC, SOC, Op_VecA, 29, v29->as_VMReg()->next()  );
reg_def V29_J ( SOC, SOC, Op_VecA, 29, v29->as_VMReg()->next(2) );
reg_def V29_K ( SOC, SOC, Op_VecA, 29, v29->as_VMReg()->next(3) );

reg_def V30   ( SOC, SOC, Op_VecA, 30, v30->as_VMReg()          );
reg_def V30_H ( SOC, SOC, Op_VecA, 30, v30->as_VMReg()->next()  );
reg_def V30_J ( SOC, SOC, Op_VecA, 30, v30->as_VMReg()->next(2) );
reg_def V30_K ( SOC, SOC, Op_VecA, 30, v30->as_VMReg()->next(3) );

reg_def V31   ( SOC, SOC, Op_VecA, 31, v31->as_VMReg()          );
reg_def V31_H ( SOC, SOC, Op_VecA, 31, v31->as_VMReg()->next()  );
reg_def V31_J ( SOC, SOC, Op_VecA, 31, v31->as_VMReg()->next(2) );
reg_def V31_K ( SOC, SOC, Op_VecA, 31, v31->as_VMReg()->next(3) );
  399 
  400 // ----------------------------
  401 // Special Registers
  402 // ----------------------------
  403 
// On riscv, the physical flag register is missing, so we use t1 (x6,
// encoding 6 below) instead, to bridge the RegFlags semantics in
// share/opto. x5-x6 are kept non-allocatable (see above), so t1 is free
// to stand in for the flags.

reg_def RFLAGS   (SOC, SOC, Op_RegFlags, 6, x6->as_VMReg()        ); // t1
  408 
  409 // Specify priority of register selection within phases of register
  410 // allocation.  Highest priority is first.  A useful heuristic is to
  411 // give registers a low priority when they are required by machine
  412 // instructions, like EAX and EDX on I486, and choose no-save registers
  413 // before save-on-call, & save-on-call before save-on-entry.  Registers
  414 // which participate in fixed calling sequences should come last.
  415 // Registers which are used as pairs must fall on an even boundary.
  416 
// chunk0: integer registers in allocation-priority order — scratch
// temporaries first, then C argument registers, then callee-saved
// registers, with non-allocatable registers last (see priority note above).
alloc_class chunk0(
    // volatiles
    R7,  R7_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H,

    // arg registers
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,

    // non-volatiles
    R9,  R9_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers
    R23, R23_H, // java thread
    R27, R27_H, // heapbase
    R4,  R4_H,  // thread (tp)
    R8,  R8_H,  // fp
    R0,  R0_H,  // zero
    R1,  R1_H,  // ra
    R2,  R2_H,  // sp
    R3,  R3_H,  // gp
);
  456 
// chunk1: float registers, same priority scheme as chunk0 — caller-save
// temporaries first, then argument registers, then C-ABI callee-saved.
alloc_class chunk1(

    // volatiles (SOC — C ABI temporaries, no save needed across calls)
    F0,  F0_H,
    F1,  F1_H,
    F2,  F2_H,
    F3,  F3_H,
    F4,  F4_H,
    F5,  F5_H,
    F6,  F6_H,
    F7,  F7_H,
    F28, F28_H,
    F29, F29_H,
    F30, F30_H,
    F31, F31_H,

    // arg registers
    F10, F10_H,
    F11, F11_H,
    F12, F12_H,
    F13, F13_H,
    F14, F14_H,
    F15, F15_H,
    F16, F16_H,
    F17, F17_H,

    // non-volatiles (callee-saved in the C ABI)
    F8,  F8_H,
    F9,  F9_H,
    F18, F18_H,
    F19, F19_H,
    F20, F20_H,
    F21, F21_H,
    F22, F22_H,
    F23, F23_H,
    F24, F24_H,
    F25, F25_H,
    F26, F26_H,
    F27, F27_H,
);
  497 
// chunk2: RVV vector registers — all caller-save, allocated in numeric order.
alloc_class chunk2(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,
);
  532 
  533 alloc_class chunk3(RFLAGS);
  534 
  535 //----------Architecture Description Register Classes--------------------------
  536 // Several register classes are automatically defined based upon information in
  537 // this architecture description.
  538 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
  539 // 2) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
  540 //
  541 
// Class for all 32 bit general purpose registers.
// Includes the non-allocatable registers (zr, ra, sp, gp, tp, java thread);
// allocatability is restricted separately via non_allocatable_reg32 below.
reg_class all_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R7,
    R8,
    R9,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30,
    R31
);
  575 
// Class for any 32 bit integer registers (excluding zr).
// Dynamic register class: the mask is computed at runtime rather than
// fixed at ADLC time (the _ANY_REG32_mask definition lives elsewhere in
// this file's source section).
reg_class any_reg32 %{
  return _ANY_REG32_mask;
%}
  580 
// Singleton class for R10 (a0) int register
reg_class int_r10_reg(R10);

// Singleton class for R12 (a2) int register
reg_class int_r12_reg(R12);

// Singleton class for R13 (a3) int register
reg_class int_r13_reg(R13);

// Singleton class for R14 (a4) int register
reg_class int_r14_reg(R14);
  592 
// Class for all long integer registers (both 32-bit halves of each).
// Like all_reg32 this includes the non-allocatable registers; see
// non_allocatable_reg below for the excluded set.
reg_class all_reg(
    R0,  R0_H,
    R1,  R1_H,
    R2,  R2_H,
    R3,  R3_H,
    R4,  R4_H,
    R7,  R7_H,
    R8,  R8_H,
    R9,  R9_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
  626 
// Class for all long integer registers (excluding zr).
// Dynamic register class: mask computed at runtime (see _ANY_REG_mask).
reg_class any_reg %{
  return _ANY_REG_mask;
%}
  631 
// Class for non-allocatable 32 bit registers — the system registers the
// register allocator must never hand out.
reg_class non_allocatable_reg32(
    R0,                       // zr
    R1,                       // ra
    R2,                       // sp
    R3,                       // gp
    R4,                       // tp
    R23                       // java thread
);
  641 
// Class for non-allocatable 64 bit registers — 64-bit counterpart of
// non_allocatable_reg32 (both halves of each excluded register).
reg_class non_allocatable_reg(
    R0,  R0_H,                // zr
    R1,  R1_H,                // ra
    R2,  R2_H,                // sp
    R3,  R3_H,                // gp
    R4,  R4_H,                // tp
    R23, R23_H                // java thread
);
  651 
// The following register classes are dynamic: each mask is computed at
// runtime (the *_mask variables are defined elsewhere in this file's
// source section) instead of being fixed at ADLC time.

// Class for all non-special integer registers
reg_class no_special_reg32 %{
  return _NO_SPECIAL_REG32_mask;
%}

// Class for all non-special long integer registers
reg_class no_special_reg %{
  return _NO_SPECIAL_REG_mask;
%}

// Class for all pointer registers
reg_class ptr_reg %{
  return _PTR_REG_mask;
%}

// Class for all non-special pointer registers
reg_class no_special_ptr_reg %{
  return _NO_SPECIAL_PTR_REG_mask;
%}

// Class for all non-special pointer registers (excluding fp)
reg_class no_special_no_fp_ptr_reg %{
  return _NO_SPECIAL_NO_FP_PTR_REG_mask;
%}
  675 
  676 // Class for 64 bit register r10
  677 reg_class r10_reg(
  678     R10, R10_H
  679 );
  680 
  681 // Class for 64 bit register r11
  682 reg_class r11_reg(
  683     R11, R11_H
  684 );
  685 
  686 // Class for 64 bit register r12
  687 reg_class r12_reg(
  688     R12, R12_H
  689 );
  690 
  691 // Class for 64 bit register r13
  692 reg_class r13_reg(
  693     R13, R13_H
  694 );
  695 
  696 // Class for 64 bit register r14
  697 reg_class r14_reg(
  698     R14, R14_H
  699 );
  700 
  701 // Class for 64 bit register r15
  702 reg_class r15_reg(
  703     R15, R15_H
  704 );
  705 
  706 // Class for 64 bit register r16
  707 reg_class r16_reg(
  708     R16, R16_H
  709 );
  710 
  711 // Class for method register
  712 reg_class method_reg(
  713     R31, R31_H
  714 );
  715 
  716 // Class for java thread register
  717 reg_class java_thread_reg(
  718     R23, R23_H
  719 );
  720 
// Class for 64 bit register r28
reg_class r28_reg(
    R28, R28_H
);

// Class for 64 bit register r29
reg_class r29_reg(
    R29, R29_H
);

// Class for 64 bit register r30
reg_class r30_reg(
    R30, R30_H
);

// Class for 64 bit register r31
reg_class r31_reg(
    R31, R31_H
);
  736 
// Class for zero register
  738 reg_class zr_reg(
  739     R0, R0_H
  740 );
  741 
  742 // Class for thread register
  743 reg_class thread_reg(
  744     R4, R4_H
  745 );
  746 
  747 // Class for frame pointer register
  748 reg_class fp_reg(
  749     R8, R8_H
  750 );
  751 
  752 // Class for link register
  753 reg_class ra_reg(
  754     R1, R1_H
  755 );
  756 
  757 // Class for long sp register
  758 reg_class sp_reg(
  759     R2, R2_H
  760 );
  761 
  762 // Class for all float registers
  763 reg_class float_reg(
  764     F0,
  765     F1,
  766     F2,
  767     F3,
  768     F4,
  769     F5,
  770     F6,
  771     F7,
  772     F8,
  773     F9,
  774     F10,
  775     F11,
  776     F12,
  777     F13,
  778     F14,
  779     F15,
  780     F16,
  781     F17,
  782     F18,
  783     F19,
  784     F20,
  785     F21,
  786     F22,
  787     F23,
  788     F24,
  789     F25,
  790     F26,
  791     F27,
  792     F28,
  793     F29,
  794     F30,
  795     F31
  796 );
  797 
  798 // Double precision float registers have virtual `high halves' that
  799 // are needed by the allocator.
  800 // Class for all double registers
  801 reg_class double_reg(
  802     F0,  F0_H,
  803     F1,  F1_H,
  804     F2,  F2_H,
  805     F3,  F3_H,
  806     F4,  F4_H,
  807     F5,  F5_H,
  808     F6,  F6_H,
  809     F7,  F7_H,
  810     F8,  F8_H,
  811     F9,  F9_H,
  812     F10, F10_H,
  813     F11, F11_H,
  814     F12, F12_H,
  815     F13, F13_H,
  816     F14, F14_H,
  817     F15, F15_H,
  818     F16, F16_H,
  819     F17, F17_H,
  820     F18, F18_H,
  821     F19, F19_H,
  822     F20, F20_H,
  823     F21, F21_H,
  824     F22, F22_H,
  825     F23, F23_H,
  826     F24, F24_H,
  827     F25, F25_H,
  828     F26, F26_H,
  829     F27, F27_H,
  830     F28, F28_H,
  831     F29, F29_H,
  832     F30, F30_H,
  833     F31, F31_H
  834 );
  835 
  836 // Class for RVV vector registers
  837 // Note: v0, v30 and v31 are used as mask registers.
  838 reg_class vectora_reg(
  839     V1, V1_H, V1_J, V1_K,
  840     V2, V2_H, V2_J, V2_K,
  841     V3, V3_H, V3_J, V3_K,
  842     V4, V4_H, V4_J, V4_K,
  843     V5, V5_H, V5_J, V5_K,
  844     V6, V6_H, V6_J, V6_K,
  845     V7, V7_H, V7_J, V7_K,
  846     V8, V8_H, V8_J, V8_K,
  847     V9, V9_H, V9_J, V9_K,
  848     V10, V10_H, V10_J, V10_K,
  849     V11, V11_H, V11_J, V11_K,
  850     V12, V12_H, V12_J, V12_K,
  851     V13, V13_H, V13_J, V13_K,
  852     V14, V14_H, V14_J, V14_K,
  853     V15, V15_H, V15_J, V15_K,
  854     V16, V16_H, V16_J, V16_K,
  855     V17, V17_H, V17_J, V17_K,
  856     V18, V18_H, V18_J, V18_K,
  857     V19, V19_H, V19_J, V19_K,
  858     V20, V20_H, V20_J, V20_K,
  859     V21, V21_H, V21_J, V21_K,
  860     V22, V22_H, V22_J, V22_K,
  861     V23, V23_H, V23_J, V23_K,
  862     V24, V24_H, V24_J, V24_K,
  863     V25, V25_H, V25_J, V25_K,
  864     V26, V26_H, V26_J, V26_K,
  865     V27, V27_H, V27_J, V27_K,
  866     V28, V28_H, V28_J, V28_K,
  867     V29, V29_H, V29_J, V29_K
  868 );
  869 
  870 // Class for 64 bit register f0
  871 reg_class f0_reg(
  872     F0, F0_H
  873 );
  874 
  875 // Class for 64 bit register f1
  876 reg_class f1_reg(
  877     F1, F1_H
  878 );
  879 
  880 // Class for 64 bit register f2
  881 reg_class f2_reg(
  882     F2, F2_H
  883 );
  884 
  885 // Class for 64 bit register f3
  886 reg_class f3_reg(
  887     F3, F3_H
  888 );
  889 
  890 // class for vector register v1
  891 reg_class v1_reg(
  892     V1, V1_H, V1_J, V1_K
  893 );
  894 
  895 // class for vector register v2
  896 reg_class v2_reg(
  897     V2, V2_H, V2_J, V2_K
  898 );
  899 
  900 // class for vector register v3
  901 reg_class v3_reg(
  902     V3, V3_H, V3_J, V3_K
  903 );
  904 
  905 // class for vector register v4
  906 reg_class v4_reg(
  907     V4, V4_H, V4_J, V4_K
  908 );
  909 
  910 // class for vector register v5
  911 reg_class v5_reg(
  912     V5, V5_H, V5_J, V5_K
  913 );
  914 
  915 // class for vector register v6
  916 reg_class v6_reg(
  917     V6, V6_H, V6_J, V6_K
  918 );
  919 
  920 // class for vector register v7
  921 reg_class v7_reg(
  922     V7, V7_H, V7_J, V7_K
  923 );
  924 
  925 // class for vector register v8
  926 reg_class v8_reg(
  927     V8, V8_H, V8_J, V8_K
  928 );
  929 
  930 // class for vector register v9
  931 reg_class v9_reg(
  932     V9, V9_H, V9_J, V9_K
  933 );
  934 
  935 // class for vector register v10
  936 reg_class v10_reg(
  937     V10, V10_H, V10_J, V10_K
  938 );
  939 
  940 // class for vector register v11
  941 reg_class v11_reg(
  942     V11, V11_H, V11_J, V11_K
  943 );
  944 
  945 // class for condition codes
  946 reg_class reg_flags(RFLAGS);
  947 
  948 // Class for RVV v0 mask register
  949 // https://github.com/riscv/riscv-v-spec/blob/master/v-spec.adoc#53-vector-masking
  950 // The mask value used to control execution of a masked vector
  951 // instruction is always supplied by vector register v0.
  952 reg_class vmask_reg_v0 (
  953     V0
  954 );
  955 
  956 // Class for RVV mask registers
  957 // We need two more vmask registers to do the vector mask logical ops,
  958 // so define v30, v31 as mask register too.
  959 reg_class vmask_reg (
  960     V0,
  961     V30,
  962     V31
  963 );
  964 %}
  965 
  966 //----------DEFINITION BLOCK---------------------------------------------------
  967 // Define name --> value mappings to inform the ADLC of an integer valued name
  968 // Current support includes integer values in the range [0, 0x7FFFFFFF]
  969 // Format:
  970 //        int_def  <name>         ( <int_value>, <expression>);
  971 // Generated Code in ad_<arch>.hpp
  972 //        #define  <name>   (<expression>)
  973 //        // value == <int_value>
  974 // Generated code in ad_<arch>.cpp adlc_verification()
  975 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
  976 //
  977 
  978 // we follow the ppc-aix port in using a simple cost model which ranks
  979 // register operations as cheap, memory ops as more expensive and
  980 // branches as most expensive. the first two have a low as well as a
  981 // normal cost. huge cost appears to be a way of saying don't do
  982 // something
  983 
  984 definitions %{
  985   // The default cost (of a register move instruction).
  986   int_def DEFAULT_COST         (  100,               100);
  987   int_def ALU_COST             (  100,  1 * DEFAULT_COST);          // unknown, const, arith, shift, slt,
  988                                                                     // multi, auipc, nop, logical, move
  989   int_def LOAD_COST            (  300,  3 * DEFAULT_COST);          // load, fpload
  990   int_def STORE_COST           (  100,  1 * DEFAULT_COST);          // store, fpstore
  991   int_def XFER_COST            (  300,  3 * DEFAULT_COST);          // mfc, mtc, fcvt, fmove, fcmp
  992   int_def FMVX_COST            (  100,  1 * DEFAULT_COST);          // shuffles with no conversion
  993   int_def BRANCH_COST          (  200,  2 * DEFAULT_COST);          // branch, jmp, call
  994   int_def IMUL_COST            ( 1000, 10 * DEFAULT_COST);          // imul
  995   int_def IDIVSI_COST          ( 3400, 34 * DEFAULT_COST);          // idivsi
  996   int_def IDIVDI_COST          ( 6600, 66 * DEFAULT_COST);          // idivdi
  997   int_def FMUL_SINGLE_COST     (  500,  5 * DEFAULT_COST);          // fmul, fmadd
  998   int_def FMUL_DOUBLE_COST     (  700,  7 * DEFAULT_COST);          // fmul, fmadd
  999   int_def FDIV_COST            ( 2000, 20 * DEFAULT_COST);          // fdiv
 1000   int_def FSQRT_COST           ( 2500, 25 * DEFAULT_COST);          // fsqrt
 1001   int_def VOLATILE_REF_COST    ( 1000, 10 * DEFAULT_COST);
  int_def CACHE_MISS_COST      ( 2000, 20 * DEFAULT_COST);          // typical cache miss penalty
 1003 %}
 1004 
 1005 
 1006 
 1007 //----------SOURCE BLOCK-------------------------------------------------------
 1008 // This is a block of C++ code which provides values, functions, and
 1009 // definitions necessary in the rest of the architecture description
 1010 
 1011 source_hpp %{
 1012 
 1013 #include "asm/macroAssembler.hpp"
 1014 #include "gc/shared/barrierSetAssembler.hpp"
 1015 #include "gc/shared/cardTable.hpp"
 1016 #include "gc/shared/cardTableBarrierSet.hpp"
 1017 #include "gc/shared/collectedHeap.hpp"
 1018 #include "opto/addnode.hpp"
 1019 #include "opto/convertnode.hpp"
 1020 #include "runtime/objectMonitor.hpp"
 1021 
 1022 extern RegMask _ANY_REG32_mask;
 1023 extern RegMask _ANY_REG_mask;
 1024 extern RegMask _PTR_REG_mask;
 1025 extern RegMask _NO_SPECIAL_REG32_mask;
 1026 extern RegMask _NO_SPECIAL_REG_mask;
 1027 extern RegMask _NO_SPECIAL_PTR_REG_mask;
 1028 extern RegMask _NO_SPECIAL_NO_FP_PTR_REG_mask;
 1029 
// Platform hooks queried by the generic C2 output code; RISC-V emits no
// call trampoline stubs, so both queries report zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
 1047 
// Platform hooks for the deopt handler stub emitted at the end of each
// compiled method.
class HandlerImpl {

 public:

  // Emits the deopt handler code; defined in the source block / .cpp side.
  static int emit_deopt_handler(C2_MacroAssembler* masm);

  // Worst-case size reserved for the deopt handler.
  static uint size_deopt_handler() {
    // count far call + j
    return NativeInstruction::instruction_size + MacroAssembler::far_branch_size();
  }
};
 1059 
// Platform-dependent extension point for Node flags; RISC-V defines no
// additional flags, so the enum just re-exports the generic last flag.
class Node::PD {
public:
  enum NodeFlags {
    _last_flag = Node::_last_flag
  };
};
 1066 
 1067 bool is_CAS(int opcode, bool maybe_volatile);
 1068 
 1069 // predicate controlling translation of CompareAndSwapX
 1070 bool needs_acquiring_load_reserved(const Node *load);
 1071 
 1072 // predicate controlling addressing modes
 1073 bool size_fits_all_mem_uses(AddPNode* addp, int shift);
 1074 %}
 1075 
 1076 source %{
 1077 
 1078 // Derived RegMask with conditionally allocatable registers
 1079 
 1080 RegMask _ANY_REG32_mask;
 1081 RegMask _ANY_REG_mask;
 1082 RegMask _PTR_REG_mask;
 1083 RegMask _NO_SPECIAL_REG32_mask;
 1084 RegMask _NO_SPECIAL_REG_mask;
 1085 RegMask _NO_SPECIAL_PTR_REG_mask;
 1086 RegMask _NO_SPECIAL_NO_FP_PTR_REG_mask;
 1087 
// Build the derived register masks declared above from the ADLC-generated
// base masks (_ALL_REG*_mask, _NON_ALLOCATABLE_REG*_mask, _ZR_REG_mask),
// then strip registers that become reserved under the current VM flags.
// Called once during startup, before register allocation runs.
void reg_mask_init() {

  // Any 32-bit register except the hard-wired zero register x0.
  _ANY_REG32_mask.assignFrom(_ALL_REG32_mask);
  _ANY_REG32_mask.remove(OptoReg::as_OptoReg(x0->as_VMReg()));

  // Any 64-bit register pair except zr.
  _ANY_REG_mask.assignFrom(_ALL_REG_mask);
  _ANY_REG_mask.subtract(_ZR_REG_mask);

  // Pointer registers: all 64-bit registers except zr.
  _PTR_REG_mask.assignFrom(_ALL_REG_mask);
  _PTR_REG_mask.subtract(_ZR_REG_mask);

  // "No special" masks exclude zr/ra/sp/gp/tp and the Java thread register
  // (see the non_allocatable_reg* classes above).
  _NO_SPECIAL_REG32_mask.assignFrom(_ALL_REG32_mask);
  _NO_SPECIAL_REG32_mask.subtract(_NON_ALLOCATABLE_REG32_mask);

  _NO_SPECIAL_REG_mask.assignFrom(_ALL_REG_mask);
  _NO_SPECIAL_REG_mask.subtract(_NON_ALLOCATABLE_REG_mask);

  _NO_SPECIAL_PTR_REG_mask.assignFrom(_ALL_REG_mask);
  _NO_SPECIAL_PTR_REG_mask.subtract(_NON_ALLOCATABLE_REG_mask);

  // x27 is not allocatable when compressed oops is on
  if (UseCompressedOops) {
    _NO_SPECIAL_REG32_mask.remove(OptoReg::as_OptoReg(x27->as_VMReg()));
    _NO_SPECIAL_REG_mask.remove(OptoReg::as_OptoReg(x27->as_VMReg()));
    _NO_SPECIAL_PTR_REG_mask.remove(OptoReg::as_OptoReg(x27->as_VMReg()));
  }

  // x8 is not allocatable when PreserveFramePointer is on
  if (PreserveFramePointer) {
    _NO_SPECIAL_REG32_mask.remove(OptoReg::as_OptoReg(x8->as_VMReg()));
    _NO_SPECIAL_REG_mask.remove(OptoReg::as_OptoReg(x8->as_VMReg()));
    _NO_SPECIAL_PTR_REG_mask.remove(OptoReg::as_OptoReg(x8->as_VMReg()));
  }

  // Derived last: no-special pointers minus the frame pointer x8,
  // regardless of PreserveFramePointer.
  _NO_SPECIAL_NO_FP_PTR_REG_mask.assignFrom(_NO_SPECIAL_PTR_REG_mask);
  _NO_SPECIAL_NO_FP_PTR_REG_mask.remove(OptoReg::as_OptoReg(x8->as_VMReg()));
}
 1125 
// No platform-specific mach-node analysis is needed on RISC-V.
void PhaseOutput::pd_perform_mach_node_analysis() {
}

// Default instruction alignment: no node requires more than 1-byte alignment
// (individual call nodes override padding separately, see compute_padding below).
int MachNode::pd_alignment_required() const {
  return 1;
}

// Generic nodes need no padding; alignment-sensitive nodes override this.
int MachNode::compute_padding(int current_offset) const {
  return 0;
}
 1136 
 1137 // is_CAS(int opcode, bool maybe_volatile)
 1138 //
 1139 // return true if opcode is one of the possible CompareAndSwapX
 1140 // values otherwise false.
bool is_CAS(int opcode, bool maybe_volatile)
{
  switch (opcode) {
    // We handle these
    // Plain CAS / atomic RMW nodes: always treated as CAS regardless of
    // the maybe_volatile argument.
    case Op_CompareAndSwapI:
    case Op_CompareAndSwapL:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_CompareAndSwapB:
    case Op_CompareAndSwapS:
    case Op_GetAndSetI:
    case Op_GetAndSetL:
    case Op_GetAndSetP:
    case Op_GetAndSetN:
    case Op_GetAndAddI:
    case Op_GetAndAddL:
      return true;
    // Exchange / weak-CAS variants: only count as CAS when the caller asks
    // for the wider (possibly volatile) interpretation.
    case Op_CompareAndExchangeI:
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeB:
    case Op_CompareAndExchangeS:
    case Op_CompareAndExchangeL:
    case Op_CompareAndExchangeP:
    case Op_WeakCompareAndSwapB:
    case Op_WeakCompareAndSwapS:
    case Op_WeakCompareAndSwapI:
    case Op_WeakCompareAndSwapL:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
      return maybe_volatile;
    default:
      return false;
  }
}
 1175 
 1176 constexpr uint64_t MAJIK_DWORD = 0xabbaabbaabbaabbaull;
 1177 
 1178 // predicate controlling translation of CAS
 1179 //
 1180 // returns true if CAS needs to use an acquiring load otherwise false
 1181 bool needs_acquiring_load_reserved(const Node *n)
 1182 {
 1183   assert(n != nullptr && is_CAS(n->Opcode(), true), "expecting a compare and swap");
 1184 
 1185   LoadStoreNode* ldst = n->as_LoadStore();
 1186   if (n != nullptr && is_CAS(n->Opcode(), false)) {
 1187     assert(ldst != nullptr && ldst->trailing_membar() != nullptr, "expected trailing membar");
 1188   } else {
 1189     return ldst != nullptr && ldst->trailing_membar() != nullptr;
 1190   }
 1191   // so we can just return true here
 1192   return true;
 1193 }
 1194 #define __ masm->
 1195 
 1196 // advance declarations for helper functions to convert register
 1197 // indices to register objects
 1198 
 1199 // the ad file has to provide implementations of certain methods
 1200 // expected by the generic code
 1201 //
 1202 // REQUIRED FUNCTIONALITY
 1203 
 1204 //=============================================================================
 1205 
 1206 // !!!!! Special hack to get all types of calls to specify the byte offset
 1207 //       from the start of the call to the point where the return address
 1208 //       will point.
 1209 
// Byte offset from the start of the call sequence to the return address.
int MachCallStaticJavaNode::ret_addr_offset()
{
  return 3 * NativeInstruction::instruction_size; // auipc + ld + jalr
}

int MachCallDynamicJavaNode::ret_addr_offset()
{
  return NativeMovConstReg::movptr2_instruction_size + (3 * NativeInstruction::instruction_size); // movptr2, auipc + ld + jalr
}

int MachCallRuntimeNode::ret_addr_offset() {
  // For address inside the code cache the call will be:
  //   auipc + jalr
  // For real runtime callouts it will be 8 instructions
  // see riscv_enc_java_to_runtime
  //   la(t0, retaddr)                                             ->  auipc + addi
  //   sd(t0, Address(xthread, JavaThread::last_Java_pc_offset())) ->  sd
  //   movptr(t1, addr, offset, t0)                                ->  lui + lui + slli + add
  //   jalr(t1, offset)                                            ->  jalr
  if (CodeCache::contains(_entry_point)) {
    return 2 * NativeInstruction::instruction_size;
  } else {
    return 8 * NativeInstruction::instruction_size;
  }
}
 1235 
 1236 //
 1237 // Compute padding required for nodes which need alignment
 1238 //
 1239 
 1240 // With RVC a call instruction may get 2-byte aligned.
 1241 // The address of the call instruction needs to be 4-byte aligned to
 1242 // ensure that it does not span a cache line so that it can be patched.
int CallStaticJavaDirectNode::compute_padding(int current_offset) const
{
  // to make sure the address of jal 4-byte aligned: pad from current_offset
  // up to the next alignment_required() boundary.
  return align_up(current_offset, alignment_required()) - current_offset;
}
 1248 
 1249 // With RVC a call instruction may get 2-byte aligned.
 1250 // The address of the call instruction needs to be 4-byte aligned to
 1251 // ensure that it does not span a cache line so that it can be patched.
int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
{
  // skip the movptr2 in MacroAssembler::ic_call():
  // lui, lui, slli, add, addi
  // Though movptr2() has already 4-byte aligned with or without RVC,
  // We need to prevent from further changes by explicitly calculating the size.
  current_offset += NativeMovConstReg::movptr2_instruction_size;
  // to make sure the address of jal 4-byte aligned.
  return align_up(current_offset, alignment_required()) - current_offset;
}
 1262 
// The four runtime-call variants below all pad the same way: up to the next
// alignment_required() boundary so the call instruction can be patched
// atomically (see the RVC note above).
int CallRuntimeDirectNode::compute_padding(int current_offset) const
{
  return align_up(current_offset, alignment_required()) - current_offset;
}

int CallLeafDirectNode::compute_padding(int current_offset) const
{
  return align_up(current_offset, alignment_required()) - current_offset;
}

int CallLeafDirectVectorNode::compute_padding(int current_offset) const
{
  return align_up(current_offset, alignment_required()) - current_offset;
}

int CallLeafNoFPDirectNode::compute_padding(int current_offset) const
{
  return align_up(current_offset, alignment_required()) - current_offset;
}
 1282 
 1283 //=============================================================================
 1284 
#ifndef PRODUCT
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  assert_cond(st != nullptr);
  st->print("BREAKPOINT");
}
#endif

// A breakpoint node emits a single ebreak instruction.
void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  __ ebreak();
}

uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  // Variable size; measured from the emitted code.
  return MachNode::size(ra_);
}
 1299 
 1300 //=============================================================================
 1301 
#ifndef PRODUCT
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif

  // Emit _count nops; under RVC each nop is the 2-byte compressed form so
  // padding granularity matches size() below.
  void MachNopNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc*) const {
    Assembler::CompressibleScope scope(masm); // nops shall be 2-byte under RVC for alignment purposes.
    for (int i = 0; i < _count; i++) {
      __ nop();
    }
  }

  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * (UseRVC ? NativeInstruction::compressed_instruction_size : NativeInstruction::instruction_size);
  }
 1318 
 1319 //=============================================================================
 1320 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::EMPTY;
 1321 
int ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}

// The constant base needs no post-register-allocation expansion on RISC-V.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}

// The node itself emits nothing; constants are reached via absolute addressing.
void MachConstantBaseNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {
  // Empty encoding
}

uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  assert_cond(st != nullptr);
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
 1345 
 1346 #ifndef PRODUCT
 1347 void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1348   assert_cond(st != nullptr && ra_ != nullptr);
 1349   Compile* C = ra_->C;
 1350 
 1351   int framesize = C->output()->frame_slots() << LogBytesPerInt;
 1352 
 1353   if (C->output()->need_stack_bang(framesize)) {
 1354     st->print("# stack bang size=%d\n\t", framesize);
 1355   }
 1356 
 1357   st->print("sub sp, sp, #%d\n\t", framesize);
 1358   st->print("sd  fp, [sp, #%d]\n\t", framesize - 2 * wordSize);
 1359   st->print("sd  ra, [sp, #%d]\n\t", framesize - wordSize);
 1360   if (PreserveFramePointer) { st->print("add fp, sp, #%d\n\t", framesize); }
 1361 
 1362   if (VerifyStackAtCalls) {
 1363     st->print("mv  t2, %ld\n\t", MAJIK_DWORD);
 1364     st->print("sd  t2, [sp, #%d]\n\t", framesize - 3 * wordSize);
 1365   }
 1366 
 1367   if (C->stub_function() == nullptr) {
 1368     st->print("ld  t0, [guard]\n\t");
 1369     st->print("membar LoadLoad\n\t");
 1370     st->print("ld  t1, [xthread, #thread_disarmed_guard_value_offset]\n\t");
 1371     st->print("beq t0, t1, skip\n\t");
 1372     st->print("jalr #nmethod_entry_barrier_stub\n\t");
 1373     st->print("j skip\n\t");
 1374     st->print("guard: int\n\t");
 1375     st->print("skip:\n\t");
 1376   }
 1377 }
 1378 #endif
 1379 
// Emit the method prolog: optional clinit barrier, stack-overflow bang,
// frame construction, optional stack canary, and the nmethod entry barrier.
void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  assert_cond(ra_ != nullptr);
  Compile* C = ra_->C;

  // n.b. frame size includes space for return pc and fp
  const int framesize = C->output()->frame_size_in_bytes();

  assert_cond(C != nullptr);

  if (C->clinit_barrier_on_entry()) {
    assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");

    Label L_skip_barrier;

    // Jump to the wrong-method stub unless the holder class is initialized.
    __ mov_metadata(t1, C->method()->holder()->constant_encoding());
    __ clinit_barrier(t1, t0, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
    __ bind(L_skip_barrier);
  }

  int bangsize = C->output()->bang_size_in_bytes();
  if (C->output()->need_stack_bang(bangsize)) {
    __ generate_stack_overflow_check(bangsize);
  }

  __ build_frame(framesize);

  if (VerifyStackAtCalls) {
    // Plant a known pattern below the saved fp/ra so stack integrity can be
    // checked at calls.
    __ mv(t2, MAJIK_DWORD);
    __ sd(t2, Address(sp, framesize - 3 * wordSize));
  }

  if (C->stub_function() == nullptr) {
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
    // Dummy labels for just measuring the code size
    Label dummy_slow_path;
    Label dummy_continuation;
    Label dummy_guard;
    Label* slow_path = &dummy_slow_path;
    Label* continuation = &dummy_continuation;
    Label* guard = &dummy_guard;
    if (!Compile::current()->output()->in_scratch_emit_size()) {
      // Use real labels from actual stub when not emitting code for purpose of measuring its size
      C2EntryBarrierStub* stub = new (Compile::current()->comp_arena()) C2EntryBarrierStub();
      Compile::current()->output()->add_stub(stub);
      slow_path = &stub->entry();
      continuation = &stub->continuation();
      guard = &stub->guard();
    }
    // In the C2 code, we move the non-hot part of nmethod entry barriers out-of-line to a stub.
    bs->nmethod_entry_barrier(masm, slow_path, continuation, guard);
  }

  C->output()->set_frame_complete(__ offset());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    ConstantTable& constant_table = C->output()->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
 1442 
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  assert_cond(ra_ != nullptr);
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

// The prolog itself contains no relocatable constants.
int MachPrologNode::reloc() const
{
  return 0;
}
 1454 
 1455 //=============================================================================
 1456 
#ifndef PRODUCT
// Pretty-print the epilog: frame pop plus the optional return-polling check.
// Mirrors remove_frame()/safepoint_poll() in MachEpilogNode::emit below.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  assert_cond(st != nullptr && ra_ != nullptr);
  Compile* C = ra_->C;
  assert_cond(C != nullptr);
  int framesize = C->output()->frame_size_in_bytes();

  st->print("# pop frame %d\n\t", framesize);

  if (framesize == 0) {
    st->print("ld  ra, [sp,#%d]\n\t", (2 * wordSize));
    st->print("ld  fp, [sp,#%d]\n\t", (3 * wordSize));
    st->print("add sp, sp, #%d\n\t", (2 * wordSize));
  } else {
    st->print("add  sp, sp, #%d\n\t", framesize);
    st->print("ld  ra, [sp,#%d]\n\t", - 2 * wordSize);
    st->print("ld  fp, [sp,#%d]\n\t", - wordSize);
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# test polling word\n\t");
    st->print("ld t0, [xthread,#%d]\n\t", in_bytes(JavaThread::polling_word_offset()));
    st->print("bgtu sp, t0, #slow_path");
  }
}
#endif
 1483 
// Emit the method epilog: tear down the frame, optionally check reserved
// stack access, and perform the return safepoint poll.
void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  assert_cond(ra_ != nullptr);
  Compile* C = ra_->C;
  assert_cond(C != nullptr);
  int framesize = C->output()->frame_size_in_bytes();

  __ remove_frame(framesize);

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    // Use a real out-of-line poll stub unless we are only measuring size.
    Label dummy_label;
    Label* code_stub = &dummy_label;
    if (!C->output()->in_scratch_emit_size()) {
      C2SafepointPollStub* stub = new (C->comp_arena()) C2SafepointPollStub(__ offset());
      C->output()->add_stub(stub);
      code_stub = &stub->entry();
    }
    __ relocate(relocInfo::poll_return_type);
    __ safepoint_poll(*code_stub, true /* at_return */, true /* in_nmethod */);
  }
}
 1508 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  assert_cond(ra_ != nullptr);
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

// The epilog uses the generic pipeline class.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
 1522 
 1523 //=============================================================================
 1524 
 1525 // Figure out which register class each belongs in: rc_int, rc_float or
 1526 // rc_stack.
enum RC { rc_bad, rc_int, rc_float, rc_vector, rc_stack };

// Map an OptoReg name to its register class by walking the contiguous
// slot ranges laid out in register order: int, float, vector, then stack.
static enum RC rc_class(OptoReg::Name reg) {

  if (reg == OptoReg::Bad) {
    return rc_bad;
  }

  // we have 30 int registers * 2 halves
  // (t0 and t1 are omitted)
  int slots_of_int_registers = Register::max_slots_per_register * (Register::number_of_registers - 2);
  if (reg < slots_of_int_registers) {
    return rc_int;
  }

  // we have 32 float register * 2 halves
  int slots_of_float_registers = FloatRegister::max_slots_per_register * FloatRegister::number_of_registers;
  if (reg < slots_of_int_registers + slots_of_float_registers) {
    return rc_float;
  }

  // we have 32 vector register * 4 halves
  int slots_of_vector_registers = VectorRegister::max_slots_per_register * VectorRegister::number_of_registers;
  if (reg < slots_of_int_registers + slots_of_float_registers + slots_of_vector_registers) {
    return rc_vector;
  }

  // Between vector regs & stack is the flags regs.
  assert(OptoReg::is_stack(reg), "blow up if spilling flags");

  return rc_stack;
}
 1559 
// Implement a spill copy between two register/stack locations (gpr, fpr,
// vector register, or stack slot). This single routine serves both code
// emission (masm != nullptr) and disassembly-style printing (st != nullptr).
// Always returns 0; the encoded size is obtained via MachSpillCopyNode::size().
uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
  assert_cond(ra_ != nullptr);
  Compile* C = ra_->C;

  // Get registers to move.
  OptoReg::Name src_hi = ra_->get_reg_second(in(1));
  OptoReg::Name src_lo = ra_->get_reg_first(in(1));
  OptoReg::Name dst_hi = ra_->get_reg_second(this);
  OptoReg::Name dst_lo = ra_->get_reg_first(this);

  // Classify each location as int, float, vector register or stack slot.
  enum RC src_hi_rc = rc_class(src_hi);
  enum RC src_lo_rc = rc_class(src_lo);
  enum RC dst_hi_rc = rc_class(dst_hi);
  enum RC dst_lo_rc = rc_class(dst_lo);

  assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");

  // Vector masks are exempt from the pair check below.
  if (src_hi != OptoReg::Bad && !bottom_type()->isa_vectmask()) {
    assert((src_lo & 1) == 0 && src_lo + 1 == src_hi &&
           (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi,
           "expected aligned-adjacent pairs");
  }

  if (src_lo == dst_lo && src_hi == dst_hi) {
    return 0;            // Self copy, no move.
  }

  // A 64-bit copy occupies both halves of an aligned-adjacent slot pair
  // on both the source and the destination side.
  bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
              (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
  int src_offset = ra_->reg2offset(src_lo);
  int dst_offset = ra_->reg2offset(dst_lo);

  if (bottom_type()->isa_vect() != nullptr) {
    uint ireg = ideal_reg();
    if (ireg == Op_VecA && masm) {
      // Scalable (RVV) vector copy/spill.
      int vector_reg_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
      if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
        // stack to stack
        __ spill_copy_vector_stack_to_stack(src_offset, dst_offset,
                                            vector_reg_size_in_bytes);
      } else if (src_lo_rc == rc_vector && dst_lo_rc == rc_stack) {
        // vpr to stack
        __ spill(as_VectorRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo));
      } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_vector) {
        // stack to vpr
        __ unspill(as_VectorRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo));
      } else if (src_lo_rc == rc_vector && dst_lo_rc == rc_vector) {
        // vpr to vpr
        __ vsetvli_helper(T_BYTE, MaxVectorSize);
        __ vmv_v_v(as_VectorRegister(Matcher::_regEncode[dst_lo]), as_VectorRegister(Matcher::_regEncode[src_lo]));
      } else {
        ShouldNotReachHere();
      }
    } else if (bottom_type()->isa_vectmask() && masm) {
      // Vector mask (predicate) copy/spill: one bit per vector element,
      // hence the size in bytes below.
      int vmask_size_in_bytes = Matcher::scalable_predicate_reg_slots() * 32 / 8;
      if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
        // stack to stack
        __ spill_copy_vmask_stack_to_stack(src_offset, dst_offset,
                                           vmask_size_in_bytes);
      } else if (src_lo_rc == rc_vector && dst_lo_rc == rc_stack) {
        // vmask to stack
        __ spill_vmask(as_VectorRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo));
      } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_vector) {
        // stack to vmask
        __ unspill_vmask(as_VectorRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo));
      } else if (src_lo_rc == rc_vector && dst_lo_rc == rc_vector) {
        // vmask to vmask
        __ vsetvli_helper(T_BYTE, MaxVectorSize >> 3);
        __ vmv_v_v(as_VectorRegister(Matcher::_regEncode[dst_lo]), as_VectorRegister(Matcher::_regEncode[src_lo]));
      } else {
        ShouldNotReachHere();
      }
    }
  } else if (masm != nullptr) {
    // Scalar copy/spill, dispatched on the source register class.
    switch (src_lo_rc) {
      case rc_int:
        if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
          if (!is64 && this->ideal_reg() != Op_RegI) { // zero extended for narrow oop or klass
            __ zext(as_Register(Matcher::_regEncode[dst_lo]), as_Register(Matcher::_regEncode[src_lo]), 32);
          } else {
            __ mv(as_Register(Matcher::_regEncode[dst_lo]), as_Register(Matcher::_regEncode[src_lo]));
          }
        } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
          if (is64) {
            __ fmv_d_x(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                       as_Register(Matcher::_regEncode[src_lo]));
          } else {
            __ fmv_w_x(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                       as_Register(Matcher::_regEncode[src_lo]));
          }
        } else {                    // gpr --> stack spill
          assert(dst_lo_rc == rc_stack, "spill to bad register class");
          __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
        }
        break;
      case rc_float:
        if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
          if (is64) {
            __ fmv_x_d(as_Register(Matcher::_regEncode[dst_lo]),
                       as_FloatRegister(Matcher::_regEncode[src_lo]));
          } else {
            __ fmv_x_w(as_Register(Matcher::_regEncode[dst_lo]),
                       as_FloatRegister(Matcher::_regEncode[src_lo]));
          }
        } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
          if (is64) {
            __ fmv_d(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
          } else {
            __ fmv_s(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
          }
        } else {                    // fpr --> stack spill
          assert(dst_lo_rc == rc_stack, "spill to bad register class");
          __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
                   is64, dst_offset);
        }
        break;
      case rc_stack:
        if (dst_lo_rc == rc_int) {  // stack --> gpr load
          if (this->ideal_reg() == Op_RegI) {
            __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
          } else { // zero extended for narrow oop or klass
            __ unspillu(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
          }
        } else if (dst_lo_rc == rc_float) { // stack --> fpr load
          __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     is64, src_offset);
        } else {                    // stack --> stack copy, bounced through t0
          assert(dst_lo_rc == rc_stack, "spill to bad register class");
          if (this->ideal_reg() == Op_RegI) {
            __ unspill(t0, is64, src_offset);
          } else { // zero extended for narrow oop or klass
            __ unspillu(t0, is64, src_offset);
          }
          __ spill(t0, is64, dst_offset);
        }
        break;
      default:
        ShouldNotReachHere();
    }
  }

  // Textual form for PrintOptoAssembly-style output.
  if (st != nullptr) {
    st->print("spill ");
    if (src_lo_rc == rc_stack) {
      st->print("[sp, #%d] -> ", src_offset);
    } else {
      st->print("%s -> ", Matcher::regName[src_lo]);
    }
    if (dst_lo_rc == rc_stack) {
      st->print("[sp, #%d]", dst_offset);
    } else {
      st->print("%s", Matcher::regName[dst_lo]);
    }
    if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
      int vsize = 0;
      if (ideal_reg() == Op_VecA) {
        vsize = Matcher::scalable_vector_reg_size(T_BYTE) * 8;
      } else {
        ShouldNotReachHere();
      }
      st->print("\t# vector spill size = %d", vsize);
    } else if (ideal_reg() == Op_RegVectMask) {
      assert(Matcher::supports_scalable_vector(), "bad register type for spill");
      int vsize = Matcher::scalable_predicate_reg_slots() * 32;
      st->print("\t# vmask spill size = %d", vsize);
    } else {
      st->print("\t# spill size = %d", is64 ? 64 : 32);
    }
  }

  return 0;
}
 1734 
 1735 #ifndef PRODUCT
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  if (ra_ == nullptr) {
    // No register assignment available yet: print a symbolic form.
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  } else {
    // Reuse implementation() in print-only mode (masm == nullptr).
    implementation(nullptr, ra_, false, st);
  }
}
 1743 #endif
 1744 
void MachSpillCopyNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  // Emit-only mode: st == nullptr suppresses the textual output.
  implementation(masm, ra_, false, nullptr);
}
 1748 
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  // Defer to the generic implementation; no fixed-size guarantee is needed here.
  return MachNode::size(ra_);
}
 1752 
 1753 //=============================================================================
 1754 
 1755 #ifndef PRODUCT
void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  assert_cond(ra_ != nullptr && st != nullptr);
  // Offset of the box lock's stack slot and its assigned register.
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  // Printed form matches the short sequence emitted by BoxLockNode::emit().
  st->print("add %s, sp, #%d\t# box lock",
            Matcher::regName[reg], offset);
}
 1763 #endif
 1764 
// Materialize the address of the box lock's stack slot (sp + offset)
// into the node's assigned register.
void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  Assembler::IncompressibleScope scope(masm); // Fixed length: see BoxLockNode::size()

  assert_cond(ra_ != nullptr);
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  if (Assembler::is_simm12(offset)) {
    // Offset fits the 12-bit addi immediate: single instruction.
    __ addi(as_Register(reg), sp, offset);
  } else {
    // Large offset: build it in t0 first (li32 expands to lui + addiw,
    // see the size accounting in BoxLockNode::size()).
    __ li32(t0, offset);
    __ add(as_Register(reg), sp, t0);
  }
}
 1779 
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  // This must agree exactly with the sequence emitted by BoxLockNode::emit().
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());

  if (Assembler::is_simm12(offset)) {
    return NativeInstruction::instruction_size;
  } else {
    return 3 * NativeInstruction::instruction_size; // lui + addiw + add;
  }
}
 1790 
 1791 //=============================================================================
 1792 
 1793 #ifndef PRODUCT
// Print pseudo-assembly for the inline-cache check emitted by
// MachUEPNode::emit() below.
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  assert_cond(st != nullptr);
  st->print_cr("# MachUEPNode");
  st->print_cr("\tlwu t1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
  st->print_cr("\tlwu t2, [t0      + CompiledICData::speculated_klass_offset()]\t# compressed klass");
  st->print_cr("\tbeq t1, t2, ic_hit");
  st->print_cr("\tj, SharedRuntime::_ic_miss_stub\t # Inline cache check");
  st->print_cr("\tic_hit:");
}
 1804 #endif
 1805 
void MachUEPNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  // Emit the inline-cache check (see format() for its pseudo-assembly).
  __ ic_check(CodeEntryAlignment);

  // ic_check() aligns to CodeEntryAlignment >= InteriorEntryAlignment(min 16) > NativeInstruction::instruction_size(4).
  assert(((__ offset()) % CodeEntryAlignment) == 0, "Misaligned verified entry point");
}
 1814 
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  // Defer to the generic implementation.
  assert_cond(ra_ != nullptr);
  return MachNode::size(ra_);
}
 1820 
 1821 // REQUIRED EMIT CODE
 1822 
 1823 //=============================================================================
 1824 
// Emit deopt handler code.
// Returns the handler's entry offset within the stub, or 0 when the
// code cache is full.
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm)
{
  address base = __ start_a_stub(size_deopt_handler());
  if (base == nullptr) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  Label start;
  __ bind(start);

  __ far_call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  // The entry point is placed *after* the far_call; entering through the
  // trailing jump back to 'start' performs the call and also guarantees
  // there are readable bytes after the entry (second assert below).
  int entry_offset = __ offset();
  __ j(start);

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  assert(__ offset() - entry_offset >= NativePostCallNop::first_check_size,
         "out of bounds read in post-call NOP check");
  __ end_a_stub();
  return entry_offset;

}
 1850 // REQUIRED MATCHER CODE
 1851 
 1852 //=============================================================================
 1853 
// Identify whether an ideal opcode, which has a match rule in this AD
// file, is actually supported on the current CPU/flag configuration.
bool Matcher::match_rule_supported(int opcode) {
  if (!has_match_rule(opcode)) {
    return false;
  }

  // Gate individual rules on VM flags / detected CPU features.
  switch (opcode) {
    case Op_OnSpinWait:
      return VM_Version::supports_on_spin_wait();
    case Op_CacheWB:           // fall through
    case Op_CacheWBPreSync:    // fall through
    case Op_CacheWBPostSync:
      if (!VM_Version::supports_data_cache_line_flush()) {
        return false;
      }
      break;

    // String/array intrinsics implemented with vector instructions.
    case Op_StrCompressedCopy: // fall through
    case Op_StrInflatedCopy:   // fall through
    case Op_CountPositives:    // fall through
    case Op_EncodeISOArray:
      return UseRVV;

    case Op_PopCountI:
    case Op_PopCountL:
      return UsePopCountInstruction;

    case Op_ReverseI:
    case Op_ReverseL:
      return UseZbkb;

    case Op_ReverseBytesI:
    case Op_ReverseBytesL:
    case Op_ReverseBytesS:
    case Op_ReverseBytesUS:
    case Op_RotateRight:
    case Op_RotateLeft:
    case Op_CountLeadingZerosI:
    case Op_CountLeadingZerosL:
    case Op_CountTrailingZerosI:
    case Op_CountTrailingZerosL:
      return UseZbb;

    case Op_FmaF:
    case Op_FmaD:
      return UseFMA;

    // Half-precision float support (Zfh / Zfhmin extensions).
    case Op_ConvHF2F:
    case Op_ConvF2HF:
      return VM_Version::supports_float16_float_conversion();
    case Op_ReinterpretS2HF:
    case Op_ReinterpretHF2S:
      return UseZfh || UseZfhmin;
    case Op_AddHF:
    case Op_DivHF:
    case Op_FmaHF:
    case Op_MaxHF:
    case Op_MinHF:
    case Op_MulHF:
    case Op_SqrtHF:
    case Op_SubHF:
      return UseZfh;

    case Op_CMoveP:
    case Op_CMoveN:
      return false;
  }

  return true; // Per default match rules are supported.
}
 1923 
// Register mask for vector mask (predicate) registers.
const RegMask* Matcher::predicate_reg_mask(void) {
  return &_VMASK_REG_mask;
}
 1927 
// Vector calling convention is tied to the Vector API support flag;
// see vector_return_value() below for the return register choice.
bool Matcher::supports_vector_calling_convention(void) {
  return EnableVectorSupport;
}
 1932 
 1933 OptoRegPair Matcher::vector_return_value(uint ideal_reg) {
 1934   assert(EnableVectorSupport, "sanity");
 1935   assert(ideal_reg == Op_VecA, "sanity");
 1936   // check more info at https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc
 1937   int lo = V8_num;
 1938   int hi = V8_K_num;
 1939   return OptoRegPair(hi, lo);
 1940 }
 1941 
 1942 // Is this branch offset short enough that a short branch can be used?
 1943 //
 1944 // NOTE: If the platform does not provide any short branch variants, then
 1945 //       this method should return false for offset 0.
 1946 // |---label(L1)-----|
 1947 // |-----------------|
 1948 // |-----------------|----------eq: float-------------------
 1949 // |-----------------| // far_cmpD_branch   |   cmpD_branch
 1950 // |------- ---------|    feq;              |      feq;
 1951 // |-far_cmpD_branch-|    beqz done;        |      bnez L;
 1952 // |-----------------|    j L;              |
 1953 // |-----------------|    bind(done);       |
 1954 // |-----------------|--------------------------------------
 1955 // |-----------------| // so shortBrSize = br_size - 4;
 1956 // |-----------------| // so offs = offset - shortBrSize + 4;
 1957 // |---label(L2)-----|
 1958 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
 1959   // The passed offset is relative to address of the branch.
 1960   int shortBrSize = br_size - 4;
 1961   int offs = offset - shortBrSize + 4;
 1962   return (-4096 <= offs && offs < 4096);
 1963 }
 1964 
 1965 // Vector width in bytes.
 1966 int Matcher::vector_width_in_bytes(BasicType bt) {
 1967   if (UseRVV) {
 1968     // The MaxVectorSize should have been set by detecting RVV max vector register size when check UseRVV.
 1969     // MaxVectorSize == VM_Version::_initial_vector_length
 1970     int size = MaxVectorSize;
 1971     // Minimum 2 values in vector
 1972     if (size < 2 * type2aelembytes(bt)) size = 0;
 1973     // But never < 4
 1974     if (size < 4) size = 0;
 1975     return size;
 1976   }
 1977   return 0;
 1978 }
 1979 
// Limits on vector size (number of elements) loaded into vector.
int Matcher::max_vector_size(const BasicType bt) {
  // Element count = total vector width in bytes / element size in bytes.
  return vector_width_in_bytes(bt) / type2aelembytes(bt);
}
 1984 
 1985 int Matcher::min_vector_size(const BasicType bt) {
 1986   int size;
 1987   switch(bt) {
 1988     case T_BOOLEAN:
 1989       // Load/store a vector mask with only 2 elements for vector types
 1990       // such as "2I/2F/2L/2D".
 1991       size = 2;
 1992       break;
 1993     case T_BYTE:
 1994       // Generate a "4B" vector, to support vector cast between "8B/16B"
 1995       // and "4S/4I/4L/4F/4D".
 1996       size = 4;
 1997       break;
 1998     case T_SHORT:
 1999       // Generate a "2S" vector, to support vector cast between "4S/8S"
 2000       // and "2I/2L/2F/2D".
 2001       size = 2;
 2002       break;
 2003     default:
 2004       // Limit the min vector length to 64-bit.
 2005       size = 8 / type2aelembytes(bt);
 2006       // The number of elements in a vector should be at least 2.
 2007       size = MAX2(size, 2);
 2008   }
 2009 
 2010   int max_size = max_vector_size(bt);
 2011   return MIN2(size, max_size);
 2012 }
 2013 
int Matcher::max_vector_size_auto_vectorization(const BasicType bt) {
  // Auto-vectorization uses the same maximum as explicit vectorization.
  return Matcher::max_vector_size(bt);
}
 2017 
 2018 // Vector ideal reg.
 2019 uint Matcher::vector_ideal_reg(int len) {
 2020   assert(MaxVectorSize >= len, "");
 2021   if (UseRVV) {
 2022     return Op_VecA;
 2023   }
 2024 
 2025   ShouldNotReachHere();
 2026   return 0;
 2027 }
 2028 
int Matcher::scalable_vector_reg_size(const BasicType bt) {
  // For scalable (RVV) vectors the register size equals the max vector size.
  return Matcher::max_vector_size(bt);
}
 2032 
// riscv does not use generic vector operands, so this must never be called.
MachOper* Matcher::pd_specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg, bool is_temp) {
  ShouldNotReachHere(); // generic vector operands not supported
  return nullptr;
}
 2037 
// riscv does not use generic vector operands, so this must never be called.
bool Matcher::is_reg2reg_move(MachNode* m) {
  ShouldNotReachHere(); // generic vector operands not supported
  return false;
}
 2042 
// Register biasing is not used on riscv.
bool Matcher::is_register_biasing_candidate(const MachNode* mdef, int oper_index) {
  return false;
}
 2046 
 2047 bool Matcher::is_generic_vector(MachOper* opnd) {
 2048   ShouldNotReachHere(); // generic vector operands not supported
 2049   return false;
 2050 }
 2051 
 2052 #ifdef ASSERT
 2053 // Return whether or not this register is ever used as an argument.
 2054 bool Matcher::can_be_java_arg(int reg)
 2055 {
 2056   return
 2057     reg ==  R10_num || reg == R10_H_num ||
 2058     reg ==  R11_num || reg == R11_H_num ||
 2059     reg ==  R12_num || reg == R12_H_num ||
 2060     reg ==  R13_num || reg == R13_H_num ||
 2061     reg ==  R14_num || reg == R14_H_num ||
 2062     reg ==  R15_num || reg == R15_H_num ||
 2063     reg ==  R16_num || reg == R16_H_num ||
 2064     reg ==  R17_num || reg == R17_H_num ||
 2065     reg ==  F10_num || reg == F10_H_num ||
 2066     reg ==  F11_num || reg == F11_H_num ||
 2067     reg ==  F12_num || reg == F12_H_num ||
 2068     reg ==  F13_num || reg == F13_H_num ||
 2069     reg ==  F14_num || reg == F14_H_num ||
 2070     reg ==  F15_num || reg == F15_H_num ||
 2071     reg ==  F16_num || reg == F16_H_num ||
 2072     reg ==  F17_num || reg == F17_H_num;
 2073 }
 2074 #endif
 2075 
uint Matcher::int_pressure_limit()
{
  // A derived pointer is live at CallNode and then is flagged by RA
  // as a spilled LRG. Spilling heuristics(Spill-USE) explicitly skip
  // derived pointers and lastly fail to spill after reaching maximum
  // number of iterations. Lowering the default pressure threshold to
  // (_NO_SPECIAL_REG32_mask.size() minus 1) forces CallNode to become
  // a high register pressure area of the code so that split_DEF can
  // generate DefinitionSpillCopy for the derived pointer.
  uint default_int_pressure_threshold = _NO_SPECIAL_REG32_mask.size() - 1;
  if (!PreserveFramePointer) {
    // When PreserveFramePointer is off, frame pointer is allocatable,
    // but different from other SOC registers, it is excluded from
    // fatproj's mask because its save type is No-Save. Decrease 1 to
    // ensure high pressure at fatproj when PreserveFramePointer is off.
    // See check_pressure_at_fatproj().
    default_int_pressure_threshold--;
  }
  // The INTPRESSURE flag (-1 means "auto") overrides the computed default.
  return (INTPRESSURE == -1) ? default_int_pressure_threshold : INTPRESSURE;
}
 2096 
uint Matcher::float_pressure_limit()
{
  // _FLOAT_REG_mask is generated by adlc from the float_reg register class.
  // The FLOATPRESSURE flag (-1 means "auto") overrides the default.
  return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.size() : FLOATPRESSURE;
}
 2102 
// Register for DIVI projection of divmodI.
// Never used on riscv; kept only to satisfy the Matcher interface.
const RegMask& Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask::EMPTY;
}
 2107 
// Register for MODI projection of divmodI.
// Never used on riscv; kept only to satisfy the Matcher interface.
const RegMask& Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask::EMPTY;
}
 2113 
// Register for DIVL projection of divmodL.
// Never used on riscv; kept only to satisfy the Matcher interface.
const RegMask& Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask::EMPTY;
}
 2119 
// Register for MODL projection of divmodL.
// Never used on riscv; kept only to satisfy the Matcher interface.
const RegMask& Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask::EMPTY;
}
 2125 
 2126 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
 2127   assert_cond(addp != nullptr);
 2128   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
 2129     Node* u = addp->fast_out(i);
 2130     if (u != nullptr && u->is_Mem()) {
 2131       int opsize = u->as_Mem()->memory_size();
 2132       assert(opsize > 0, "unexpected memory operand size");
 2133       if (u->as_Mem()->memory_size() != (1 << shift)) {
 2134         return false;
 2135       }
 2136     }
 2137   }
 2138   return true;
 2139 }
 2140 
// Binary src (Replicate scalar/immediate)
// Returns true when 'm' is a Replicate feeding a vector op 'n' for which
// the backend has scalar/immediate-operand forms, so the Matcher should
// clone the Replicate rather than share it.
static bool is_vector_scalar_bitwise_pattern(Node* n, Node* m) {
  if (n == nullptr || m == nullptr) {
    return false;
  }

  if (m->Opcode() != Op_Replicate) {
    return false;
  }

  // Vector ops with a scalar-broadcast second input.
  switch (n->Opcode()) {
    case Op_AndV:
    case Op_OrV:
    case Op_XorV:
    case Op_AddVB:
    case Op_AddVS:
    case Op_AddVI:
    case Op_AddVL:
    case Op_SubVB:
    case Op_SubVS:
    case Op_SubVI:
    case Op_SubVL:
    case Op_MulVB:
    case Op_MulVS:
    case Op_MulVI:
    case Op_MulVL: {
      return true;
    }
    default:
      return false;
  }
}
 2173 
 2174 // (XorV src (Replicate m1))
 2175 // (XorVMask src (MaskAll m1))
 2176 static bool is_vector_bitwise_not_pattern(Node* n, Node* m) {
 2177   if (n != nullptr && m != nullptr) {
 2178     return (n->Opcode() == Op_XorV || n->Opcode() == Op_XorVMask) &&
 2179            VectorNode::is_all_ones_vector(m);
 2180   }
 2181   return false;
 2182 }
 2183 
// Should the Matcher clone input 'm' of node 'n'?
// Cloning lets each use of 'm' be matched into its consumer's rule
// instead of being materialized once and shared.
bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
  assert_cond(m != nullptr);
  if (is_vshift_con_pattern(n, m) || // ShiftV src (ShiftCntV con)
      is_vector_bitwise_not_pattern(n, m) ||
      is_vector_scalar_bitwise_pattern(n, m) ||
      is_encode_and_store_pattern(n, m)) {
    mstack.push(m, Visit);
    return true;
  }
  return false;
}
 2196 
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
// Only base-plus-offset address expressions are cloned here.
bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  return clone_base_plus_offset_address(m, mstack, address_visited);
}
 2203 
 2204 %}
 2205 
 2206 
 2207 
 2208 //----------ENCODING BLOCK-----------------------------------------------------
 2209 // This block specifies the encoding classes used by the compiler to
 2210 // output byte streams.  Encoding classes are parameterized macros
 2211 // used by Machine Instruction Nodes in order to generate the bit
 2212 // encoding of the instruction.  Operands specify their base encoding
 2213 // interface with the interface keyword.  There are currently
 2214 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
 2215 // COND_INTER.  REG_INTER causes an operand to generate a function
 2216 // which returns its register number when queried.  CONST_INTER causes
 2217 // an operand to generate a function which returns the value of the
 2218 // constant when queried.  MEMORY_INTER causes an operand to generate
 2219 // four functions which return the Base Register, the Index Register,
 2220 // the Scale Value, and the Offset Value of the operand when queried.
 2221 // COND_INTER causes an operand to generate six functions which return
 2222 // the encoding code (ie - encoding bits for the instruction)
 2223 // associated with each basic boolean condition for a conditional
 2224 // instruction.
 2225 //
 2226 // Instructions specify two basic values for encoding.  Again, a
 2227 // function is available to check if the constant displacement is an
 2228 // oop. They use the ins_encode keyword to specify their encoding
 2229 // classes (which must be a sequence of enc_class names, and their
 2230 // parameters, specified in the encoding block), and they use the
 2231 // opcode keyword to specify, in order, their primary, secondary, and
 2232 // tertiary opcode.  Only the opcode sections which a particular
 2233 // instruction needs for encoding need to be specified.
 2234 encode %{
 2235   // BEGIN Non-volatile memory access
 2236 
 2237   enc_class riscv_enc_mov_imm(iRegIorL dst, immIorL src) %{
 2238     int64_t con = (int64_t)$src$$constant;
 2239     Register dst_reg = as_Register($dst$$reg);
 2240     __ mv(dst_reg, con);
 2241   %}
 2242 
 2243   enc_class riscv_enc_mov_p(iRegP dst, immP src) %{
 2244     Register dst_reg = as_Register($dst$$reg);
 2245     address con = (address)$src$$constant;
 2246     if (con == nullptr || con == (address)1) {
 2247       ShouldNotReachHere();
 2248     } else {
 2249       relocInfo::relocType rtype = $src->constant_reloc();
 2250       if (rtype == relocInfo::oop_type) {
 2251         __ movoop(dst_reg, (jobject)con);
 2252       } else if (rtype == relocInfo::metadata_type) {
 2253         __ mov_metadata(dst_reg, (Metadata*)con);
 2254       } else {
 2255         assert(rtype == relocInfo::none || rtype == relocInfo::external_word_type, "unexpected reloc type");
 2256         __ mv(dst_reg, $src$$constant);
 2257       }
 2258     }
 2259   %}
 2260 
 2261   enc_class riscv_enc_mov_p1(iRegP dst) %{
 2262     Register dst_reg = as_Register($dst$$reg);
 2263     __ mv(dst_reg, 1);
 2264   %}
 2265 
 2266   enc_class riscv_enc_mov_n(iRegN dst, immN src) %{
 2267     Register dst_reg = as_Register($dst$$reg);
 2268     address con = (address)$src$$constant;
 2269     if (con == nullptr) {
 2270       ShouldNotReachHere();
 2271     } else {
 2272       relocInfo::relocType rtype = $src->constant_reloc();
 2273       assert(rtype == relocInfo::oop_type, "unexpected reloc type");
 2274       __ set_narrow_oop(dst_reg, (jobject)con);
 2275     }
 2276   %}
 2277 
 2278   enc_class riscv_enc_mov_zero(iRegNorP dst) %{
 2279     Register dst_reg = as_Register($dst$$reg);
 2280     __ mv(dst_reg, zr);
 2281   %}
 2282 
 2283   enc_class riscv_enc_mov_nk(iRegN dst, immNKlass src) %{
 2284     Register dst_reg = as_Register($dst$$reg);
 2285     address con = (address)$src$$constant;
 2286     if (con == nullptr) {
 2287       ShouldNotReachHere();
 2288     } else {
 2289       relocInfo::relocType rtype = $src->constant_reloc();
 2290       assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
 2291       __ set_narrow_klass(dst_reg, (Klass *)con);
 2292     }
 2293   %}
 2294 
 2295   // compare and branch instruction encodings
 2296 
 2297   enc_class riscv_enc_j(label lbl) %{
 2298     Label* L = $lbl$$label;
 2299     __ j(*L);
 2300   %}
 2301 
 2302   enc_class riscv_enc_far_cmpULtGe_imm0_branch(cmpOpULtGe cmp, iRegIorL op1, label lbl) %{
 2303     Label* L = $lbl$$label;
 2304     switch ($cmp$$cmpcode) {
 2305       case(BoolTest::ge):
 2306         __ j(*L);
 2307         break;
 2308       case(BoolTest::lt):
 2309         break;
 2310       default:
 2311         Unimplemented();
 2312     }
 2313   %}
 2314 
 2315   // call instruction encodings
 2316 
 2317   enc_class riscv_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result) %{
 2318     Register sub_reg = as_Register($sub$$reg);
 2319     Register super_reg = as_Register($super$$reg);
 2320     Register temp_reg = as_Register($temp$$reg);
 2321     Register result_reg = as_Register($result$$reg);
 2322     Register cr_reg = t1;
 2323 
 2324     Label miss;
 2325     Label done;
 2326     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
 2327                                      nullptr, &miss, /*set_cond_codes*/ true);
 2328     if ($primary) {
 2329       __ mv(result_reg, zr);
 2330     } else {
 2331       __ mv(cr_reg, zr);
 2332       __ j(done);
 2333     }
 2334 
 2335     __ bind(miss);
 2336     if (!$primary) {
 2337       __ mv(cr_reg, 1);
 2338     }
 2339 
 2340     __ bind(done);
 2341   %}
 2342 
 2343   enc_class riscv_enc_java_static_call(method meth) %{
 2344     Assembler::IncompressibleScope scope(masm); // Fixed length: see ret_addr_offset
 2345 
 2346     address addr = (address)$meth$$method;
 2347     address call = nullptr;
 2348     assert_cond(addr != nullptr);
 2349     if (!_method) {
 2350       // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
 2351       call = __ reloc_call(Address(addr, relocInfo::runtime_call_type));
 2352       if (call == nullptr) {
 2353         ciEnv::current()->record_failure("CodeCache is full");
 2354         return;
 2355       }
 2356     } else if (_method->intrinsic_id() == vmIntrinsicID::_ensureMaterializedForStackWalk) {
 2357       // The NOP here is purely to ensure that eliding a call to
 2358       // JVM_EnsureMaterializedForStackWalk doesn't change the code size.
 2359       __ nop();
 2360       __ nop();
 2361       __ nop();
 2362       __ block_comment("call JVM_EnsureMaterializedForStackWalk (elided)");
 2363     } else {
 2364       int method_index = resolved_method_index(masm);
 2365       RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
 2366                                                   : static_call_Relocation::spec(method_index);
 2367       call = __ reloc_call(Address(addr, rspec));
 2368       if (call == nullptr) {
 2369         ciEnv::current()->record_failure("CodeCache is full");
 2370         return;
 2371       }
 2372 
 2373       if (CodeBuffer::supports_shared_stubs() && _method->can_be_statically_bound()) {
 2374         // Calls of the same statically bound method can share
 2375         // a stub to the interpreter.
 2376         __ code()->shared_stub_to_interp_for(_method, call - (__ begin()));
 2377       } else {
 2378         // Emit stub for static call
 2379         address stub = CompiledDirectCall::emit_to_interp_stub(masm, call);
 2380         if (stub == nullptr) {
 2381           ciEnv::current()->record_failure("CodeCache is full");
 2382           return;
 2383         }
 2384       }
 2385     }
 2386 
 2387     __ post_call_nop();
 2388   %}
 2389 
 2390   enc_class riscv_enc_java_dynamic_call(method meth) %{
 2391     Assembler::IncompressibleScope scope(masm); // Fixed length: see ret_addr_offset
 2392     int method_index = resolved_method_index(masm);
 2393     address call = __ ic_call((address)$meth$$method, method_index);
 2394     if (call == nullptr) {
 2395       ciEnv::current()->record_failure("CodeCache is full");
 2396       return;
 2397     }
 2398 
 2399     __ post_call_nop();
 2400   %}
 2401 
  // Debug-only epilog emitted after calls when +VerifyStackAtCalls: reload
  // the magic cookie placed in the caller frame and stop if it was clobbered,
  // i.e. if the callee did not restore the stack depth.
  enc_class riscv_enc_call_epilog() %{
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      // NOTE(review): the cookie is assumed to live 3 words below _old_SP
      // (where the prolog stored it) -- confirm against the prolog emitter.
      int framesize = ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP, -3 * VMRegImpl::slots_per_word));
      Label stack_ok;
      __ ld(t1, Address(sp, framesize));
      __ mv(t2, MAJIK_DWORD);
      __ beq(t2, t1, stack_ok);
      __ stop("MAJIK_DWORD not found");
      __ bind(stack_ok);
    }
  %}
 2414 
  // Call from compiled Java code into the runtime (or a generated stub such
  // as arraycopy).  In-code-cache targets use a reachable far_call; targets
  // outside the code cache need an absolute-address load plus jalr.
  enc_class riscv_enc_java_to_runtime(method meth) %{
    Assembler::IncompressibleScope scope(masm); // Fixed length: see ret_addr_offset

    // Some calls to generated routines (arraycopy code) are scheduled by C2
    // as runtime calls.  If so we can call them using a far call (they will be
    // in the code cache, thus in a reachable segment) otherwise we have to use
    // a movptr+jalr pair which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    if (CodeCache::contains(entry)) {
      __ far_call(Address(entry, relocInfo::runtime_call_type));
      __ post_call_nop();
    } else {
      Label retaddr;
      // Make the anchor frame walkable: record the return pc in the thread's
      // last_Java_pc so the stack walker can traverse this frame.
      __ la(t0, retaddr);
      __ sd(t0, Address(xthread, JavaThread::last_Java_pc_offset()));
      int32_t offset = 0;
      // No relocation needed
      __ movptr(t1, entry, offset, t0); // lui + lui + slli + add
      __ jalr(t1, offset);
      __ bind(retaddr);
      __ post_call_nop();
    }
  %}
 2439 
  // TailCall: indirect jump to the target address held in jump_target.
  enc_class riscv_enc_tail_call(iRegP jump_target) %{
    Register target_reg = as_Register($jump_target$$reg);
    __ jr(target_reg);
  %}
 2444 
  // TailJump (exception forwarding): move the popped return address into the
  // register the handler expects, then jump indirectly to the target.
  enc_class riscv_enc_tail_jmp(iRegP jump_target) %{
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in x10
    // ret addr has been popped into ra
    // callee expects it in x13
    __ mv(x13, ra);
    __ jr(target_reg);
  %}
 2453 
  // Rethrow: jump to the shared OptoRuntime rethrow stub to re-raise the
  // pending exception in the caller.
  enc_class riscv_enc_rethrow() %{
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}

  // Plain method return.
  enc_class riscv_enc_ret() %{
    __ ret();
  %}
 2461 
 2462 %}
 2463 
 2464 //----------FRAME--------------------------------------------------------------
 2465 // Definition of frame structure and management information.
 2466 //
 2467 //  S T A C K   L A Y O U T    Allocators stack-slot number
 2468 //                             |   (to get allocators register number
 2469 //  G  Owned by    |        |  v    add OptoReg::stack0())
 2470 //  r   CALLER     |        |
 2471 //  o     |        +--------+      pad to even-align allocators stack-slot
 2472 //  w     V        |  pad0  |        numbers; owned by CALLER
 2473 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
 2474 //  h     ^        |   in   |  5
 2475 //        |        |  args  |  4   Holes in incoming args owned by SELF
 2476 //  |     |        |        |  3
 2477 //  |     |        +--------+
 2478 //  V     |        | old out|      Empty on Intel, window on Sparc
 2479 //        |    old |preserve|      Must be even aligned.
 2480 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
 2481 //        |        |   in   |  3   area for Intel ret address
 2482 //     Owned by    |preserve|      Empty on Sparc.
 2483 //       SELF      +--------+
 2484 //        |        |  pad2  |  2   pad to align old SP
 2485 //        |        +--------+  1
 2486 //        |        | locks  |  0
 2487 //        |        +--------+----> OptoReg::stack0(), even aligned
 2488 //        |        |  pad1  | 11   pad to align new SP
 2489 //        |        +--------+
 2490 //        |        |        | 10
 2491 //        |        | spills |  9   spills
 2492 //        V        |        |  8   (pad0 slot for callee)
 2493 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
 2494 //        ^        |  out   |  7
 2495 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
 2496 //     Owned by    +--------+
 2497 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
 2498 //        |    new |preserve|      Must be even-aligned.
 2499 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
 2500 //        |        |        |
 2501 //
 2502 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
 2503 //         known from SELF's arguments and the Java calling convention.
 2504 //         Region 6-7 is determined per call site.
 2505 // Note 2: If the calling convention leaves holes in the incoming argument
 2506 //         area, those holes are owned by SELF.  Holes in the outgoing area
 2507 //         are owned by the CALLEE.  Holes should not be necessary in the
 2508 //         incoming area, as the Java calling convention is completely under
 2509 //         the control of the AD file.  Doubles can be sorted and packed to
 2510 //         avoid holes.  Holes in the outgoing arguments may be necessary for
 2511 //         varargs C calling conventions.
 2512 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
 2513 //         even aligned with pad0 as needed.
 2514 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
 2515 //           (the latter is true on Intel but is it false on RISCV?)
 2516 //         region 6-11 is even aligned; it may be padded out more so that
 2517 //         the region from SP to FP meets the minimum stack alignment.
 2518 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
 2519 //         alignment.  Region 11, pad1, may be dynamically extended so that
 2520 //         SP meets the minimum alignment.
 2521 
frame %{
  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R31);

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset);

  // Number of stack slots consumed by locking an object
  // generate Compile::sync_stack_slots
  // VMRegImpl::slots_per_word = wordSize / stack_slot_size = 8 / 4 = 2
  sync_stack_slots(1 * VMRegImpl::slots_per_word);

  // Compiled code's Frame Pointer
  frame_pointer(R2);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes / BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Location of compiled Java return values.  Same as C for now.
  // The lo/hi tables below are indexed by ideal register type
  // (Op_RegI .. Op_RegL); integral values return in R10, floating
  // point values in F10.
  return_value
  %{
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R10_num,                           // Op_RegN
      R10_num,                           // Op_RegI
      R10_num,                           // Op_RegP
      F10_num,                           // Op_RegF
      F10_num,                           // Op_RegD
      R10_num                            // Op_RegL
    };

    // Second-half register for 64-bit values; OptoReg::Bad for 32-bit types.
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                      // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R10_H_num,                         // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      F10_H_num,                         // Op_RegD
      R10_H_num                          // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
 2592 
 2593 //----------ATTRIBUTES---------------------------------------------------------
 2594 //----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute (default when an
                             // operand does not specify op_cost)

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(DEFAULT_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
 2614 
 2615 //----------OPERANDS-----------------------------------------------------------
 2616 // Operand definitions must precede instruction definitions for correct parsing
 2617 // in the ADLC because operands constitute user defined types which are used in
 2618 // instruction definitions.
 2619 
 2620 //----------Simple Operands----------------------------------------------------
 2621 
// Integer operands 32 bit
// 32 bit immediate
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Unsigned Integer Immediate:  6-bit int in the range [32, 64)
operand uimmI6_ge32() %{
  predicate(((unsigned int)(n->get_int()) < 64) && (n->get_int() >= 32));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer no greater than 4
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 63
operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for add immediate (fits in a signed 12-bit field)
operand immIAdd()
%{
  predicate(Assembler::is_simm12((int64_t)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for sub immediate (its negation fits in simm12)
operand immISub()
%{
  predicate(Assembler::is_simm12(-(int64_t)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 5 bit signed value, i.e. in the range [-16, 15].
operand immI5()
%{
  predicate(n->get_int() <= 15 && n->get_int() >= -16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 5 bit signed value (simm5), long flavour of immI5
operand immL5()
%{
  predicate(n->get_long() <= 15 && n->get_long() >= -16);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 2764 
// Integer operands 64 bit
// 64 bit immediate
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer operands
// Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Null Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Int Immediate: low 16-bit mask
operand immI_16bits()
%{
  predicate(n->get_int() == 0xFFFF);
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Int Immediate: power of 2 (tested on the unsigned value)
operand immIpowerOf2() %{
  predicate(is_power_of_2((juint)(n->get_int())));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}


// 64 bit integer valid for add immediate (fits in a signed 12-bit field)
operand immLAdd()
%{
  predicate(Assembler::is_simm12(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for sub immediate (its negation fits in simm12)
operand immLSub()
%{
  predicate(Assembler::is_simm12(-(n->get_long())));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 2880 
// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Null Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 2911 
// Float and Double operands
// Double Immediate
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Half Float Immediate
operand immH()
%{
  match(ConH);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Half Float Immediate: +0.0f.
operand immH0()
%{
  predicate(jint_cast(n->geth()) == 0);
  match(ConH);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Int memory displacement: fits in a signed 12-bit offset field
operand immIOffset()
%{
  predicate(Assembler::is_simm12(n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long memory displacement: fits in a signed 12-bit offset field
operand immLOffset()
%{
  predicate(Assembler::is_simm12(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Scale values
operand immIScale()
%{
  predicate(1 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 3002 
// Integer 32 bit Register Operands
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R10 only
operand iRegI_R10()
%{
  constraint(ALLOC_IN_RC(int_r10_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R12 only
operand iRegI_R12()
%{
  constraint(ALLOC_IN_RC(int_r12_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R13 only
operand iRegI_R13()
%{
  constraint(ALLOC_IN_RC(int_r13_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R14 only
operand iRegI_R14()
%{
  constraint(ALLOC_IN_RC(int_r14_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
// NOTE(review): unlike its siblings this operand omits op_cost(0), so it
// takes the op_attrib default of 1 -- confirm this is intentional.
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R10);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R29 only
operand iRegL_R29()
%{
  constraint(ALLOC_IN_RC(r29_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R30 only
operand iRegL_R30()
%{
  constraint(ALLOC_IN_RC(r30_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 3110 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R10);
  match(iRegP_R15);
  match(javaThread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// This operand is not allowed to use fp even if
// fp is not used to hold the frame pointer.
operand iRegPNoSpNoFp()
%{
  constraint(ALLOC_IN_RC(no_special_no_fp_ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R11 only
operand iRegP_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R12 only
operand iRegP_R12()
%{
  constraint(ALLOC_IN_RC(r12_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R13 only
operand iRegP_R13()
%{
  constraint(ALLOC_IN_RC(r13_reg));
  match(RegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R14 only
operand iRegP_R14()
%{
  constraint(ALLOC_IN_RC(r14_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R15 only
operand iRegP_R15()
%{
  constraint(ALLOC_IN_RC(r15_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R16 only
operand iRegP_R16()
%{
  constraint(ALLOC_IN_RC(r16_reg));
  match(RegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R28 only
operand iRegP_R28()
%{
  constraint(ALLOC_IN_RC(r28_reg));
  match(RegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R30 only
operand iRegP_R30()
%{
  constraint(ALLOC_IN_RC(r30_reg));
  match(RegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R31 only
operand iRegP_R31()
%{
  constraint(ALLOC_IN_RC(r31_reg));
  match(RegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 3256 
// Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R10 only
operand iRegL_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 3289 
// Float Register
// Float register operands
operand fRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand fRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 3313 
// Generic vector class. This will be used for
// all vector operands.
operand vReg()
%{
  constraint(ALLOC_IN_RC(vectora_reg));
  match(VecA);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register V1 only
operand vReg_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register V2 only
operand vReg_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register V3 only
operand vReg_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register V4 only
operand vReg_V4()
%{
  constraint(ALLOC_IN_RC(v4_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register V5 only
operand vReg_V5()
%{
  constraint(ALLOC_IN_RC(v5_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register V6 only
operand vReg_V6()
%{
  constraint(ALLOC_IN_RC(v6_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register V7 only
operand vReg_V7()
%{
  constraint(ALLOC_IN_RC(v7_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register V8 only
operand vReg_V8()
%{
  constraint(ALLOC_IN_RC(v8_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register V9 only
operand vReg_V9()
%{
  constraint(ALLOC_IN_RC(v9_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register V10 only
operand vReg_V10()
%{
  constraint(ALLOC_IN_RC(v10_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register V11 only
operand vReg_V11()
%{
  constraint(ALLOC_IN_RC(v11_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 3434 
// Vector mask register
operand vRegMask()
%{
  constraint(ALLOC_IN_RC(vmask_reg));
  match(RegVectMask);
  match(vRegMask_V0);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// The mask value used to control execution of a masked
// vector instruction is always supplied by vector register v0.
operand vRegMask_V0()
%{
  constraint(ALLOC_IN_RC(vmask_reg_v0));
  match(RegVectMask);
  match(vRegMask);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Java Thread Register
operand javaThread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(java_thread_reg)); // java_thread_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 3466 
//----------Memory Operands----------------------------------------------------
// RISCV has only base_plus_offset and literal address mode, so no need to use
// index and scale. Here set index as 0xffffffff and scale as 0x0.

// Plain register-indirect address: [reg]
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// Base register plus int displacement that fits in simm12: [reg, off]
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Base register plus long displacement that fits in simm12: [reg, off]
operand indOffL(iRegP reg, immLOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Indirect through a narrow oop; only legal when the decode is a no-op
// (CompressedOops::shift() == 0).
operand indirectN(iRegN reg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow-oop base plus int displacement; decode must be a no-op.
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Narrow-oop base plus long displacement; decode must be a no-op.
operand indOffLN(iRegN reg, immLOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 3556 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x02);  // RSP
    index(0xffffffff);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x02);  // RSP
    index(0xffffffff);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x02);  // RSP
    index(0xffffffff);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x02);  // RSP
    index(0xffffffff);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
 3616 
// Special operand allowing long args to int ops to be truncated for free

// Matches (ConvL2I reg) as a plain register read: 32-bit instructions use
// only the low 32 bits of the long source, so no explicit truncation
// instruction needs to be emitted.
operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}
 3629 
 3630 
// Comparison Operands
// NOTE: Label is a predefined operand which should not be redefined in
//       the AD file. It is generically handled within the ADLC.

//----------Conditional Branch Operands----------------------------------------
// Comparison Op  - This is the operation of the comparison, and is limited to
//                  the following set of codes:
//                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.


// used for signed integral comparisons and fp comparisons
operand cmpOp()
%{
  match(Bool);

  format %{ "" %}

  // the values in this interface derive from struct BoolTest::mask;
  // the numeric encodings must stay in sync with that enum's ordering
  interface(COND_INTER) %{
    equal(0x0, "eq");
    greater(0x1, "gt");
    overflow(0x2, "overflow");
    less(0x3, "lt");
    not_equal(0x4, "ne");
    less_equal(0x5, "le");
    no_overflow(0x6, "no_overflow");
    greater_equal(0x7, "ge");
  %}
%}

// used for unsigned integral comparisons
operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  // the values in this interface derive from struct BoolTest::mask;
  // same encodings as cmpOp, but with unsigned condition mnemonics
  interface(COND_INTER) %{
    equal(0x0, "eq");
    greater(0x1, "gtu");
    overflow(0x2, "overflow");
    less(0x3, "ltu");
    not_equal(0x4, "ne");
    less_equal(0x5, "leu");
    no_overflow(0x6, "no_overflow");
    greater_equal(0x7, "geu");
  %}
%}

// used for certain integral comparisons which can be
// converted to bxx instructions
// Restricted (via the predicate) to eq/ne tests only.
operand cmpOpEqNe()
%{
  match(Bool);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne ||
            n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    greater(0x1, "gt");
    overflow(0x2, "overflow");
    less(0x3, "lt");
    not_equal(0x4, "ne");
    less_equal(0x5, "le");
    no_overflow(0x6, "no_overflow");
    greater_equal(0x7, "ge");
  %}
%}

// Unsigned comparison restricted to lt/ge tests only.
operand cmpOpULtGe()
%{
  match(Bool);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::lt ||
            n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    greater(0x1, "gtu");
    overflow(0x2, "overflow");
    less(0x3, "ltu");
    not_equal(0x4, "ne");
    less_equal(0x5, "leu");
    no_overflow(0x6, "no_overflow");
    greater_equal(0x7, "geu");
  %}
%}

// Unsigned comparison restricted to eq/ne/le/gt tests only.
operand cmpOpUEqNeLeGt()
%{
  match(Bool);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne ||
            n->as_Bool()->_test._test == BoolTest::eq ||
            n->as_Bool()->_test._test == BoolTest::le ||
            n->as_Bool()->_test._test == BoolTest::gt);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    greater(0x1, "gtu");
    overflow(0x2, "overflow");
    less(0x3, "ltu");
    not_equal(0x4, "ne");
    less_equal(0x5, "leu");
    no_overflow(0x6, "no_overflow");
    greater_equal(0x7, "geu");
  %}
%}
 3752 
 3753 
// Flags register, used as output of compare logic
operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(reg_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Special Registers

// Method Register
// Allocated in the inline-cache register class; also matches any
// non-SP pointer register via the second match rule.
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 3777 
//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
// instruction definitions by not requiring the AD writer to specify
// separate instructions for every form of operand when the
// instruction accepts multiple operand types with the same basic
// encoding and format. The classic case of this is memory operands.

// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address

opclass memory(indirect, indOffI, indOffL, indirectN, indOffIN, indOffLN);

// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually an addiw $dst, $src, 0) and the downstream instructions
// consume the result of the L2I as an iRegI input. That's a shame since
// the addiw is actually redundant but its not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
opclass iRegIorL(iRegI, iRegL);
opclass iRegNorP(iRegN, iRegP);
opclass iRegILNP(iRegI, iRegL, iRegN, iRegP);
opclass iRegILNPNoSp(iRegINoSp, iRegLNoSp, iRegNNoSp, iRegPNoSp);
opclass immIorL(immI, immL);
 3809 
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architecture's pipeline.

// For specific pipelines, e.g. generic RISC-V, define the stages of that pipeline
//pipe_desc(ID, EX, MEM, WR);
// Symbolic stage names used by the pipe_class declarations below, mapped
// onto the first four stages of the generic 6-stage pipe_desc:
// ID = decode, EX = execute, MEM = memory access, WR = write-back.
#define ID   S0
#define EX   S1
#define MEM  S2
#define WR   S3
 3819 
// Pipeline model: a simplified generic description, used mainly to attach
// fixed latencies to instruction classes rather than to model real hardware.
pipeline %{

attributes %{
  // RISC-V instructions are of length 2 or 4 bytes.
  variable_size_instructions;
  instruction_unit_size = 2;

  // Up to 4 instructions per bundle
  max_instructions_per_bundle = 4;

  // The RISC-V processor fetches 64 bytes...
  instruction_fetch_unit_size = 64;

  // ...in one line.
  instruction_fetch_units = 1;
%}

// We don't use an actual pipeline model so don't care about resources
// or description. we do use pipeline classes to introduce fixed
// latencies

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine

// Generic RISC-V pipeline
// 1 decoder
// 1 instruction decoded per cycle
// 1 load/store ops per cycle, 1 branch, 1 FPU
// 1 mul, 1 div

resources ( DECODE,
            ALU,
            MUL,
            DIV,
            BRANCH,
            LDST,
            FPU);
 3858 
//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);

//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

// Single-precision FP two-source arithmetic: sources read in S1/S2,
// result produced by the FPU and written in S5.
pipe_class fp_dop_reg_reg_s(fRegF dst, fRegF src1, fRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}
 3878 
 3879 pipe_class fp_dop_reg_reg_d(fRegD dst, fRegD src1, fRegD src2)
 3880 %{
 3881   src1   : S1(read);
 3882   src2   : S2(read);
 3883   dst    : S5(write);
 3884   DECODE : ID;
 3885   FPU    : S5;
 3886 %}
 3887 
// Single-precision FP unary operation (one source, one destination).
pipe_class fp_uop_s(fRegF dst, fRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Double-precision FP unary operation.
pipe_class fp_uop_d(fRegD dst, fRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Double -> float conversion.
pipe_class fp_d2f(fRegF dst, fRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Float -> double conversion.
pipe_class fp_f2d(fRegD dst, fRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Float -> int conversion (FP source, integer destination).
pipe_class fp_f2i(iRegINoSp dst, fRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Float -> long conversion.
pipe_class fp_f2l(iRegLNoSp dst, fRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Int -> float conversion (integer source, FP destination).
pipe_class fp_i2f(fRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Long -> float conversion.
pipe_class fp_l2f(fRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Double -> int conversion.
pipe_class fp_d2i(iRegINoSp dst, fRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Double -> long conversion.
pipe_class fp_d2l(iRegLNoSp dst, fRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Int -> double conversion.
pipe_class fp_i2d(fRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Long -> double conversion.
pipe_class fp_l2d(fRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Single-precision FP divide.
pipe_class fp_div_s(fRegF dst, fRegF src1, fRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Double-precision FP divide.
pipe_class fp_div_d(fRegD dst, fRegD src1, fRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Single-precision FP square root.
pipe_class fp_sqrt_s(fRegF dst, fRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Double-precision FP square root.
pipe_class fp_sqrt_d(fRegD dst, fRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Materialize a single-precision FP constant (no source operands).
pipe_class fp_load_constant_s(fRegF dst)
%{
  single_instruction;
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Materialize a double-precision FP constant.
pipe_class fp_load_constant_d(fRegD dst)
%{
  single_instruction;
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Single-precision FP load from memory; uses the load/store unit.
pipe_class fp_load_mem_s(fRegF dst, memory mem)
%{
  single_instruction;
  mem    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  LDST   : MEM;
%}

// Double-precision FP load from memory.
pipe_class fp_load_mem_d(fRegD dst, memory mem)
%{
  single_instruction;
  mem    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  LDST   : MEM;
%}

// Single-precision FP store to memory.
pipe_class fp_store_reg_s(fRegF src, memory mem)
%{
  single_instruction;
  src    : S1(read);
  mem    : S5(write);
  DECODE : ID;
  LDST   : MEM;
%}

// Double-precision FP store to memory.
pipe_class fp_store_reg_d(fRegD src, memory mem)
%{
  single_instruction;
  src    : S1(read);
  mem    : S5(write);
  DECODE : ID;
  LDST   : MEM;
%}
 4085 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in ID, result generated in EX
// E.g.  ADD   Rd, Rs1, Rs2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX(write);
  src1   : ID(read);
  src2   : ID(read);
  DECODE : ID;
  ALU    : EX;
%}

// Integer ALU reg operation with constant shift
// E.g. SLLI    Rd, Rs1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX(write);
  src1   : ID(read);
  DECODE : ID;
  ALU    : EX;
%}

// Integer ALU reg-reg operation with variable shift
// both operands must be available in ID
// E.g. SLL   Rd, Rs1, Rs2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX(write);
  src1   : ID(read);
  src2   : ID(read);
  DECODE : ID;
  ALU    : EX;
%}

// Integer ALU reg operation
// E.g. NEG   Rd, Rs2
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX(write);
  src    : ID(read);
  DECODE : ID;
  ALU    : EX;
%}

// Integer ALU reg immediate operation
// E.g. ADDI   Rd, Rs1, #imm
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX(write);
  src1   : ID(read);
  DECODE : ID;
  ALU    : EX;
%}

// Integer ALU immediate operation (no source operands)
// E.g. LI    Rd, #imm
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX(write);
  DECODE : ID;
  ALU    : EX;
%}
 4156 
//------- Multiply pipeline operations --------------------

// Multiply reg-reg
// E.g. MULW   Rd, Rs1, Rs2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ID(read);
  src2   : ID(read);
  DECODE : ID;
  MUL    : WR;
%}

// 64-bit multiply; carries an explicit fixed latency.
// E.g. MUL   RD, Rs1, Rs2
pipe_class lmul_reg_reg(iRegL dst, iRegL src1, iRegL src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ID(read);
  src2   : ID(read);
  DECODE : ID;
  MUL    : WR;
%}

//------- Divide pipeline operations --------------------

// 32-bit divide.
// E.g. DIVW   Rd, Rs1, Rs2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ID(read);
  src2   : ID(read);
  DECODE : ID;
  DIV    : WR;
%}

// 64-bit divide.
// E.g. DIV   RD, Rs1, Rs2
pipe_class ldiv_reg_reg(iRegL dst, iRegL src1, iRegL src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ID(read);
  src2   : ID(read);
  DECODE : ID;
  DIV    : WR;
%}
 4208 
//------- Load pipeline operations ------------------------

// Load - prefetch
// Eg.  PREFETCH_W  mem
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ID(read);
  DECODE : ID;
  LDST   : MEM;
%}

// Load - reg, mem
// E.g. LA    Rd, mem
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ID(read);
  DECODE : ID;
  LDST   : MEM;
%}

// Load - reg, reg
// E.g. LD    Rd, Rs
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ID(read);
  DECODE : ID;
  LDST   : MEM;
%}

//------- Store pipeline operations -----------------------

// Store - zr, mem
// E.g. SD    zr, mem
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ID(read);
  DECODE : ID;
  LDST   : MEM;
%}

// Store - reg, mem
// E.g. SD    Rs, mem
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ID(read);
  src    : EX(read);
  DECODE : ID;
  LDST   : MEM;
%}

// Store - reg, reg
// E.g. SD    Rs2, Rs1
// n.b. dst here is the address register, which is read, not written.
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ID(read);
  src    : EX(read);
  DECODE : ID;
  LDST   : MEM;
%}
 4276 
//------- Control transfer pipeline operations ------------

// Branch
pipe_class pipe_branch()
%{
  single_instruction;
  DECODE : ID;
  BRANCH : EX;
%}

// Branch via register (register source read in ID)
pipe_class pipe_branch_reg(iRegI src)
%{
  single_instruction;
  src    : ID(read);
  DECODE : ID;
  BRANCH : EX;
%}

// Compare & Branch
// E.g. BEQ   Rs1, Rs2, L
pipe_class pipe_cmp_branch(iRegI src1, iRegI src2)
%{
  single_instruction;
  src1   : ID(read);
  src2   : ID(read);
  DECODE : ID;
  BRANCH : EX;
%}

// Compare-with-zero & branch
// E.g. BEQZ Rs, L
pipe_class pipe_cmpz_branch(iRegI src)
%{
  single_instruction;
  src    : ID(read);
  DECODE : ID;
  BRANCH : EX;
%}
 4315 
//------- Synchronisation operations ----------------------
// Any operation requiring serialization
// E.g. FENCE/Atomic Ops/Load Acquire/Store Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  DECODE : ID;
  LDST   : MEM;
%}

// Catch-all for expanded/multi-instruction sequences; serializes and
// assumes a pessimistic instruction count and latency.
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  DECODE : ID;
  LDST   : MEM;
%}

// The real do-nothing guy
pipe_class real_empty()
%{
    instruction_count(0);
%}

// Empty pipeline class
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}

// Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty;
%}
%}
 4384 //----------INSTRUCTIONS-------------------------------------------------------
 4385 //
 4386 // match      -- States which machine-independent subtree may be replaced
 4387 //               by this instruction.
 4388 // ins_cost   -- The estimated cost of this instruction is used by instruction
 4389 //               selection to identify a minimum cost tree of machine
 4390 //               instructions that matches a tree of machine-independent
 4391 //               instructions.
 4392 // format     -- A string providing the disassembly for this instruction.
 4393 //               The value of an instruction's operand may be inserted
 4394 //               by referring to it with a '$' prefix.
 4395 // opcode     -- Three instruction opcodes may be provided.  These are referred
 4396 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
 4398 //               indicate the type of machine instruction, while secondary
 4399 //               and tertiary are often used for prefix options or addressing
 4400 //               modes.
 4401 // ins_encode -- A list of encode classes with parameters. The encode class
 4402 //               name must have been defined in an 'enc_class' specification
 4403 //               in the encode section of the architecture description.
 4404 
 4405 // ============================================================================
 4406 // Memory (Load/Store) Instructions
 4407 
 4408 // Load Instructions
 4409 
 4410 // Load Byte (8 bit signed)
 4411 instruct loadB(iRegINoSp dst, memory mem)
 4412 %{
 4413   match(Set dst (LoadB mem));
 4414 
 4415   ins_cost(LOAD_COST);
 4416   format %{ "lb  $dst, $mem\t# byte, #@loadB" %}
 4417 
 4418   ins_encode %{
 4419     __ lb(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4420   %}
 4421 
 4422   ins_pipe(iload_reg_mem);
 4423 %}
 4424 
 4425 // Load Byte (8 bit signed) into long
 4426 instruct loadB2L(iRegLNoSp dst, memory mem)
 4427 %{
 4428   match(Set dst (ConvI2L (LoadB mem)));
 4429 
 4430   ins_cost(LOAD_COST);
 4431   format %{ "lb  $dst, $mem\t# byte, #@loadB2L" %}
 4432 
 4433   ins_encode %{
 4434     __ lb(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4435   %}
 4436 
 4437   ins_pipe(iload_reg_mem);
 4438 %}
 4439 
 4440 // Load Byte (8 bit unsigned)
 4441 instruct loadUB(iRegINoSp dst, memory mem)
 4442 %{
 4443   match(Set dst (LoadUB mem));
 4444 
 4445   ins_cost(LOAD_COST);
 4446   format %{ "lbu  $dst, $mem\t# byte, #@loadUB" %}
 4447 
 4448   ins_encode %{
 4449     __ lbu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4450   %}
 4451 
 4452   ins_pipe(iload_reg_mem);
 4453 %}
 4454 
 4455 // Load Byte (8 bit unsigned) into long
 4456 instruct loadUB2L(iRegLNoSp dst, memory mem)
 4457 %{
 4458   match(Set dst (ConvI2L (LoadUB mem)));
 4459 
 4460   ins_cost(LOAD_COST);
 4461   format %{ "lbu  $dst, $mem\t# byte, #@loadUB2L" %}
 4462 
 4463   ins_encode %{
 4464     __ lbu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4465   %}
 4466 
 4467   ins_pipe(iload_reg_mem);
 4468 %}
 4469 
 4470 // Load Short (16 bit signed)
 4471 instruct loadS(iRegINoSp dst, memory mem)
 4472 %{
 4473   match(Set dst (LoadS mem));
 4474 
 4475   ins_cost(LOAD_COST);
 4476   format %{ "lh  $dst, $mem\t# short, #@loadS" %}
 4477 
 4478   ins_encode %{
 4479     __ lh(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4480   %}
 4481 
 4482   ins_pipe(iload_reg_mem);
 4483 %}
 4484 
 4485 // Load Short (16 bit signed) into long
 4486 instruct loadS2L(iRegLNoSp dst, memory mem)
 4487 %{
 4488   match(Set dst (ConvI2L (LoadS mem)));
 4489 
 4490   ins_cost(LOAD_COST);
 4491   format %{ "lh  $dst, $mem\t# short, #@loadS2L" %}
 4492 
 4493   ins_encode %{
 4494     __ lh(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4495   %}
 4496 
 4497   ins_pipe(iload_reg_mem);
 4498 %}
 4499 
 4500 // Load Char (16 bit unsigned)
 4501 instruct loadUS(iRegINoSp dst, memory mem)
 4502 %{
 4503   match(Set dst (LoadUS mem));
 4504 
 4505   ins_cost(LOAD_COST);
 4506   format %{ "lhu  $dst, $mem\t# short, #@loadUS" %}
 4507 
 4508   ins_encode %{
 4509     __ lhu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4510   %}
 4511 
 4512   ins_pipe(iload_reg_mem);
 4513 %}
 4514 
 4515 // Load Short/Char (16 bit unsigned) into long
 4516 instruct loadUS2L(iRegLNoSp dst, memory mem)
 4517 %{
 4518   match(Set dst (ConvI2L (LoadUS mem)));
 4519 
 4520   ins_cost(LOAD_COST);
 4521   format %{ "lhu  $dst, $mem\t# short, #@loadUS2L" %}
 4522 
 4523   ins_encode %{
 4524     __ lhu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4525   %}
 4526 
 4527   ins_pipe(iload_reg_mem);
 4528 %}
 4529 
// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(LOAD_COST);
  format %{ "lw  $dst, $mem\t# int, #@loadI" %}

  ins_encode %{
    __ lw(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long
// lw sign-extends to 64 bits, so the ConvI2L folds into the load.
instruct loadI2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));

  ins_cost(LOAD_COST);
  format %{ "lw  $dst, $mem\t# int, #@loadI2L" %}

  ins_encode %{
    __ lw(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
// The AndL with the 32-bit mask requests zero extension; lwu provides
// it directly.
instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(LOAD_COST);
  format %{ "lwu  $dst, $mem\t# int, #@loadUI2L" %}

  ins_encode %{
    __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(iload_reg_mem);
%}

// Load Long (64 bit signed)
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(LOAD_COST);
  format %{ "ld  $dst, $mem\t# int, #@loadL" %}

  ins_encode %{
    __ ld(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(iload_reg_mem);
%}

// Load Range
// Array length is a non-negative 32-bit value, hence the zero-extending lwu.
instruct loadRange(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(LOAD_COST);
  format %{ "lwu  $dst, $mem\t# range, #@loadRange" %}

  ins_encode %{
    __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(iload_reg_mem);
%}
 4604 
// Load Pointer
// Only applies when no GC barrier is attached to the load; barriered
// loads are matched by GC-specific rules elsewhere.
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  predicate(n->as_Load()->barrier_data() == 0);

  ins_cost(LOAD_COST);
  format %{ "ld  $dst, $mem\t# ptr, #@loadP" %}

  ins_encode %{
    __ ld(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory mem)
%{
  predicate(n->as_Load()->barrier_data() == 0);
  match(Set dst (LoadN mem));

  ins_cost(LOAD_COST);
  format %{ "lwu  $dst, $mem\t# compressed ptr, #@loadN" %}

  ins_encode %{
    __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadKlass mem));

  ins_cost(LOAD_COST);
  format %{ "ld  $dst, $mem\t# class, #@loadKlass" %}

  ins_encode %{
    __ ld(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory mem)
%{
  predicate(!UseCompactObjectHeaders);
  match(Set dst (LoadNKlass mem));

  ins_cost(LOAD_COST);
  format %{ "lwu  $dst, $mem\t# compressed class ptr, #@loadNKlass" %}

  ins_encode %{
    __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(iload_reg_mem);
%}

// Compact-headers variant: the narrow klass is packed in the mark word,
// so shift it down after the 32-bit load.
instruct loadNKlassCompactHeaders(iRegNNoSp dst, memory mem)
%{
  predicate(UseCompactObjectHeaders);
  match(Set dst (LoadNKlass mem));

  ins_cost(LOAD_COST);
  format %{
    "lwu  $dst, $mem\t# compressed klass ptr, shifted\n\t"
    "srli $dst, $dst, markWord::klass_shift_at_offset"
  %}

  ins_encode %{
    __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
    __ srli(as_Register($dst$$reg), as_Register($dst$$reg), (unsigned) markWord::klass_shift_at_offset);
  %}

  ins_pipe(iload_reg_mem);
%}
 4686 
 4687 // Load Float
instruct loadF(fRegF dst, memory mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(LOAD_COST);
  format %{ "flw  $dst, $mem\t# float, #@loadF" %}

  ins_encode %{
    // Single-precision load into an FP register.
    __ flw(as_FloatRegister($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(fp_load_mem_s);
%}
 4701 
 4702 // Load Double
instruct loadD(fRegD dst, memory mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(LOAD_COST);
  format %{ "fld  $dst, $mem\t# double, #@loadD" %}

  ins_encode %{
    // Double-precision load into an FP register.
    __ fld(as_FloatRegister($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(fp_load_mem_d);
%}
 4716 
 4717 // Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(ALU_COST);
  format %{ "mv $dst, $src\t# int, #@loadConI" %}

  // Materializes the immediate via the riscv_enc_mov_imm encoding class
  // (defined elsewhere in this file).
  ins_encode(riscv_enc_mov_imm(dst, src));

  ins_pipe(ialu_imm);
%}
 4729 
 4730 // Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(ALU_COST);
  format %{ "mv $dst, $src\t# long, #@loadConL" %}

  // Same encoding class as loadConI; handles full 64-bit immediates.
  ins_encode(riscv_enc_mov_imm(dst, src));

  ins_pipe(ialu_imm);
%}
 4742 
 4743 // Load Pointer Constant
instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(ALU_COST);
  format %{ "mv  $dst, $con\t# ptr, #@loadConP" %}

  // Pointer constants may need relocation info; handled by riscv_enc_mov_p.
  ins_encode(riscv_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}
 4755 
 4756 // Load Null Pointer Constant
instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(ALU_COST);
  format %{ "mv  $dst, $con\t# null pointer, #@loadConP0" %}

  // Null pointer: a plain move of zero, no relocation needed.
  ins_encode(riscv_enc_mov_zero(dst));

  ins_pipe(ialu_imm);
%}
 4768 
 4769 // Load Pointer Constant One
instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(ALU_COST);
  format %{ "mv  $dst, $con\t# load ptr constant one, #@loadConP1" %}

  ins_encode(riscv_enc_mov_p1(dst));

  ins_pipe(ialu_imm);
%}
 4781 
 4782 // Load Narrow Pointer Constant
instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  // Costed higher: materializing a narrow oop constant can take a
  // multi-instruction sequence (see riscv_enc_mov_n).
  ins_cost(ALU_COST * 4);
  format %{ "mv  $dst, $con\t# compressed ptr, #@loadConN" %}

  ins_encode(riscv_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}
 4794 
 4795 // Load Narrow Null Pointer Constant
instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(ALU_COST);
  format %{ "mv  $dst, $con\t# compressed null pointer, #@loadConN0" %}

  // Narrow null is encoded as zero, so reuse the zero-move encoding.
  ins_encode(riscv_enc_mov_zero(dst));

  ins_pipe(ialu_imm);
%}
 4807 
 4808 // Load Narrow Klass Constant
instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  // Costed higher: narrow klass constants use a longer materialization
  // sequence (see riscv_enc_mov_nk).
  ins_cost(ALU_COST * 6);
  format %{ "mv  $dst, $con\t# compressed klass ptr, #@loadConNKlass" %}

  ins_encode(riscv_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
 4820 
 4821 // Load Half Float Constant
instruct loadConH(fRegF dst, immH con) %{
  match(Set dst con);

  ins_cost(LOAD_COST);
  format %{
    "flh $dst, [$constantaddress]\t# load from constant table: float=$con, #@loadConH"
  %}

  ins_encode %{
    assert(UseZfh || UseZfhmin, "must");
    // Prefer the Zfa fli.h immediate form when the value is encodable;
    // otherwise load the half-float from the constant table.
    if (MacroAssembler::can_hf_imm_load($con$$constant)) {
      __ fli_h(as_FloatRegister($dst$$reg), $con$$constant);
    } else {
      __ flh(as_FloatRegister($dst$$reg), $constantaddress($con));
    }
  %}

  ins_pipe(fp_load_constant_s);
%}
 4841 
instruct loadConH0(fRegF dst, immH0 con) %{
  match(Set dst con);

  ins_cost(XFER_COST);

  format %{ "fmv.h.x $dst, zr\t# float, #@loadConH0" %}

  ins_encode %{
    assert(UseZfh || UseZfhmin, "must");
    // +0.0 half: move the zero register's bit pattern into the FP register.
    __ fmv_h_x(as_FloatRegister($dst$$reg), zr);
  %}

  ins_pipe(fp_load_constant_s);
%}
 4856 
 4857 // Load Float Constant
instruct loadConF(fRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(LOAD_COST);
  format %{
    "flw $dst, [$constantaddress]\t# load from constant table: float=$con, #@loadConF"
  %}

  ins_encode %{
    // Prefer the Zfa fli.s immediate form when the value is encodable;
    // otherwise load the float from the constant table.
    if (MacroAssembler::can_fp_imm_load($con$$constant)) {
      __ fli_s(as_FloatRegister($dst$$reg), $con$$constant);
    } else {
      __ flw(as_FloatRegister($dst$$reg), $constantaddress($con));
    }
  %}

  ins_pipe(fp_load_constant_s);
%}
 4876 
instruct loadConF0(fRegF dst, immF0 con) %{
  match(Set dst con);

  ins_cost(XFER_COST);

  format %{ "fmv.w.x $dst, zr\t# float, #@loadConF0" %}

  ins_encode %{
    // +0.0f: move the zero register's bit pattern into the FP register.
    __ fmv_w_x(as_FloatRegister($dst$$reg), zr);
  %}

  ins_pipe(fp_load_constant_s);
%}
 4890 
 4891 // Load Double Constant
instruct loadConD(fRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(LOAD_COST);
  format %{
    "fld $dst, [$constantaddress]\t# load from constant table: double=$con, #@loadConD"
  %}

  ins_encode %{
    // Prefer the Zfa fli.d immediate form when the value is encodable;
    // otherwise load the double from the constant table.
    if (MacroAssembler::can_dp_imm_load($con$$constant)) {
      __ fli_d(as_FloatRegister($dst$$reg), $con$$constant);
    } else {
      __ fld(as_FloatRegister($dst$$reg), $constantaddress($con));
    }
  %}

  ins_pipe(fp_load_constant_d);
%}
 4910 
instruct loadConD0(fRegD dst, immD0 con) %{
  match(Set dst con);

  ins_cost(XFER_COST);

  format %{ "fmv.d.x $dst, zr\t# double, #@loadConD0" %}

  ins_encode %{
    // +0.0: move the zero register's bit pattern into the FP register.
    __ fmv_d_x(as_FloatRegister($dst$$reg), zr);
  %}

  ins_pipe(fp_load_constant_d);
%}
 4924 
 4925 // Store Byte
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(STORE_COST);
  format %{ "sb  $src, $mem\t# byte, #@storeB" %}

  ins_encode %{
    // sb stores the low 8 bits of $src.
    __ sb(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_reg_mem);
%}
 4939 
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));

  ins_cost(STORE_COST);
  format %{ "sb zr, $mem\t# byte, #@storeimmB0" %}

  ins_encode %{
    // Store zero directly from the hardwired zero register.
    __ sb(zr, Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_mem);
%}
 4953 
 4954 // Store Char/Short
instruct storeC(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(STORE_COST);
  format %{ "sh  $src, $mem\t# short, #@storeC" %}

  ins_encode %{
    // sh stores the low 16 bits of $src.
    __ sh(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_reg_mem);
%}
 4968 
instruct storeimmC0(immI0 zero, memory mem)
%{
  match(Set mem (StoreC mem zero));

  ins_cost(STORE_COST);
  format %{ "sh  zr, $mem\t# short, #@storeimmC0" %}

  ins_encode %{
    // Store zero directly from the hardwired zero register.
    __ sh(zr, Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_mem);
%}
 4982 
 4983 // Store Integer
 4984 instruct storeI(iRegIorL2I src, memory mem)
 4985 %{
 4986   match(Set mem(StoreI mem src));
 4987 
 4988   ins_cost(STORE_COST);
 4989   format %{ "sw  $src, $mem\t# int, #@storeI" %}
 4990 
 4991   ins_encode %{
 4992     __ sw(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4993   %}
 4994 
 4995   ins_pipe(istore_reg_mem);
 4996 %}
 4997 
 4998 instruct storeimmI0(immI0 zero, memory mem)
 4999 %{
 5000   match(Set mem(StoreI mem zero));
 5001 
 5002   ins_cost(STORE_COST);
 5003   format %{ "sw  zr, $mem\t# int, #@storeimmI0" %}
 5004 
 5005   ins_encode %{
 5006     __ sw(zr, Address(as_Register($mem$$base), $mem$$disp));
 5007   %}
 5008 
 5009   ins_pipe(istore_mem);
 5010 %}
 5011 
 5012 // Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(STORE_COST);
  format %{ "sd  $src, $mem\t# long, #@storeL" %}

  ins_encode %{
    // Full 64-bit store.
    __ sd(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_reg_mem);
%}
 5026 
 5027 // Store Long (64 bit signed)
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));

  ins_cost(STORE_COST);
  format %{ "sd  zr, $mem\t# long, #@storeimmL0" %}

  ins_encode %{
    // Store zero directly from the hardwired zero register.
    __ sd(zr, Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_mem);
%}
 5041 
 5042 // Store Pointer
instruct storeP(iRegP src, memory mem)
%{
  match(Set mem (StoreP mem src));
  // Only match plain pointer stores with no GC barrier data attached.
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(STORE_COST);
  format %{ "sd  $src, $mem\t# ptr, #@storeP" %}

  ins_encode %{
    __ sd(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_reg_mem);
%}
 5057 
 5058 // Store Pointer
instruct storeimmP0(immP0 zero, memory mem)
%{
  match(Set mem (StoreP mem zero));
  // Only match plain pointer stores with no GC barrier data attached.
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(STORE_COST);
  format %{ "sd zr, $mem\t# ptr, #@storeimmP0" %}

  ins_encode %{
    // Null store directly from the hardwired zero register.
    __ sd(zr, Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_mem);
%}
 5073 
 5074 // Store Compressed Pointer
instruct storeN(iRegN src, memory mem)
%{
  // Only match plain narrow-oop stores with no GC barrier data attached.
  predicate(n->as_Store()->barrier_data() == 0);
  match(Set mem (StoreN mem src));

  ins_cost(STORE_COST);
  format %{ "sw  $src, $mem\t# compressed ptr, #@storeN" %}

  ins_encode %{
    // Narrow oops are 32 bits wide, hence sw.
    __ sw(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_reg_mem);
%}
 5089 
 5090 instruct storeImmN0(immN0 zero, memory mem)
 5091 %{
 5092   predicate(n->as_Store()->barrier_data() == 0);
 5093   match(Set mem (StoreN mem zero));
 5094 
 5095   ins_cost(STORE_COST);
 5096   format %{ "sw  zr, $mem\t# compressed ptr, #@storeImmN0" %}
 5097 
 5098   ins_encode %{
 5099     __ sw(zr, Address(as_Register($mem$$base), $mem$$disp));
 5100   %}
 5101 
 5102   ins_pipe(istore_reg_mem);
 5103 %}
 5104 
 5105 // Store Float
instruct storeF(fRegF src, memory mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(STORE_COST);
  format %{ "fsw  $src, $mem\t# float, #@storeF" %}

  ins_encode %{
    // Single-precision store from an FP register.
    __ fsw(as_FloatRegister($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(fp_store_reg_s);
%}
 5119 
 5120 // Store Double
instruct storeD(fRegD src, memory mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(STORE_COST);
  format %{ "fsd  $src, $mem\t# double, #@storeD" %}

  ins_encode %{
    // Double-precision store from an FP register.
    __ fsd(as_FloatRegister($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(fp_store_reg_d);
%}
 5134 
 5135 // Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory mem)
%{
  match(Set mem (StoreNKlass mem src));

  ins_cost(STORE_COST);
  format %{ "sw  $src, $mem\t# compressed klass ptr, #@storeNKlass" %}

  ins_encode %{
    // Narrow klass pointers are 32 bits wide, hence sw.
    __ sw(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_reg_mem);
%}
 5149 
 5150 // ============================================================================
 5151 // Prefetch instructions
 5152 // Must be safe to execute with invalid address (cannot fault).
 5153 
instruct prefetchalloc( memory mem ) %{
  // Requires the Zicbop cache-block prefetch extension.
  predicate(UseZicbop);
  match(PrefetchAllocation mem);

  ins_cost(ALU_COST * 1);
  format %{ "prefetch_w $mem\t# Prefetch for write" %}

  ins_encode %{
    if (Assembler::is_simm12($mem$$disp)) {
      // prefetch.w requires the low 5 bits of the offset to be zero,
      // hence the 0x1f alignment check on the displacement.
      if (($mem$$disp & 0x1f) == 0) {
        __ prefetch_w(as_Register($mem$$base), $mem$$disp);
      } else {
        // Unaligned simm12 offset: fold it into t0 first.
        __ addi(t0, as_Register($mem$$base), $mem$$disp);
        __ prefetch_w(t0, 0);
      }
    } else {
      // Displacement too large for simm12: materialize and add it.
      __ mv(t0, $mem$$disp);
      __ add(t0, as_Register($mem$$base), t0);
      __ prefetch_w(t0, 0);
    }
  %}

  ins_pipe(iload_prefetch);
%}
 5178 
 5179 // ============================================================================
 5180 // Atomic operation instructions
 5181 //
 5182 
 5183 // standard CompareAndSwapX when we are using barriers
 5184 // these have higher priority than the rules selected by a predicate
instruct compareAndSwapB_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
                                iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
  // Sub-word emulation path: used when byte-wide CAS (Zabha + Zacas)
  // is not available; needs three temps for the word-based emulation.
  predicate(!UseZabha || !UseZacas);

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{
    "cmpxchg $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapB_narrow"
  %}

  ins_encode %{
    // Relaxed acquire; release (rl) always set. Result is a bool in $res.
    __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
                            Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
                            true /* result as bool */, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_slow);
%}
 5209 
instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  // Direct byte CAS: requires both Zabha (byte AMOs) and Zacas (CAS).
  predicate(UseZabha && UseZacas);

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapB"
  %}

  ins_encode %{
    // Relaxed acquire; release (rl) always set. Result is a bool in $res.
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
               Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
               true /* result as bool */);
  %}

  ins_pipe(pipe_slow);
%}
 5231 
instruct compareAndSwapS_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
                                iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
  // Sub-word emulation path: used when halfword-wide CAS (Zabha + Zacas)
  // is not available; needs three temps for the word-based emulation.
  predicate(!UseZabha || !UseZacas);

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{
    "cmpxchg $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapS_narrow"
  %}

  ins_encode %{
    // Relaxed acquire; release (rl) always set. Result is a bool in $res.
    __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
                            Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
                            true /* result as bool */, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_slow);
%}
 5256 
instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  // Direct halfword CAS: requires both Zabha and Zacas.
  predicate(UseZabha && UseZacas);

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapS"
  %}

  ins_encode %{
    // Relaxed acquire; release (rl) always set. Result is a bool in $res.
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
               Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
               true /* result as bool */);
  %}

  ins_pipe(pipe_slow);
%}
 5278 
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapI"
  %}

  ins_encode %{
    // Relaxed acquire; release (rl) always set. Result is a bool in $res.
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
               /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
               /*result as bool*/ true);
  %}

  ins_pipe(pipe_slow);
%}
 5298 
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval)
%{
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapL"
  %}

  ins_encode %{
    // Relaxed acquire; release (rl) always set. Result is a bool in $res.
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
               /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
               /*result as bool*/ true);
  %}

  ins_pipe(pipe_slow);
%}
 5318 
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval)
%{
  // Only match pointer CAS nodes with no GC barrier data attached.
  predicate(n->as_LoadStore()->barrier_data() == 0);

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapP"
  %}

  ins_encode %{
    // Relaxed acquire; release (rl) always set. Result is a bool in $res.
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
               /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
               /*result as bool*/ true);
  %}

  ins_pipe(pipe_slow);
%}
 5340 
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval)
%{
  // Only match narrow-oop CAS nodes with no GC barrier data attached.
  predicate(n->as_LoadStore()->barrier_data() == 0);

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapN"
  %}

  ins_encode %{
    // uint32: narrow oops compare as unsigned 32-bit values.
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
               /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
               /*result as bool*/ true);
  %}

  ins_pipe(pipe_slow);
%}
 5362 
 5363 // alternative CompareAndSwapX when we are eliding barriers
instruct compareAndSwapBAcq_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
                                   iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
  // Acquire variant of the sub-word emulation path: matched when the
  // node needs an acquiring load-reserved and Zabha/Zacas byte CAS is
  // unavailable.
  predicate((!UseZabha || !UseZacas) && needs_acquiring_load_reserved(n));

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapBAcq_narrow"
  %}

  ins_encode %{
    // Acquire (aq) and release (rl) both set. Result is a bool in $res.
    __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
                            Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
                            true /* result as bool */, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_slow);
%}
 5388 
 5389 instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
 5390 %{
 5391   predicate((UseZabha && UseZacas) && needs_acquiring_load_reserved(n));
 5392 
 5393   match(Set res (CompareAndSwapB mem (Binary oldval newval)));
 5394 
 5395   ins_cost(2 * VOLATILE_REF_COST);
 5396 
 5397   format %{
 5398     "cmpxchg $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval\n\t"
 5399     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapBAcq"
 5400   %}
 5401 
 5402   ins_encode %{
 5403     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
 5404                Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
 5405                true /* result as bool */);
 5406   %}
 5407 
 5408   ins_pipe(pipe_slow);
 5409 %}
 5410 
instruct compareAndSwapSAcq_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
                                   iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
  // Acquire variant of the sub-word emulation path: matched when the
  // node needs an acquiring load-reserved and Zabha/Zacas halfword CAS
  // is unavailable.
  predicate((!UseZabha || !UseZacas) && needs_acquiring_load_reserved(n));

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapSAcq_narrow"
  %}

  ins_encode %{
    // Acquire (aq) and release (rl) both set. Result is a bool in $res.
    __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
                            Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
                            true /* result as bool */, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_slow);
%}
 5435 
 5436 instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
 5437 %{
 5438   predicate((UseZabha && UseZacas) && needs_acquiring_load_reserved(n));
 5439 
 5440   match(Set res (CompareAndSwapS mem (Binary oldval newval)));
 5441 
 5442   ins_cost(2 * VOLATILE_REF_COST);
 5443 
 5444   format %{
 5445     "cmpxchg $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval\n\t"
 5446     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapSAcq"
 5447   %}
 5448 
 5449   ins_encode %{
 5450     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
 5451                Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
 5452                true /* result as bool */);
 5453   %}
 5454 
 5455   ins_pipe(pipe_slow);
 5456 %}
 5457 
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  // Acquire variant: matched when the node needs an acquiring load-reserved.
  predicate(needs_acquiring_load_reserved(n));

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapIAcq"
  %}

  ins_encode %{
    // Acquire (aq) and release (rl) both set. Result is a bool in $res.
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
               /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
               /*result as bool*/ true);
  %}

  ins_pipe(pipe_slow);
%}
 5479 
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval)
%{
  // Acquire variant: matched when the node needs an acquiring load-reserved.
  predicate(needs_acquiring_load_reserved(n));

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapLAcq"
  %}

  ins_encode %{
    // Acquire (aq) and release (rl) both set. Result is a bool in $res.
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
               /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
               /*result as bool*/ true);
  %}

  ins_pipe(pipe_slow);
%}
 5501 
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval)
%{
  // Acquire variant; only for pointer CAS nodes with no GC barrier data.
  predicate(needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == 0));

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapPAcq"
  %}

  ins_encode %{
    // Acquire (aq) and release (rl) both set. Result is a bool in $res.
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
               /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
               /*result as bool*/ true);
  %}

  ins_pipe(pipe_slow);
%}
 5523 
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval)
%{
  // Acquire variant; only for narrow-oop CAS nodes with no GC barrier data.
  predicate(needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == 0);

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapNAcq"
  %}

  ins_encode %{
    // uint32: narrow oops compare as unsigned 32-bit values.
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
               /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
               /*result as bool*/ true);
  %}

  ins_pipe(pipe_slow);
%}
 5545 
 5546 // Sundry CAS operations.  Note that release is always true,
 5547 // regardless of the memory ordering of the CAS.  This is because we
 5548 // need the volatile case to be sequentially consistent but there is
 5549 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
 5550 // can't check the type of memory ordering here, so we always emit a
 5551 // sc_d(w) with rl bit set.
 5552 instruct compareAndExchangeB_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5553                                     iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5554 %{
 5555   predicate(!UseZabha || !UseZacas);
 5556 
 5557   match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
 5558 
 5559   ins_cost(2 * VOLATILE_REF_COST);
 5560 
 5561   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5562 
 5563   format %{
 5564     "cmpxchg $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeB_narrow"
 5565   %}
 5566 
 5567   ins_encode %{
 5568     __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
 5569                             /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
 5570                             /*result_as_bool*/ false, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5571   %}
 5572 
 5573   ins_pipe(pipe_slow);
 5574 %}
 5575 
 5576 instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
 5577 %{
 5578   predicate(UseZabha && UseZacas);
 5579 
 5580   match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
 5581 
 5582   ins_cost(2 * VOLATILE_REF_COST);
 5583 
 5584   format %{
 5585     "cmpxchg $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeB"
 5586   %}
 5587 
 5588   ins_encode %{
 5589     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
 5590                /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
 5591   %}
 5592 
 5593   ins_pipe(pipe_slow);
 5594 %}
 5595 
 5596 instruct compareAndExchangeS_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5597                                     iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5598 %{
 5599   predicate(!UseZabha || !UseZacas);
 5600 
 5601   match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
 5602 
 5603   ins_cost(2 * VOLATILE_REF_COST);
 5604 
 5605   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5606 
 5607   format %{
 5608     "cmpxchg $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeS_narrow"
 5609   %}
 5610 
 5611   ins_encode %{
 5612     __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
 5613                             /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
 5614                             /*result_as_bool*/ false, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5615   %}
 5616 
 5617   ins_pipe(pipe_slow);
 5618 %}
 5619 
 5620 instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
 5621 %{
 5622   predicate(UseZabha && UseZacas);
 5623 
 5624   match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
 5625 
 5626   ins_cost(2 * VOLATILE_REF_COST);
 5627 
 5628   format %{
 5629     "cmpxchg $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeS"
 5630   %}
 5631 
 5632   ins_encode %{
 5633     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
 5634                /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
 5635   %}
 5636 
 5637   ins_pipe(pipe_slow);
 5638 %}
 5639 
 5640 instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
 5641 %{
 5642   match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
 5643 
 5644   ins_cost(2 * VOLATILE_REF_COST);
 5645 
 5646   format %{
 5647     "cmpxchg $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeI"
 5648   %}
 5649 
 5650   ins_encode %{
 5651     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
 5652                /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
 5653   %}
 5654 
 5655   ins_pipe(pipe_slow);
 5656 %}
 5657 
 5658 instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval)
 5659 %{
 5660   match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
 5661 
 5662   ins_cost(2 * VOLATILE_REF_COST);
 5663 
 5664   format %{
 5665     "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeL"
 5666   %}
 5667 
 5668   ins_encode %{
 5669     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 5670                /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
 5671   %}
 5672 
 5673   ins_pipe(pipe_slow);
 5674 %}
 5675 
 5676 instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval)
 5677 %{
 5678   predicate(n->as_LoadStore()->barrier_data() == 0);
 5679 
 5680   match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
 5681 
 5682   ins_cost(2 * VOLATILE_REF_COST);
 5683 
 5684   format %{
 5685     "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeN"
 5686   %}
 5687 
 5688   ins_encode %{
 5689     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
 5690                /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
 5691   %}
 5692 
 5693   ins_pipe(pipe_slow);
 5694 %}
 5695 
 5696 instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval)
 5697 %{
 5698   predicate(n->as_LoadStore()->barrier_data() == 0);
 5699 
 5700   match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
 5701 
 5702   ins_cost(2 * VOLATILE_REF_COST);
 5703 
 5704   format %{
 5705     "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeP"
 5706   %}
 5707 
 5708   ins_encode %{
 5709     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 5710                /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
 5711   %}
 5712 
 5713   ins_pipe(pipe_slow);
 5714 %}
 5715 
 5716 instruct compareAndExchangeBAcq_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5717                                        iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5718 %{
 5719   predicate((!UseZabha || !UseZacas) && needs_acquiring_load_reserved(n));
 5720 
 5721   match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
 5722 
 5723   ins_cost(2 * VOLATILE_REF_COST);
 5724 
 5725   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5726 
 5727   format %{
 5728     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeBAcq_narrow"
 5729   %}
 5730 
 5731   ins_encode %{
 5732     __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
 5733                             /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
 5734                             /*result_as_bool*/ false, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5735   %}
 5736 
 5737   ins_pipe(pipe_slow);
 5738 %}
 5739 
 5740 instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
 5741 %{
 5742   predicate((UseZabha && UseZacas) && needs_acquiring_load_reserved(n));
 5743 
 5744   match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
 5745 
 5746   ins_cost(2 * VOLATILE_REF_COST);
 5747 
 5748   format %{
 5749     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeBAcq"
 5750   %}
 5751 
 5752   ins_encode %{
 5753     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
 5754                /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 5755   %}
 5756 
 5757   ins_pipe(pipe_slow);
 5758 %}
 5759 
 5760 instruct compareAndExchangeSAcq_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5761                                        iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5762 %{
 5763   predicate((!UseZabha || !UseZacas) && needs_acquiring_load_reserved(n));
 5764 
 5765   match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
 5766 
 5767   ins_cost(2 * VOLATILE_REF_COST);
 5768 
 5769   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5770 
 5771   format %{
 5772     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeSAcq_narrow"
 5773   %}
 5774 
 5775   ins_encode %{
 5776     __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
 5777                             /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
 5778                             /*result_as_bool*/ false, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5779   %}
 5780 
 5781   ins_pipe(pipe_slow);
 5782 %}
 5783 
 5784 instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
 5785 %{
 5786   predicate((UseZabha && UseZacas) && needs_acquiring_load_reserved(n));
 5787 
 5788   match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
 5789 
 5790   ins_cost(2 * VOLATILE_REF_COST);
 5791 
 5792   format %{
 5793     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeSAcq"
 5794   %}
 5795 
 5796   ins_encode %{
 5797     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
 5798                /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 5799   %}
 5800 
 5801   ins_pipe(pipe_slow);
 5802 %}
 5803 
 5804 instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
 5805 %{
 5806   predicate(needs_acquiring_load_reserved(n));
 5807 
 5808   match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
 5809 
 5810   ins_cost(2 * VOLATILE_REF_COST);
 5811 
 5812   format %{
 5813     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeIAcq"
 5814   %}
 5815 
 5816   ins_encode %{
 5817     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
 5818                /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 5819   %}
 5820 
 5821   ins_pipe(pipe_slow);
 5822 %}
 5823 
 5824 instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval)
 5825 %{
 5826   predicate(needs_acquiring_load_reserved(n));
 5827 
 5828   match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
 5829 
 5830   ins_cost(2 * VOLATILE_REF_COST);
 5831 
 5832   format %{
 5833     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeLAcq"
 5834   %}
 5835 
 5836   ins_encode %{
 5837     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 5838                /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 5839   %}
 5840 
 5841   ins_pipe(pipe_slow);
 5842 %}
 5843 
 5844 instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval)
 5845 %{
 5846   predicate(needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == 0);
 5847 
 5848   match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
 5849 
 5850   ins_cost(2 * VOLATILE_REF_COST);
 5851 
 5852   format %{
 5853     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeNAcq"
 5854   %}
 5855 
 5856   ins_encode %{
 5857     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
 5858                /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 5859   %}
 5860 
 5861   ins_pipe(pipe_slow);
 5862 %}
 5863 
 5864 instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval)
 5865 %{
 5866   predicate(needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == 0));
 5867 
 5868   match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
 5869 
 5870   ins_cost(2 * VOLATILE_REF_COST);
 5871 
 5872   format %{
 5873     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangePAcq"
 5874   %}
 5875 
 5876   ins_encode %{
 5877     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 5878                /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 5879   %}
 5880 
 5881   ins_pipe(pipe_slow);
 5882 %}
 5883 
// Weak byte CAS when Zabha/Zacas are not both available: the sub-word
// access is handled by MacroAssembler::weak_cmpxchg_narrow_value on the
// enclosing aligned word (hence tmp1-3). oldval/newval are pinned to
// R12/R13 and killed; flags clobbered. $res := 1 on success, 0 otherwise;
// weak form may fail spuriously.
instruct weakCompareAndSwapB_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
                                    iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
  predicate(!UseZabha || !UseZacas);

  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{
    "weak_cmpxchg $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapB_narrow"
  %}

  ins_encode %{
    __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
                                 /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
                                 $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_slow);
%}
 5908 
// Weak byte CAS with native sub-word support (Zabha + Zacas).
// $res := 1 on success, 0 on failure.
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate(UseZabha && UseZacas);

  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapB"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
                    /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
 5929 
// Weak short CAS when Zabha/Zacas are not both available; sub-word access
// emulated on the enclosing aligned word (tmp1-3 required, oldval/newval
// pinned to R12/R13 and killed, flags clobbered).
// $res := 1 on success, 0 on failure.
instruct weakCompareAndSwapS_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
                                    iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
  predicate(!UseZabha || !UseZacas);

  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{
    "weak_cmpxchg $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapS_narrow"
  %}

  ins_encode %{
    __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
                                 /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
                                 $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_slow);
%}
 5954 
// Weak short CAS with native sub-word support (Zabha + Zacas).
// $res := 1 on success, 0 on failure.
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate(UseZabha && UseZacas);

  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapS"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
                    /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
 5975 
// Weak 32-bit CAS; $res := 1 on success, 0 on failure.
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapI"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
                    /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
 5994 
// Weak 64-bit CAS; note $res is an int register (boolean result).
// $res := 1 on success, 0 on failure.
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval)
%{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapL"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
                    /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
 6013 
// Weak CAS of a compressed (narrow) oop; barrier-free case only
// (barrier_data() == 0). $res := 1 on success, 0 on failure.
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval)
%{
  predicate(n->as_LoadStore()->barrier_data() == 0);

  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapN"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
                    /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
 6034 
// Weak CAS of a full-width oop/pointer; barrier-free case only
// (barrier_data() == 0). $res := 1 on success, 0 on failure.
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval)
%{
  predicate(n->as_LoadStore()->barrier_data() == 0);

  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapP"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
                    /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
 6055 
// Acquiring weak byte CAS, emulated on the enclosing word when
// Zabha/Zacas are not both available. oldval/newval pinned to R12/R13
// and killed; tmp1-3 and flags clobbered. $res := 1 on success.
instruct weakCompareAndSwapBAcq_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
                                       iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
  predicate((!UseZabha || !UseZacas) && needs_acquiring_load_reserved(n));

  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{
    "weak_cmpxchg_acq $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapBAcq_narrow"
  %}

  ins_encode %{
    __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
                                 /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
                                 $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_slow);
%}
 6080 
// Acquiring weak byte CAS with native sub-word support (Zabha + Zacas).
// $res := 1 on success, 0 on failure.
instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate((UseZabha && UseZacas) && needs_acquiring_load_reserved(n));

  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg_acq $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapBAcq"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
                    /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
 6101 
// Acquiring weak short CAS, emulated on the enclosing word when
// Zabha/Zacas are not both available. oldval/newval pinned to R12/R13
// and killed; tmp1-3 and flags clobbered. $res := 1 on success.
instruct weakCompareAndSwapSAcq_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
                                       iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
  predicate((!UseZabha || !UseZacas) && needs_acquiring_load_reserved(n));

  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{
    "weak_cmpxchg_acq $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapSAcq_narrow"
  %}

  ins_encode %{
    __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
                                 /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
                                 $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_slow);
%}
 6126 
// Acquiring weak short CAS with native sub-word support (Zabha + Zacas).
// $res := 1 on success, 0 on failure.
instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate((UseZabha && UseZacas) && needs_acquiring_load_reserved(n));

  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg_acq $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapSAcq"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
                    /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
 6147 
// Acquiring weak 32-bit CAS; $res := 1 on success, 0 on failure.
instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg_acq $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapIAcq"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
                    /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
 6168 
// Acquiring weak 64-bit CAS; $res is an int register (boolean result).
// $res := 1 on success, 0 on failure.
instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg_acq $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapLAcq"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
                    /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
 6189 
// Acquiring weak CAS of a compressed (narrow) oop; barrier-free case only
// (barrier_data() == 0). $res := 1 on success, 0 on failure.
instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval)
%{
  predicate(needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == 0);

  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg_acq $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapNAcq"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
                    /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
 6210 
 6211 instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval)
 6212 %{
 6213   predicate(needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == 0));
 6214 
 6215   match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
 6216 
 6217   ins_cost(2 * VOLATILE_REF_COST);
 6218 
 6219   format %{
 6220     "weak_cmpxchg_acq $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 6221     "\t# $res == 1 when success, #@weakCompareAndSwapPAcq"
 6222   %}
 6223 
 6224   ins_encode %{
 6225     __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 6226                     /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 6227   %}
 6228 
 6229   ins_pipe(pipe_slow);
 6230 %}
 6231 
// Atomic 32-bit exchange: store $newv to [$mem], returning the previous
// memory value in $prev (GetAndSetI).
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev)
%{
  match(Set prev (GetAndSetI mem newv));

  ins_cost(ALU_COST);

  format %{ "atomic_xchgw  $prev, $newv, [$mem]\t#@get_and_setI" %}

  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
 6246 
// Atomic 64-bit exchange: store $newv to [$mem], returning the previous
// memory value in $prev (GetAndSetL).
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev)
%{
  match(Set prev (GetAndSetL mem newv));

  ins_cost(ALU_COST);

  format %{ "atomic_xchg  $prev, $newv, [$mem]\t#@get_and_setL" %}

  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
 6261 
// Atomic exchange of a compressed (narrow) oop; the unsigned word variant
// zero-extends the previous value into $prev. Barrier-free case only
// (barrier_data() == 0).
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev)
%{
  predicate(n->as_LoadStore()->barrier_data() == 0);

  match(Set prev (GetAndSetN mem newv));

  ins_cost(ALU_COST);

  format %{ "atomic_xchgwu $prev, $newv, [$mem]\t#@get_and_setN" %}

  ins_encode %{
    __ atomic_xchgwu($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
 6278 
// Atomic exchange of a full-width oop/pointer; barrier-free case only
// (barrier_data() == 0). Previous value returned in $prev.
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev)
%{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetP mem newv));

  ins_cost(ALU_COST);

  format %{ "atomic_xchg  $prev, $newv, [$mem]\t#@get_and_setP" %}

  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
 6294 
// Acquiring variant of get_and_setI, selected when the LoadStore needs
// acquire semantics (needs_acquiring_load_reserved); uses the
// atomic_xchgalw flavor with stronger memory ordering.
instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set prev (GetAndSetI mem newv));

  ins_cost(ALU_COST);

  format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]\t#@get_and_setIAcq" %}

  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
 6311 
// Acquiring variant of get_and_setL (atomic_xchgal: stronger ordering).
instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set prev (GetAndSetL mem newv));

  ins_cost(ALU_COST);

  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]\t#@get_and_setLAcq" %}

  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
 6328 
// Acquiring variant of get_and_setN (unsigned word exchange with stronger
// ordering); barrier-free case only (barrier_data() == 0).
instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev)
%{
  predicate(needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == 0);

  match(Set prev (GetAndSetN mem newv));

  ins_cost(ALU_COST);

  format %{ "atomic_xchgwu_acq $prev, $newv, [$mem]\t#@get_and_setNAcq" %}

  ins_encode %{
    __ atomic_xchgalwu($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
 6345 
// Acquiring variant of get_and_setP; barrier-free case only
// (barrier_data() == 0).
instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev)
%{
  predicate(needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == 0));

  match(Set prev (GetAndSetP mem newv));

  ins_cost(ALU_COST);

  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]\t#@get_and_setPAcq" %}

  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
 6362 
// Atomic 64-bit fetch-and-add of a register increment. NOTE(review):
// despite the operand name, GetAndAddL yields the value fetched from
// memory before the add, which lands in $newval.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr)
%{
  match(Set newval (GetAndAddL mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addL $newval, [$mem], $incr\t#@get_and_addL" %}

  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
 6377 
// 64-bit fetch-and-add whose result is unused (result_not_used()):
// noreg is passed as the destination so no value is materialized.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr)
%{
  predicate(n->as_LoadStore()->result_not_used());

  match(Set dummy (GetAndAddL mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addL [$mem], $incr\t#@get_and_addL_no_res" %}

  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
 6394 
// 64-bit fetch-and-add with an immediate increment (immLAdd: a constant
// representable for the add). Fetched value lands in $newval.
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAdd incr)
%{
  match(Set newval (GetAndAddL mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addL $newval, [$mem], $incr\t#@get_and_addLi" %}

  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
 6409 
// 64-bit fetch-and-add with an immediate increment, result unused
// (result_not_used()): destination is noreg.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAdd incr)
%{
  predicate(n->as_LoadStore()->result_not_used());

  match(Set dummy (GetAndAddL mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addL [$mem], $incr\t#@get_and_addLi_no_res" %}

  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
 6426 
// Atomic 32-bit fetch-and-add of a register increment; fetched value
// lands in $newval (value before the add — see GetAndAddL note).
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr)
%{
  match(Set newval (GetAndAddI mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addI $newval, [$mem], $incr\t#@get_and_addI" %}

  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
 6441 
// 32-bit fetch-and-add whose result is unused (result_not_used()):
// destination is noreg.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr)
%{
  predicate(n->as_LoadStore()->result_not_used());

  match(Set dummy (GetAndAddI mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addI [$mem], $incr\t#@get_and_addI_no_res" %}

  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
 6458 
// 32-bit fetch-and-add with an immediate increment (immIAdd); fetched
// value lands in $newval.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAdd incr)
%{
  match(Set newval (GetAndAddI mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addI $newval, [$mem], $incr\t#@get_and_addIi" %}

  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
 6473 
// 32-bit fetch-and-add with an immediate increment, result unused
// (result_not_used()): destination is noreg.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAdd incr)
%{
  predicate(n->as_LoadStore()->result_not_used());

  match(Set dummy (GetAndAddI mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addI [$mem], $incr\t#@get_and_addIi_no_res" %}

  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
 6490 
// Acquire-ordered long fetch-and-add, selected when the matcher decides the
// load side needs acquiring semantics (needs_acquiring_load_reserved(n));
// atomic_addal is the ordered counterpart of atomic_add.
instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set newval (GetAndAddL mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addL_acq $newval, [$mem], $incr\t#@get_and_addLAcq" %}

  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

// Acquire-ordered long fetch-and-add with the fetched value unused.
instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_reserved(n));

  match(Set dummy (GetAndAddL mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addL_acq [$mem], $incr\t#@get_and_addL_no_resAcq" %}

  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

// Acquire-ordered long fetch-and-add, immediate increment (immLAdd).
instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAdd incr)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set newval (GetAndAddL mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addL_acq $newval, [$mem], $incr\t#@get_and_addLiAcq" %}

  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

// Acquire-ordered long fetch-and-add, immediate increment, result unused.
instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAdd incr)
%{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_reserved(n));

  match(Set dummy (GetAndAddL mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addL_acq [$mem], $incr\t#@get_and_addLi_no_resAcq" %}

  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
 6557 
// Acquire-ordered int fetch-and-add (atomic_addalw is the ordered, 32-bit
// counterpart of atomic_addw); selected via needs_acquiring_load_reserved(n).
instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set newval (GetAndAddI mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addI_acq $newval, [$mem], $incr\t#@get_and_addIAcq" %}

  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

// Acquire-ordered int fetch-and-add with the fetched value unused.
instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr)
%{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_reserved(n));

  match(Set dummy (GetAndAddI mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addI_acq [$mem], $incr\t#@get_and_addI_no_resAcq" %}

  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

// Acquire-ordered int fetch-and-add, immediate increment (immIAdd).
instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAdd incr)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set newval (GetAndAddI mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addI_acq $newval, [$mem], $incr\t#@get_and_addIiAcq" %}

  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

// Acquire-ordered int fetch-and-add, immediate increment, result unused.
instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAdd incr)
%{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_reserved(n));

  match(Set dummy (GetAndAddI mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addI_acq [$mem], $incr\t#@get_and_addIi_no_resAcq" %}

  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
 6625 
 6626 // ============================================================================
 6627 // Arithmetic Instructions
 6628 //
 6629 
 6630 // Integer Addition
 6631 
 6632 // TODO
 6633 // these currently employ operations which do not set CR and hence are
 6634 // not flagged as killing CR but we would like to isolate the cases
 6635 // where we want to set flags from those where we don't. need to work
 6636 // out how to do that.
// 32-bit integer add, register + register. addw produces a 32-bit result
// sign-extended to 64 bits (RV64I semantics).
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(ALU_COST);
  format %{ "addw  $dst, $src1, $src2\t#@addI_reg_reg" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit integer add with a 12-bit immediate (immIAdd guarantees the
// constant fits addiw's I-immediate).
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAdd src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(ALU_COST);
  format %{ "addiw  $dst, $src1, $src2\t#@addI_reg_imm" %}

  ins_encode %{
    __ addiw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             $src2$$constant);
  %}

  ins_pipe(ialu_reg_imm);
%}

// AddI of a narrowed long: the ConvL2I is free because addiw only reads
// the low 32 bits of src1 and sign-extends the result.
instruct addI_reg_imm_l2i(iRegINoSp dst, iRegL src1, immIAdd src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(ALU_COST);
  format %{ "addiw  $dst, $src1, $src2\t#@addI_reg_imm_l2i" %}

  ins_encode %{
    __ addiw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             $src2$$constant);
  %}

  ins_pipe(ialu_reg_imm);
%}

// Pointer Addition
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(ALU_COST);
  format %{ "add $dst, $src1, $src2\t# ptr, #@addP_reg_reg" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
 6697 
 6698 // If we shift more than 32 bits, we need not convert I2L.
// (LShiftL (ConvI2L src) scale) with scale >= 32 (uimmI6_ge32): the
// sign-extension bits produced by ConvI2L are all shifted out, so a plain
// 64-bit slli on the unextended register is sufficient.
instruct lShiftL_regI_immGE32(iRegLNoSp dst, iRegI src, uimmI6_ge32 scale) %{
  match(Set dst (LShiftL (ConvI2L src) scale));
  ins_cost(ALU_COST);
  format %{ "slli  $dst, $src, $scale & 63\t#@lShiftL_regI_immGE32" %}

  ins_encode %{
    __ slli(as_Register($dst$$reg), as_Register($src$$reg), $scale$$constant & 63);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Pointer Immediate Addition
// n.b. this needs to be more expensive than using an indirect memory
// operand
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAdd src2) %{
  match(Set dst (AddP src1 src2));
  ins_cost(ALU_COST);
  format %{ "addi  $dst, $src1, $src2\t# ptr, #@addP_reg_imm" %}

  ins_encode %{
    __ addi(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant);
  %}

  ins_pipe(ialu_reg_imm);
%}
 6727 
 6728 // Long Addition
// 64-bit long add, register + register.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (AddL src1 src2));
  ins_cost(ALU_COST);
  format %{ "add  $dst, $src1, $src2\t#@addL_reg_reg" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Long Immediate Addition. No constant pool entries required.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAdd src2) %{
  match(Set dst (AddL src1 src2));
  ins_cost(ALU_COST);
  format %{ "addi  $dst, $src1, $src2\t#@addL_reg_imm" %}

  ins_encode %{
    // src2 is imm, so actually call the addi
    __ addi(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant);
  %}

  ins_pipe(ialu_reg_imm);
%}
 6758 
 6759 // Integer Subtraction
// 32-bit integer subtract, register - register (subw sign-extends the
// 32-bit result, per RV64I).
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(ALU_COST);
  format %{ "subw  $dst, $src1, $src2\t#@subI_reg_reg" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immISub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(ALU_COST);
  format %{ "addiw  $dst, $src1, -$src2\t#@subI_reg_imm" %}

  ins_encode %{
    // subiw expands to addiw with the negated immediate (see the format
    // string above); immISub guarantees -src2 fits the I-immediate
    __ subiw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             $src2$$constant);
  %}

  ins_pipe(ialu_reg_imm);
%}

// Long Subtraction
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (SubL src1 src2));
  ins_cost(ALU_COST);
  format %{ "sub  $dst, $src1, $src2\t#@subL_reg_reg" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Long Immediate Subtraction. No constant pool entries required.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLSub src2) %{
  match(Set dst (SubL src1 src2));
  ins_cost(ALU_COST);
  format %{ "addi  $dst, $src1, -$src2\t#@subL_reg_imm" %}

  ins_encode %{
    // subi expands to addi with the negated immediate
    __ subi(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant);
  %}

  ins_pipe(ialu_reg_imm);
%}
 6822 
 6823 // Integer Negation (special case for sub)
 6824 
// Integer negation: matched as (SubI 0 src); negw is an alias that emits
// "subw dst, x0, src" (see the format string).
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (SubI zero src));
  ins_cost(ALU_COST);
  format %{ "subw  $dst, x0, $src\t# int, #@negI_reg" %}

  ins_encode %{
    // actually call the subw
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Long Negation

// Long negation: matched as (SubL 0 src); neg emits "sub dst, x0, src".
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (SubL zero src));
  ins_cost(ALU_COST);
  format %{ "sub  $dst, x0, $src\t# long, #@negL_reg" %}

  ins_encode %{
    // actually call the sub
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
 6854 
 6855 // Integer Multiply
 6856 
// 32-bit integer multiply.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));
  ins_cost(IMUL_COST);
  format %{ "mulw  $dst, $src1, $src2\t#@mulI" %}

  // 32-bit multiply; the full 64-bit product is not needed
  ins_encode %{
    // mulw sign-extends its 32-bit result into the upper 32 bits of dst
    // (RV64M semantics), which matches the iRegI register convention
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// Long Multiply

instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));
  ins_cost(IMUL_COST);
  format %{ "mul  $dst, $src1, $src2\t#@mulL" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}

// High 64 bits of the signed 128-bit product (Math.multiplyHigh).
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2)
%{
  match(Set dst (MulHiL src1 src2));
  ins_cost(IMUL_COST);
  format %{ "mulh  $dst, $src1, $src2\t# mulhi, #@mulHiL_rReg" %}

  ins_encode %{
    __ mulh(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}

// High 64 bits of the unsigned 128-bit product (Math.unsignedMultiplyHigh).
instruct umulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2)
%{
  match(Set dst (UMulHiL src1 src2));
  ins_cost(IMUL_COST);
  format %{ "mulhu  $dst, $src1, $src2\t# umulhi, #@umulHiL_rReg" %}

  ins_encode %{
    __ mulhu(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
 6918 
 6919 // Integer Divide
 6920 
// Signed 32-bit division.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));
  ins_cost(IDIVSI_COST);
  format %{ "divw  $dst, $src1, $src2\t#@divI"%}

  ins_encode %{
    __ divw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
  %}
  ins_pipe(idiv_reg_reg);
%}

// Unsigned 32-bit division (Integer.divideUnsigned intrinsic path).
instruct UdivI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (UDivI src1 src2));
  ins_cost(IDIVSI_COST);
  format %{ "divuw  $dst, $src1, $src2\t#@UdivI"%}

  ins_encode %{
    __ divuw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
  %}
  ins_pipe(idiv_reg_reg);
%}

// (src1 >> 31) >>> 31 extracts the sign bit of an int; collapse the two
// shifts into a single srliw by 31.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(ALU_COST);
  format %{ "srliw $dst, $src1, $div1\t# int signExtract, #@signExtract" %}

  ins_encode %{
    __ srliw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
 6953 
 6954 // Long Divide
 6955 
// Signed 64-bit division.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));
  ins_cost(IDIVDI_COST);
  format %{ "div  $dst, $src1, $src2\t#@divL" %}

  ins_encode %{
    __ div(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
  %}
  ins_pipe(ldiv_reg_reg);
%}

// Unsigned 64-bit division (Long.divideUnsigned intrinsic path).
instruct UdivL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (UDivL src1 src2));
  ins_cost(IDIVDI_COST);

  format %{ "divu $dst, $src1, $src2\t#@UdivL" %}

  ins_encode %{
    __ divu(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
  %}
  ins_pipe(ldiv_reg_reg);
%}

// (src1 >> 63) >>> 63 extracts the sign bit of a long; collapse the two
// shifts into a single srli by 63.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(ALU_COST);
  format %{ "srli $dst, $src1, $div1\t# long signExtract, #@signExtractL" %}

  ins_encode %{
    __ srli(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
 6989 
 6990 // Integer Remainder
 6991 
// Signed 32-bit remainder.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));
  ins_cost(IDIVSI_COST);
  format %{ "remw  $dst, $src1, $src2\t#@modI" %}

  ins_encode %{
    __ remw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
  %}
  ins_pipe(ialu_reg_reg);
%}

// Unsigned 32-bit remainder (Integer.remainderUnsigned intrinsic path).
instruct UmodI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (UModI src1 src2));
  ins_cost(IDIVSI_COST);
  format %{ "remuw  $dst, $src1, $src2\t#@UmodI" %}

  ins_encode %{
    __ remuw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
  %}
  ins_pipe(ialu_reg_reg);
%}

// Long Remainder

// Signed 64-bit remainder.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));
  ins_cost(IDIVDI_COST);
  format %{ "rem  $dst, $src1, $src2\t#@modL" %}

  ins_encode %{
    __ rem(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
  %}
  ins_pipe(ialu_reg_reg);
%}

// Unsigned 64-bit remainder (Long.remainderUnsigned intrinsic path).
instruct UmodL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (UModL src1 src2));
  ins_cost(IDIVDI_COST);
  format %{ "remu  $dst, $src1, $src2\t#@UmodL" %}

  ins_encode %{
    __ remu(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
  %}
  ins_pipe(ialu_reg_reg);
%}
 7037 
 7038 // Integer Shifts
 7039 
 7040 // Shift Left Register
 7041 // Only the low 5 bits of src2 are considered for the shift amount, all other bits are ignored.
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));
  ins_cost(ALU_COST);
  format %{ "sllw  $dst, $src1, $src2\t#@lShiftI_reg_reg" %}

  ins_encode %{
    // sllw only reads the low 5 bits of src2 as the shift amount,
    // which matches Java's int-shift masking (x << (s & 0x1f))
    __ sllw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));
  ins_cost(ALU_COST);
  format %{ "slliw  $dst, $src1, ($src2 & 0x1f)\t#@lShiftI_reg_imm" %}

  ins_encode %{
    // slliw is a 32-bit shift: the shift amount is the lower 5 bits of
    // the immediate; mask with 0x1f to match Java's int-shift semantics
    __ slliw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             (unsigned) $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
 7072 
 7073 // Shift Right Logical Register
 7074 // Only the low 5 bits of src2 are considered for the shift amount, all other bits are ignored.
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));
  ins_cost(ALU_COST);
  format %{ "srlw  $dst, $src1, $src2\t#@urShiftI_reg_reg" %}

  ins_encode %{
    // srlw only reads the low 5 bits of src2 as the shift amount,
    // matching Java's int-shift masking (x >>> (s & 0x1f))
    __ srlw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));
  ins_cost(ALU_COST);
  format %{ "srliw  $dst, $src1, ($src2 & 0x1f)\t#@urShiftI_reg_imm" %}

  ins_encode %{
    // srliw is a 32-bit shift: the shift amount is the lower 5 bits of
    // the immediate (shamt[4:0]), not 6 bits as for the 64-bit srli;
    // mask with 0x1f to match Java's int-shift semantics
    __ srliw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             (unsigned) $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
 7105 
 7106 // Shift Right Arithmetic Register
 7107 // Only the low 5 bits of src2 are considered for the shift amount, all other bits are ignored.
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));
  ins_cost(ALU_COST);
  format %{ "sraw  $dst, $src1, $src2\t#@rShiftI_reg_reg" %}

  ins_encode %{
    // sraw sign-extends the 32-bit result into the high 32 bits of dst
    __ sraw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));
  ins_cost(ALU_COST);
  format %{ "sraiw  $dst, $src1, ($src2 & 0x1f)\t#@rShiftI_reg_imm" %}

  ins_encode %{
    // sraiw sign-extends the 32-bit result into the high 32 bits of dst;
    // mask the shift amount with 0x1f to match Java's int-shift semantics
    __ sraiw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             (unsigned) $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
 7138 
 7139 // Long Shifts
 7140 
 7141 // Shift Left Register
 7142 // Only the low 6 bits of src2 are considered for the shift amount, all other bits are ignored.
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(ALU_COST);
  format %{ "sll  $dst, $src1, $src2\t#@lShiftL_reg_reg" %}

  ins_encode %{
    // sll only reads the low 6 bits of src2 as the shift amount,
    // matching Java's long-shift masking (x << (s & 0x3f))
    __ sll(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(ALU_COST);
  format %{ "slli  $dst, $src1, ($src2 & 0x3f)\t#@lShiftL_reg_imm" %}

  ins_encode %{
    // the shift amount is encoded in the lower
    // 6 bits of the I-immediate field for RV64I
    __ slli(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned) $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
 7175 
 7176 // Shift Right Logical Register
 7177 // Only the low 6 bits of src2 are considered for the shift amount, all other bits are ignored.
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(ALU_COST);
  format %{ "srl  $dst, $src1, $src2\t#@urShiftL_reg_reg" %}

  ins_encode %{
    // srl only reads the low 6 bits of src2 as the shift amount,
    // matching Java's long-shift masking (x >>> (s & 0x3f))
    __ srl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(ALU_COST);
  format %{ "srli  $dst, $src1, ($src2 & 0x3f)\t#@urShiftL_reg_imm" %}

  ins_encode %{
    // the shift amount is encoded in the lower
    // 6 bits of the I-immediate field for RV64I
    __ srli(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned) $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// A special-case pattern for card table stores: unsigned shift of a
// pointer reinterpreted as a long (CastP2X).
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(ALU_COST);
  format %{ "srli  $dst, p2x($src1), ($src2 & 0x3f)\t#@urShiftP_reg_imm" %}

  ins_encode %{
    // the shift amount is encoded in the lower
    // 6 bits of the I-immediate field for RV64I
    __ srli(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned) $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
 7228 
 7229 // Shift Right Arithmetic Register
 7230 // Only the low 6 bits of src2 are considered for the shift amount, all other bits are ignored.
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(ALU_COST);
  format %{ "sra  $dst, $src1, $src2\t#@rShiftL_reg_reg" %}

  ins_encode %{
    // sra only reads the low 6 bits of src2 as the shift amount,
    // matching Java's long-shift masking (x >> (s & 0x3f))
    __ sra(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(ALU_COST);
  format %{ "srai  $dst, $src1, ($src2 & 0x3f)\t#@rShiftL_reg_imm" %}

  ins_encode %{
    // the shift amount is encoded in the lower
    // 6 bits of the I-immediate field for RV64I
    __ srai(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned) $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
 7263 
// Bitwise NOT of an int: (XorI src1 -1) lowered to "xori dst, src1, -1".
instruct regI_not_reg(iRegINoSp dst, iRegI src1, immI_M1 m1) %{
  match(Set dst (XorI src1 m1));
  ins_cost(ALU_COST);
  format %{ "xori  $dst, $src1, -1\t#@regI_not_reg" %}

  ins_encode %{
    __ xori(as_Register($dst$$reg), as_Register($src1$$reg), -1);
  %}

  ins_pipe(ialu_reg_imm);
%}

// Bitwise NOT of a long: (XorL src1 -1) lowered to "xori dst, src1, -1".
instruct regL_not_reg(iRegLNoSp dst, iRegL src1, immL_M1 m1) %{
  match(Set dst (XorL src1 m1));
  ins_cost(ALU_COST);
  format %{ "xori  $dst, $src1, -1\t#@regL_not_reg" %}

  ins_encode %{
    __ xori(as_Register($dst$$reg), as_Register($src1$$reg), -1);
  %}

  ins_pipe(ialu_reg_imm);
%}
 7287 
 7288 
 7289 // ============================================================================
 7290 // Floating Point Arithmetic Instructions
 7291 
// Single-precision floating-point add.
instruct addF_reg_reg(fRegF dst, fRegF src1, fRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(DEFAULT_COST * 5);
  format %{ "fadd.s  $dst, $src1, $src2\t#@addF_reg_reg" %}

  ins_encode %{
    __ fadd_s(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision floating-point add.
instruct addD_reg_reg(fRegD dst, fRegD src1, fRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(DEFAULT_COST * 5);
  format %{ "fadd.d  $dst, $src1, $src2\t#@addD_reg_reg" %}

  ins_encode %{
    __ fadd_d(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
 7321 
// Single-precision floating-point subtract.
instruct subF_reg_reg(fRegF dst, fRegF src1, fRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(DEFAULT_COST * 5);
  format %{ "fsub.s  $dst, $src1, $src2\t#@subF_reg_reg" %}

  ins_encode %{
    __ fsub_s(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision floating-point subtract.
instruct subD_reg_reg(fRegD dst, fRegD src1, fRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(DEFAULT_COST * 5);
  format %{ "fsub.d  $dst, $src1, $src2\t#@subD_reg_reg" %}

  ins_encode %{
    __ fsub_d(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
 7351 
// Single-precision floating-point multiply.
instruct mulF_reg_reg(fRegF dst, fRegF src1, fRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(FMUL_SINGLE_COST);
  format %{ "fmul.s  $dst, $src1, $src2\t#@mulF_reg_reg" %}

  ins_encode %{
    __ fmul_s(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision floating-point multiply.
instruct mulD_reg_reg(fRegD dst, fRegD src1, fRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(FMUL_DOUBLE_COST);
  format %{ "fmul.d  $dst, $src1, $src2\t#@mulD_reg_reg" %}

  ins_encode %{
    __ fmul_d(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
 7381 
// Fused multiply-add, single precision: dst = src1 * src2 + src3
// (single rounding; matched from the FmaF ideal node).
instruct maddF_reg_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3) %{
  match(Set dst (FmaF src3 (Binary src1 src2)));

  ins_cost(FMUL_SINGLE_COST);
  format %{ "fmadd.s  $dst, $src1, $src2, $src3\t#@maddF_reg_reg" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmadd_s(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Fused multiply-add, double precision: dst = src1 * src2 + src3.
instruct maddD_reg_reg(fRegD dst, fRegD src1, fRegD src2, fRegD src3) %{
  match(Set dst (FmaD src3 (Binary src1 src2)));

  ins_cost(FMUL_DOUBLE_COST);
  format %{ "fmadd.d  $dst, $src1, $src2, $src3\t#@maddD_reg_reg" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmadd_d(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
 7417 
// Fused multiply-subtract, single precision: dst = src1 * src2 - src3
// (the NegF on src3 in the ideal pattern folds into fmsub.s).
instruct msubF_reg_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3) %{
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  ins_cost(FMUL_SINGLE_COST);
  format %{ "fmsub.s  $dst, $src1, $src2, $src3\t#@msubF_reg_reg" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmsub_s(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Fused multiply-subtract, double precision: dst = src1 * src2 - src3.
instruct msubD_reg_reg(fRegD dst, fRegD src1, fRegD src2, fRegD src3) %{
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  ins_cost(FMUL_DOUBLE_COST);
  format %{ "fmsub.d  $dst, $src1, $src2, $src3\t#@msubD_reg_reg" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmsub_d(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
 7453 
// Fused negated multiply, single precision: dst = src1 * (-src2) + src3.
// "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3",
// so one pattern covers both operand orders.
instruct nmsubF_reg_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3) %{
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  ins_cost(FMUL_SINGLE_COST);
  format %{ "fnmsub.s  $dst, $src1, $src2, $src3\t#@nmsubF_reg_reg" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmsub_s(as_FloatRegister($dst$$reg),
                as_FloatRegister($src1$$reg),
                as_FloatRegister($src2$$reg),
                as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Fused negated multiply, double precision: dst = src1 * (-src2) + src3.
// "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3".
instruct nmsubD_reg_reg(fRegD dst, fRegD src1, fRegD src2, fRegD src3) %{
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  ins_cost(FMUL_DOUBLE_COST);
  format %{ "fnmsub.d  $dst, $src1, $src2, $src3\t#@nmsubD_reg_reg" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmsub_d(as_FloatRegister($dst$$reg),
                as_FloatRegister($src1$$reg),
                as_FloatRegister($src2$$reg),
                as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
 7491 
 7492 // src1 * (-src2) - src3
 7493 // "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
 7494 instruct nmaddF_reg_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3) %{
 7495   match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));
 7496 
 7497   ins_cost(FMUL_SINGLE_COST);
 7498   format %{ "fnmadd.s  $dst, $src1, $src2, $src3\t#@nmaddF_reg_reg" %}
 7499 
 7500   ins_encode %{
 7501     assert(UseFMA, "Needs FMA instructions support.");
 7502     __ fnmadd_s(as_FloatRegister($dst$$reg),
 7503                 as_FloatRegister($src1$$reg),
 7504                 as_FloatRegister($src2$$reg),
 7505                 as_FloatRegister($src3$$reg));
 7506   %}
 7507 
 7508   ins_pipe(pipe_class_default);
 7509 %}
 7510 
 7511 // src1 * (-src2) - src3
 7512 // "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
 7513 instruct nmaddD_reg_reg(fRegD dst, fRegD src1, fRegD src2, fRegD src3) %{
 7514   match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));
 7515 
 7516   ins_cost(FMUL_DOUBLE_COST);
 7517   format %{ "fnmadd.d  $dst, $src1, $src2, $src3\t#@nmaddD_reg_reg" %}
 7518 
 7519   ins_encode %{
 7520     assert(UseFMA, "Needs FMA instructions support.");
 7521     __ fnmadd_d(as_FloatRegister($dst$$reg),
 7522                 as_FloatRegister($src1$$reg),
 7523                 as_FloatRegister($src2$$reg),
 7524                 as_FloatRegister($src3$$reg));
 7525   %}
 7526 
 7527   ins_pipe(pipe_class_default);
 7528 %}
 7529 
// Math.max / Math.min intrinsics for float and double.
// Each operation has two mutually exclusive rules selected by the UseZfa
// predicate: without Zfa a macro-assembler helper (minmax_fp) is used and
// the flags register is clobbered; with Zfa a single fmaxm/fminm
// instruction suffices and no flags are touched.

// Math.max(FF)F
instruct maxF_reg_reg(fRegF dst, fRegF src1, fRegF src2, rFlagsReg cr) %{
  predicate(!UseZfa);
  match(Set dst (MaxF src1 src2));
  // minmax_fp clobbers the condition flags, hence KILL cr.
  effect(KILL cr);

  format %{ "maxF $dst, $src1, $src2" %}

  ins_encode %{
    __ minmax_fp(as_FloatRegister($dst$$reg),
                 as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg),
                 __ FLOAT_TYPE::single_precision, false /* is_min */);
  %}

  ins_pipe(pipe_class_default);
%}

instruct maxF_reg_reg_zfa(fRegF dst, fRegF src1, fRegF src2) %{
  predicate(UseZfa);
  match(Set dst (MaxF src1 src2));

  format %{ "maxF $dst, $src1, $src2" %}

  ins_encode %{
    // Single Zfa instruction; no scratch registers or flags needed.
    __ fmaxm_s(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Math.min(FF)F
instruct minF_reg_reg(fRegF dst, fRegF src1, fRegF src2, rFlagsReg cr) %{
  predicate(!UseZfa);
  match(Set dst (MinF src1 src2));
  effect(KILL cr);

  format %{ "minF $dst, $src1, $src2" %}

  ins_encode %{
    __ minmax_fp(as_FloatRegister($dst$$reg),
                 as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg),
                 __ FLOAT_TYPE::single_precision, true /* is_min */);
  %}

  ins_pipe(pipe_class_default);
%}

instruct minF_reg_reg_zfa(fRegF dst, fRegF src1, fRegF src2) %{
  predicate(UseZfa);
  match(Set dst (MinF src1 src2));

  format %{ "minF $dst, $src1, $src2" %}

  ins_encode %{
    __ fminm_s(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Math.max(DD)D
instruct maxD_reg_reg(fRegD dst, fRegD src1, fRegD src2, rFlagsReg cr) %{
  predicate(!UseZfa);
  match(Set dst (MaxD src1 src2));
  effect(KILL cr);

  format %{ "maxD $dst, $src1, $src2" %}

  ins_encode %{
    __ minmax_fp(as_FloatRegister($dst$$reg),
                 as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg),
                 __ FLOAT_TYPE::double_precision, false /* is_min */);
  %}

  ins_pipe(pipe_class_default);
%}

instruct maxD_reg_reg_zfa(fRegD dst, fRegD src1, fRegD src2) %{
  predicate(UseZfa);
  match(Set dst (MaxD src1 src2));

  format %{ "maxD $dst, $src1, $src2" %}

  ins_encode %{
    __ fmaxm_d(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Math.min(DD)D
instruct minD_reg_reg(fRegD dst, fRegD src1, fRegD src2, rFlagsReg cr) %{
  predicate(!UseZfa);
  match(Set dst (MinD src1 src2));
  effect(KILL cr);

  format %{ "minD $dst, $src1, $src2" %}

  ins_encode %{
    __ minmax_fp(as_FloatRegister($dst$$reg),
                 as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg),
                 __ FLOAT_TYPE::double_precision, true /* is_min */);
  %}

  ins_pipe(pipe_class_default);
%}

instruct minD_reg_reg_zfa(fRegD dst, fRegD src1, fRegD src2) %{
  predicate(UseZfa);
  match(Set dst (MinD src1 src2));

  format %{ "minD $dst, $src1, $src2" %}

  ins_encode %{
    __ fminm_d(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
 7653 
// Float/Double.isInfinite and isFinite intrinsics.
// Pattern: classify the FP value with fclass, mask the class bits of
// interest (FClassBits::inf or FClassBits::finite), then
// "slt dst, zr, dst" turns any non-zero masked result into 1
// (the masked value is a small non-negative bit pattern).

// Float.isInfinite
instruct isInfiniteF_reg_reg(iRegINoSp dst, fRegF src)
%{
  match(Set dst (IsInfiniteF src));

  format %{ "isInfinite $dst, $src" %}
  ins_encode %{
    __ fclass_s(as_Register($dst$$reg), as_FloatRegister($src$$reg));
    __ andi(as_Register($dst$$reg), as_Register($dst$$reg), Assembler::FClassBits::inf);
    __ slt(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double.isInfinite
instruct isInfiniteD_reg_reg(iRegINoSp dst, fRegD src)
%{
  match(Set dst (IsInfiniteD src));

  format %{ "isInfinite $dst, $src" %}
  ins_encode %{
    __ fclass_d(as_Register($dst$$reg), as_FloatRegister($src$$reg));
    __ andi(as_Register($dst$$reg), as_Register($dst$$reg), Assembler::FClassBits::inf);
    __ slt(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Float.isFinite
instruct isFiniteF_reg_reg(iRegINoSp dst, fRegF src)
%{
  match(Set dst (IsFiniteF src));

  format %{ "isFinite $dst, $src" %}
  ins_encode %{
    __ fclass_s(as_Register($dst$$reg), as_FloatRegister($src$$reg));
    __ andi(as_Register($dst$$reg), as_Register($dst$$reg), Assembler::FClassBits::finite);
    __ slt(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double.isFinite
instruct isFiniteD_reg_reg(iRegINoSp dst, fRegD src)
%{
  match(Set dst (IsFiniteD src));

  format %{ "isFinite $dst, $src" %}
  ins_encode %{
    __ fclass_d(as_Register($dst$$reg), as_FloatRegister($src$$reg));
    __ andi(as_Register($dst$$reg), as_Register($dst$$reg), Assembler::FClassBits::finite);
    __ slt(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
 7713 
// Floating-point division: dst = src1 / src2, single and double precision.
instruct divF_reg_reg(fRegF dst, fRegF src1, fRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(FDIV_COST);
  format %{ "fdiv.s  $dst, $src1, $src2\t#@divF_reg_reg" %}

  ins_encode %{
    __ fdiv_s(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

instruct divD_reg_reg(fRegD dst, fRegD src1, fRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(FDIV_COST);
  format %{ "fdiv.d  $dst, $src1, $src2\t#@divD_reg_reg" %}

  ins_encode %{
    __ fdiv_d(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
 7743 
// Floating-point negation via sign-injection (fneg expands to fsgnjn
// with the source used for both operands, as the format strings show).
instruct negF_reg_reg(fRegF dst, fRegF src) %{
  match(Set dst (NegF src));

  ins_cost(XFER_COST);
  format %{ "fsgnjn.s  $dst, $src, $src\t#@negF_reg_reg" %}

  ins_encode %{
    __ fneg_s(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

instruct negD_reg_reg(fRegD dst, fRegD src) %{
  match(Set dst (NegD src));

  ins_cost(XFER_COST);
  format %{ "fsgnjn.d  $dst, $src, $src\t#@negD_reg_reg" %}

  ins_encode %{
    __ fneg_d(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
 7771 
// Integer absolute value using the branch-free mask idiom:
//   t0  = src >> (width-1)      (all ones if negative, else zero)
//   dst = (src + t0) ^ t0       (negates src when t0 is all ones)
// Clobbers the scratch register t0.
instruct absI_reg(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (AbsI src));

  ins_cost(ALU_COST * 3);
  format %{
    "sraiw  t0, $src, 0x1f\n\t"
    "addw  $dst, $src, t0\n\t"
    "xorr  $dst, $dst, t0\t#@absI_reg"
  %}

  ins_encode %{
    __ sraiw(t0, as_Register($src$$reg), 0x1f);
    __ addw(as_Register($dst$$reg), as_Register($src$$reg), t0);
    __ xorr(as_Register($dst$$reg), as_Register($dst$$reg), t0);
  %}

  ins_pipe(pipe_class_default);
%}

// 64-bit counterpart of absI_reg (shift amount 0x3f instead of 0x1f).
instruct absL_reg(iRegLNoSp dst, iRegL src) %{
  match(Set dst (AbsL src));

  ins_cost(ALU_COST * 3);
  format %{
    "srai  t0, $src, 0x3f\n\t"
    "add  $dst, $src, t0\n\t"
    "xorr  $dst, $dst, t0\t#@absL_reg"
  %}

  ins_encode %{
    __ srai(t0, as_Register($src$$reg), 0x3f);
    __ add(as_Register($dst$$reg), as_Register($src$$reg), t0);
    __ xorr(as_Register($dst$$reg), as_Register($dst$$reg), t0);
  %}

  ins_pipe(pipe_class_default);
%}
 7809 
// Floating-point absolute value via sign-injection (fabs expands to
// fsgnjx with the source used for both operands, per the format strings).
instruct absF_reg(fRegF dst, fRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(XFER_COST);
  format %{ "fsgnjx.s  $dst, $src, $src\t#@absF_reg" %}
  ins_encode %{
    __ fabs_s(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

instruct absD_reg(fRegD dst, fRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(XFER_COST);
  format %{ "fsgnjx.d  $dst, $src, $src\t#@absD_reg" %}
  ins_encode %{
    __ fabs_d(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
 7835 
// Floating-point square root, single and double precision.
instruct sqrtF_reg(fRegF dst, fRegF src) %{
  match(Set dst (SqrtF src));

  ins_cost(FSQRT_COST);
  format %{ "fsqrt.s  $dst, $src\t#@sqrtF_reg" %}
  ins_encode %{
    __ fsqrt_s(as_FloatRegister($dst$$reg),
               as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_sqrt_s);
%}

instruct sqrtD_reg(fRegD dst, fRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(FSQRT_COST);
  format %{ "fsqrt.d  $dst, $src\t#@sqrtD_reg" %}
  ins_encode %{
    __ fsqrt_d(as_FloatRegister($dst$$reg),
               as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_sqrt_d);
%}
 7861 
// Round Instruction
// RoundDoubleMode: round src according to the compile-time rounding-mode
// constant $rmode. Delegates to the round_double_mode macro, which needs
// three integer scratch registers; dst is TEMP_DEF because it is written
// before all inputs are consumed.
instruct roundD_reg(fRegD dst, fRegD src, immI rmode, iRegLNoSp tmp1, iRegLNoSp tmp2, iRegLNoSp tmp3) %{
  match(Set dst (RoundDoubleMode src rmode));
  ins_cost(2 * XFER_COST + BRANCH_COST);
  effect(TEMP_DEF dst, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{ "RoundDoubleMode $src, $rmode" %}
  ins_encode %{
    __ round_double_mode(as_FloatRegister($dst$$reg),
               as_FloatRegister($src$$reg), $rmode$$constant, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
 7875 
// Copysign and signum intrinsics

// Math.copySign(double): dst gets src1's magnitude with src2's sign,
// implemented with a single sign-injection instruction. The immD zero
// operand only shapes the matched ideal subtree; it is not used in the
// encoding.
instruct copySignD_reg(fRegD dst, fRegD src1, fRegD src2, immD zero) %{
  match(Set dst (CopySignD src1 (Binary src2 zero)));
  format %{ "CopySignD  $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg);
    __ fsgnj_d(dst, src1, src2);
  %}
  ins_pipe(fp_dop_reg_reg_d);
%}

// Math.copySign(float): same pattern as copySignD_reg, single precision.
instruct copySignF_reg(fRegF dst, fRegF src1, fRegF src2) %{
  match(Set dst (CopySignF src1 src2));
  format %{ "CopySignF  $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg);
    __ fsgnj_s(dst, src1, src2);
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}
 7901 
// Math.signum(double): in-place on dst (matched as SignumD dst ...).
// The zero operand shapes the ideal subtree only; the one operand supplies
// the FP constant 1.0 register to the signum_fp macro.
instruct signumD_reg(fRegD dst, immD zero, fRegD one) %{
  match(Set dst (SignumD dst (Binary zero one)));
  format %{ "signumD  $dst, $dst" %}
  ins_encode %{
    __ signum_fp(as_FloatRegister($dst$$reg), as_FloatRegister($one$$reg), true /* is_double */);
  %}
  ins_pipe(pipe_class_default);
%}

// Math.signum(float): single-precision counterpart of signumD_reg.
instruct signumF_reg(fRegF dst, immF zero, fRegF one) %{
  match(Set dst (SignumF dst (Binary zero one)));
  format %{ "signumF  $dst, $dst" %}
  ins_encode %{
    __ signum_fp(as_FloatRegister($dst$$reg), as_FloatRegister($one$$reg), false /* is_double */);
  %}
  ins_pipe(pipe_class_default);
%}
 7919 
 7920 // Arithmetic Instructions End
 7921 
 7922 // ============================================================================
 7923 // Logical Instructions
 7924 
// 32-bit logical operations. Register-register forms use the and/or/xor
// ALU instructions; immediate forms use the I-type variants with an
// immIAdd operand (an immediate encodable in the 12-bit field) narrowed
// to int32_t for the assembler.

// Register And
instruct andI_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2) %{
  match(Set dst (AndI src1 src2));

  format %{ "andr  $dst, $src1, $src2\t#@andI_reg_reg" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate And
instruct andI_reg_imm(iRegINoSp dst, iRegI src1, immIAdd src2) %{
  match(Set dst (AndI src1 src2));

  format %{ "andi  $dst, $src1, $src2\t#@andI_reg_imm" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ andi(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (int32_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Register Or
instruct orI_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orr  $dst, $src1, $src2\t#@orI_reg_reg" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Or
instruct orI_reg_imm(iRegINoSp dst, iRegI src1, immIAdd src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "ori  $dst, $src1, $src2\t#@orI_reg_imm" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ ori(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (int32_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Register Xor
instruct xorI_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "xorr  $dst, $src1, $src2\t#@xorI_reg_reg" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ xorr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Xor
instruct xorI_reg_imm(iRegINoSp dst, iRegI src1, immIAdd src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "xori  $dst, $src1, $src2\t#@xorI_reg_imm" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ xori(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (int32_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
 8020 
// 64-bit logical operations, mirroring the 32-bit rules above but matching
// the long ideal nodes (AndL/OrL/XorL) with immLAdd immediates.

// Register And Long
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (AndL src1 src2));

  format %{ "andr  $dst, $src1, $src2\t#@andL_reg_reg" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate And Long
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLAdd src2) %{
  match(Set dst (AndL src1 src2));

  format %{ "andi  $dst, $src1, $src2\t#@andL_reg_imm" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ andi(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (int32_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Register Or Long
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t#@orL_reg_reg" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Or Long
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLAdd src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "ori  $dst, $src1, $src2\t#@orL_reg_imm" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ ori(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (int32_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Register Xor Long
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "xorr  $dst, $src1, $src2\t#@xorL_reg_reg" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ xorr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
 8100 
 8101 // Immediate Xor Long
 8102 instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLAdd src2) %{
 8103   match(Set dst (XorL src1 src2));
 8104 
 8105   ins_cost(ALU_COST);
 8106   format %{ "xori  $dst, $src1, $src2\t#@xorL_reg_imm" %}
 8107 
 8108   ins_encode %{
 8109     __ xori(as_Register($dst$$reg),
 8110             as_Register($src1$$reg),
 8111             (int32_t)($src2$$constant));
 8112   %}
 8113 
 8114   ins_pipe(ialu_reg_imm);
 8115 %}
 8116 
 8117 // ============================================================================
 8118 // MemBar Instruction
 8119 
 8120 // RVTSO
 8121 
// Ztso (Total Store Ordering) barrier rules. Under TSO the only ordering
// that needs an explicit fence is store->load; all acquire/release-style
// barriers collapse to nothing (size(0), empty encoding). block_comment
// calls only annotate the debug disassembly.
instruct unnecessary_membar_rvtso() %{
  predicate(UseZtso);
  match(LoadFence);
  match(StoreFence);
  match(StoreStoreFence);
  match(MemBarAcquire);
  match(MemBarRelease);
  match(MemBarStoreStore);
  match(MemBarAcquireLock);
  match(MemBarReleaseLock);

  ins_cost(0);

  size(0);

  format %{ "#@unnecessary_membar_rvtso elided/tso (empty encoding)" %}
  ins_encode %{
    __ block_comment("unnecessary_membar_rvtso");
  %}
  ins_pipe(real_empty);
%}

// StoreLoad is the one barrier TSO does not provide for free.
instruct membar_storeload_rvtso() %{
  predicate(UseZtso);
  match(MemBarStoreLoad);
  ins_cost(VOLATILE_REF_COST);

  format %{ "#@membar_storeload_rvtso\n\t"
            "fence w, r"%}

  ins_encode %{
    __ block_comment("membar_storeload_rvtso");
    __ membar(MacroAssembler::StoreLoad);
  %}

  ins_pipe(pipe_slow);
%}

// MemBarVolatile also reduces to a StoreLoad fence under TSO.
instruct membar_volatile_rvtso() %{
  predicate(UseZtso);
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST);

  format %{ "#@membar_volatile_rvtso\n\t"
            "fence w, r"%}

  ins_encode %{
    __ block_comment("membar_volatile_rvtso");
    __ membar(MacroAssembler::StoreLoad);
  %}

  ins_pipe(pipe_slow);
%}

// Elide the volatile barrier when the matcher proves a following barrier
// already covers it (Matcher::post_store_load_barrier).
instruct unnecessary_membar_volatile_rvtso() %{
  predicate(UseZtso && Matcher::post_store_load_barrier(n));
  match(MemBarVolatile);
  ins_cost(0);

  size(0);

  format %{ "#@unnecessary_membar_volatile_rvtso (unnecessary so empty encoding)" %}
  ins_encode %{
    __ block_comment("unnecessary_membar_volatile_rvtso");
  %}
  ins_pipe(real_empty);
%}

// Full two-way barrier: still required even under TSO.
instruct membar_full_rvtso() %{
  predicate(UseZtso);
  match(MemBarFull);
  ins_cost(VOLATILE_REF_COST);

  format %{ "#@membar_full_rvtso\n\t"
            "fence rw, rw" %}

  ins_encode %{
    __ block_comment("membar_full_rvtso");
    __ membar(MacroAssembler::AnyAny);
  %}

  ins_pipe(pipe_slow);
%}
 8205 
 8206 // RVWMO
 8207 
// Acquire barrier under the weak memory model: fence r, rw.
// NOTE(review): "aqcuire" is a typo for "acquire" in the rule name and in
// the format/block_comment strings. The name may be referenced elsewhere
// in this file (pipelines/peepholes), so it is deliberately left unchanged
// here; fix name and strings together in a dedicated cleanup.
instruct membar_aqcuire_rvwmo() %{
  predicate(!UseZtso);
  match(LoadFence);
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "#@membar_aqcuire_rvwmo\n\t"
            "fence r, rw" %}

  ins_encode %{
    __ block_comment("membar_aqcuire_rvwmo");
    __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}

// Release barrier under the weak memory model: fence rw, w.
instruct membar_release_rvwmo() %{
  predicate(!UseZtso);
  match(StoreFence);
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "#@membar_release_rvwmo\n\t"
            "fence rw, w" %}

  ins_encode %{
    __ block_comment("membar_release_rvwmo");
    __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
 8239 
 8240 instruct membar_storestore_rvwmo() %{
 8241   predicate(!UseZtso);
 8242   match(MemBarStoreStore);
 8243   match(StoreStoreFence);
 8244   ins_cost(VOLATILE_REF_COST);
 8245 
 8246   format %{ "#@membar_storestore_rvwmo\n\t"
 8247             "fence w, w" %}
 8248 
 8249   ins_encode %{
 8250     __ membar(MacroAssembler::StoreStore);
 8251   %}
 8252   ins_pipe(pipe_serial);
 8253 %}
 8254 
// StoreLoad barrier under the weak memory model.
instruct membar_storeload_rvwmo() %{
  predicate(!UseZtso);
  match(MemBarStoreLoad);
  ins_cost(VOLATILE_REF_COST);

  format %{ "#@membar_storeload_rvwmo\n\t"
            "fence w, r"%}

  ins_encode %{
    __ block_comment("membar_storeload_rvwmo");
    __ membar(MacroAssembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}

// MemBarVolatile reduces to a StoreLoad fence.
instruct membar_volatile_rvwmo() %{
  predicate(!UseZtso);
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST);

  format %{ "#@membar_volatile_rvwmo\n\t"
            "fence w, r"%}

  ins_encode %{
    __ block_comment("membar_volatile_rvwmo");
    __ membar(MacroAssembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}

// Lock acquire/release barriers are elided: no code is emitted, only a
// debug annotation.
instruct membar_lock_rvwmo() %{
  predicate(!UseZtso);
  match(MemBarAcquireLock);
  match(MemBarReleaseLock);
  ins_cost(0);

  format %{ "#@membar_lock_rvwmo (elided)" %}

  ins_encode %{
    __ block_comment("membar_lock_rvwmo (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Elide the volatile barrier when the matcher proves a following barrier
// already covers it (Matcher::post_store_load_barrier).
instruct unnecessary_membar_volatile_rvwmo() %{
  predicate(!UseZtso && Matcher::post_store_load_barrier(n));
  match(MemBarVolatile);
  ins_cost(0);

  size(0);
  format %{ "#@unnecessary_membar_volatile_rvwmo (unnecessary so empty encoding)" %}
  ins_encode %{
    __ block_comment("unnecessary_membar_volatile_rvwmo");
  %}
  ins_pipe(real_empty);
%}

// Full two-way barrier: fence rw, rw.
instruct membar_full_rvwmo() %{
  predicate(!UseZtso);
  match(MemBarFull);
  ins_cost(VOLATILE_REF_COST);

  format %{ "#@membar_full_rvwmo\n\t"
            "fence rw, rw" %}

  ins_encode %{
    __ block_comment("membar_full_rvwmo");
    __ membar(MacroAssembler::AnyAny);
  %}

  ins_pipe(pipe_serial);
%}

// Thread.onSpinWait intrinsic: maps to the Zihintpause "pause" hint, so it
// is only matched when that extension is available.
instruct spin_wait() %{
  predicate(UseZihintpause);
  match(OnSpinWait);
  ins_cost(CACHE_MISS_COST);

  format %{ "spin_wait" %}

  ins_encode %{
    __ pause();
  %}

  ins_pipe(pipe_serial);
%}
 8344 
 8345 // ============================================================================
 8346 // Cast Instructions (Java-level type cast)
 8347 
// long -> pointer reinterpretation; emits a register move only when source
// and destination were allocated to different registers.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(ALU_COST);
  format %{ "mv  $dst, $src\t# long -> ptr, #@castX2P" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mv(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// pointer -> long reinterpretation; same conditional-move pattern.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(ALU_COST);
  format %{ "mv  $dst, $src\t# ptr -> long, #@castP2X" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mv(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// The remaining cast rules match ideal-graph cast nodes that exist only to
// carry type/range information through the compiler; they emit no code
// (size(0), empty encoding) and cost nothing.

instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));
  ins_cost(0);

  size(0);
  format %{ "# castPP of $dst, #@castPP" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castLL(iRegL dst)
%{
  match(Set dst (CastLL dst));

  size(0);
  format %{ "# castLL of $dst, #@castLL" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst, #@castII" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  ins_cost(0);
  format %{ "# checkcastPP of $dst, #@checkCastPP" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castHH(fRegF dst)
%{
  match(Set dst (CastHH dst));

  size(0);
  format %{ "# castHH of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castFF(fRegF dst)
%{
  match(Set dst (CastFF dst));

  size(0);
  format %{ "# castFF of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castDD(fRegD dst)
%{
  match(Set dst (CastDD dst));

  size(0);
  format %{ "# castDD of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castVV(vReg dst)
%{
  match(Set dst (CastVV dst));

  size(0);
  format %{ "# castVV of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// CastVV variant for vector-mask registers.
instruct castVVMask(vRegMask dst)
%{
  match(Set dst (CastVV dst));

  size(0);
  format %{ "# castVV of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
 8476 
 8477 // ============================================================================
 8478 // Convert Instructions
 8479 
 8480 // int to bool
 8481 instruct convI2Bool(iRegINoSp dst, iRegI src)
 8482 %{
 8483   match(Set dst (Conv2B src));
 8484 
 8485   ins_cost(ALU_COST);
 8486   format %{ "snez  $dst, $src\t#@convI2Bool" %}
 8487 
 8488   ins_encode %{
 8489     __ snez(as_Register($dst$$reg), as_Register($src$$reg));
 8490   %}
 8491 
 8492   ins_pipe(ialu_reg);
 8493 %}
 8494 
 8495 // pointer to bool
 8496 instruct convP2Bool(iRegINoSp dst, iRegP src)
 8497 %{
 8498   match(Set dst (Conv2B src));
 8499 
 8500   ins_cost(ALU_COST);
 8501   format %{ "snez  $dst, $src\t#@convP2Bool" %}
 8502 
 8503   ins_encode %{
 8504     __ snez(as_Register($dst$$reg), as_Register($src$$reg));
 8505   %}
 8506 
 8507   ins_pipe(ialu_reg);
 8508 %}
 8509 
 8510 // int <-> long
 8511 
 8512 instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
 8513 %{
 8514   match(Set dst (ConvI2L src));
 8515 
 8516   ins_cost(ALU_COST);
 8517   format %{ "addw  $dst, $src, zr\t#@convI2L_reg_reg" %}
 8518   ins_encode %{
 8519     __ sext(as_Register($dst$$reg), as_Register($src$$reg), 32);
 8520   %}
 8521   ins_pipe(ialu_reg);
 8522 %}
 8523 
 8524 instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
 8525   match(Set dst (ConvL2I src));
 8526 
 8527   ins_cost(ALU_COST);
 8528   format %{ "addw  $dst, $src, zr\t#@convL2I_reg" %}
 8529 
 8530   ins_encode %{
 8531     __ sext(as_Register($dst$$reg), as_Register($src$$reg), 32);
 8532   %}
 8533 
 8534   ins_pipe(ialu_reg);
 8535 %}
 8536 
 8537 // int to unsigned long (Zero-extend)
instruct convI2UL_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  // Matches (ConvI2L src) & 0xFFFFFFFF, i.e. an unsigned i2l.
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(ALU_COST * 2);
  format %{ "zext $dst, $src, 32\t# i2ul, #@convI2UL_reg_reg" %}

  ins_encode %{
    // Zero-extend the low 32 bits of src into dst.
    __ zext(as_Register($dst$$reg), as_Register($src$$reg), 32);
  %}

  ins_pipe(ialu_reg_shift);
%}
 8551 
 8552 // float <-> double
 8553 
instruct convF2D_reg(fRegD dst, fRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(XFER_COST);
  format %{ "fcvt.d.s  $dst, $src\t#@convF2D_reg" %}

  ins_encode %{
    // Widening conversion, always exact.
    __ fcvt_d_s(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

instruct convD2F_reg(fRegF dst, fRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(XFER_COST);
  format %{ "fcvt.s.d  $dst, $src\t#@convD2F_reg" %}

  ins_encode %{
    // Narrowing conversion, rounds per the current rounding mode.
    __ fcvt_s_d(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}
 8579 
 8580 // single <-> half precision
 8581 
// Half-precision value arrives as raw bits in a GPR; tmp is scratch for the helper.
instruct convHF2F_reg_reg(fRegF dst, iRegIorL2I src, iRegINoSp tmp) %{
  match(Set dst (ConvHF2F src));
  effect(TEMP tmp);
  format %{ "fmv.h.x $dst, $src\t# move source from $src to $dst\n\t"
            "fcvt.s.h $dst, $dst\t# convert half to single precision"
  %}
  ins_encode %{
    __ float16_to_float($dst$$FloatRegister, $src$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Result is the half-precision bit pattern in a GPR; ftmp/xtmp are scratch.
instruct convF2HF_reg_reg(iRegINoSp dst, fRegF src, fRegF ftmp, iRegINoSp xtmp) %{
  match(Set dst (ConvF2HF src));
  effect(TEMP_DEF dst, TEMP ftmp, TEMP xtmp);
  format %{ "fcvt.h.s $ftmp, $src\t# convert single precision to half\n\t"
            "fmv.x.h $dst, $ftmp\t# move result from $ftmp to $dst"
  %}
  ins_encode %{
    __ float_to_float16($dst$$Register, $src$$FloatRegister, $ftmp$$FloatRegister, $xtmp$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 8605 
 8606 // half precision operations
 8607 
// Move half-precision bits from a GPR into an FPR without conversion.
instruct reinterpretS2HF(fRegF dst, iRegI src)
%{
  match(Set dst (ReinterpretS2HF src));
  format %{ "fmv.h.x $dst, $src\t# reinterpretS2HF" %}
  ins_encode %{
    __ fmv_h_x($dst$$FloatRegister, $src$$Register);
  %}
  ins_pipe(fp_i2f);
%}

// Fused pattern: F->HF conversion whose result stays in an FPR,
// collapsing the GPR round-trip into a single fcvt.h.s.
instruct convF2HFAndS2HF(fRegF dst, fRegF src)
%{
  match(Set dst (ReinterpretS2HF (ConvF2HF src)));
  format %{ "convF2HFAndS2HF $dst, $src" %}
  ins_encode %{
    __ fcvt_h_s($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(fp_uop_s);
%}

// Move half-precision bits from an FPR into a GPR without conversion.
instruct reinterpretHF2S(iRegINoSp dst, fRegF src)
%{
  match(Set dst (ReinterpretHF2S src));
  format %{ "fmv.x.h $dst, $src\t# reinterpretHF2S" %}
  ins_encode %{
    __ fmv_x_h($dst$$Register, $src$$FloatRegister);
  %}
  ins_pipe(fp_f2i);
%}

// Fused pattern: HF->F conversion whose source is already in an FPR,
// collapsing the GPR round-trip into a single fcvt.s.h.
instruct convHF2SAndHF2F(fRegF dst, fRegF src)
%{
  match(Set dst (ConvHF2F (ReinterpretHF2S src)));
  format %{ "convHF2SAndHF2F $dst, $src" %}
  ins_encode %{
    __ fcvt_s_h($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(fp_uop_s);
%}
 8647 
instruct sqrt_HF_reg(fRegF dst, fRegF src)
%{
  match(Set dst (SqrtHF src));
  format %{ "fsqrt.h $dst, $src" %}
  ins_encode %{
    __ fsqrt_h($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(fp_sqrt_s);
%}

// Half-precision add/sub/mul/div share one rule; the ideal opcode
// of the matched node selects the instruction to emit.
instruct binOps_HF_reg(fRegF dst, fRegF src1, fRegF src2)
%{
  match(Set dst (AddHF src1 src2));
  match(Set dst (SubHF src1 src2));
  match(Set dst (MulHF src1 src2));
  match(Set dst (DivHF src1 src2));
  format %{ "binop_hf $dst, $src1, $src2" %}
  ins_encode %{
    int opcode = this->ideal_Opcode();
    switch(opcode) {
      case Op_AddHF: __ fadd_h($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister); break;
      case Op_SubHF: __ fsub_h($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister); break;
      case Op_MulHF: __ fmul_h($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister); break;
      case Op_DivHF: __ fdiv_h($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister); break;
      default: assert(false, "%s is not supported here", NodeClassNames[opcode]); break;
    }
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}
 8677 
// Half-precision min without Zfa: branch-based sequence handling
// NaN and -0.0/+0.0 per Java semantics (clobbers flags).
instruct min_HF_reg(fRegF dst, fRegF src1, fRegF src2, rFlagsReg cr)
%{
  predicate(!UseZfa);
  match(Set dst (MinHF src1 src2));
  effect(KILL cr);

  format %{ "min_hf $dst, $src1, $src2" %}

  ins_encode %{
    __ minmax_fp($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
                 __ FLOAT_TYPE::half_precision, true /* is_min */);
  %}
  ins_pipe(pipe_class_default);
%}

// Half-precision min with Zfa: single fminm.h instruction.
instruct min_HF_reg_zfa(fRegF dst, fRegF src1, fRegF src2)
%{
  predicate(UseZfa);
  match(Set dst (MinHF src1 src2));

  format %{ "min_hf $dst, $src1, $src2" %}

  ins_encode %{
    __ fminm_h(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Half-precision max without Zfa: branch-based sequence (clobbers flags).
instruct max_HF_reg(fRegF dst, fRegF src1, fRegF src2, rFlagsReg cr)
%{
  predicate(!UseZfa);
  match(Set dst (MaxHF src1 src2));
  effect(KILL cr);

  format %{ "max_hf $dst, $src1, $src2" %}

  ins_encode %{
    __ minmax_fp($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
                 __ FLOAT_TYPE::half_precision, false /* is_min */);
  %}
  ins_pipe(pipe_class_default);
%}

// Half-precision max with Zfa: single fmaxm.h instruction.
instruct max_HF_reg_zfa(fRegF dst, fRegF src1, fRegF src2)
%{
  predicate(UseZfa);
  match(Set dst (MaxHF src1 src2));

  format %{ "max_hf $dst, $src1, $src2" %}

  ins_encode %{
    __ fmaxm_h(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Fused multiply-add: dst = src1 * src2 + src3.
instruct fma_HF_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3)
%{
  match(Set dst (FmaHF src3 (Binary src1 src2)));
  format %{ "fmadd.h $dst, $src1, $src2, $src3\t# $dst = $src1 * $src2 + $src3 fma packedH" %}
  ins_encode %{
    __ fmadd_h($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, $src3$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}
 8747 
 8748 // float <-> int
 8749 
instruct convF2I_reg_reg(iRegINoSp dst, fRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(XFER_COST);
  format %{ "fcvt.w.s  $dst, $src\t#@convF2I_reg_reg" %}

  ins_encode %{
    // "safe" variant produces Java semantics for NaN (0) instead of
    // the raw RISC-V fcvt result.
    __ fcvt_w_s_safe($dst$$Register, $src$$FloatRegister);
  %}

  ins_pipe(fp_f2i);
%}

instruct convI2F_reg_reg(fRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(XFER_COST);
  format %{ "fcvt.s.w  $dst, $src\t#@convI2F_reg_reg" %}

  ins_encode %{
    __ fcvt_s_w(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}
 8775 
 8776 // float <-> long
 8777 
instruct convF2L_reg_reg(iRegLNoSp dst, fRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(XFER_COST);
  format %{ "fcvt.l.s  $dst, $src\t#@convF2L_reg_reg" %}

  ins_encode %{
    // "safe" variant produces Java semantics for NaN (0) instead of
    // the raw RISC-V fcvt result.
    __ fcvt_l_s_safe($dst$$Register, $src$$FloatRegister);
  %}

  ins_pipe(fp_f2l);
%}

instruct convL2F_reg_reg(fRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(XFER_COST);
  format %{ "fcvt.s.l  $dst, $src\t#@convL2F_reg_reg" %}

  ins_encode %{
    __ fcvt_s_l(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}
 8803 
 8804 // double <-> int
 8805 
instruct convD2I_reg_reg(iRegINoSp dst, fRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(XFER_COST);
  format %{ "fcvt.w.d  $dst, $src\t#@convD2I_reg_reg" %}

  ins_encode %{
    // "safe" variant produces Java semantics for NaN (0) instead of
    // the raw RISC-V fcvt result.
    __ fcvt_w_d_safe($dst$$Register, $src$$FloatRegister);
  %}

  ins_pipe(fp_d2i);
%}

instruct convI2D_reg_reg(fRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(XFER_COST);
  format %{ "fcvt.d.w  $dst, $src\t#@convI2D_reg_reg" %}

  ins_encode %{
    __ fcvt_d_w(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}
 8831 
 8832 // double <-> long
 8833 
instruct convD2L_reg_reg(iRegLNoSp dst, fRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(XFER_COST);
  format %{ "fcvt.l.d  $dst, $src\t#@convD2L_reg_reg" %}

  ins_encode %{
    // "safe" variant produces Java semantics for NaN (0) instead of
    // the raw RISC-V fcvt result.
    __ fcvt_l_d_safe($dst$$Register, $src$$FloatRegister);
  %}

  ins_pipe(fp_d2l);
%}

instruct convL2D_reg_reg(fRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(XFER_COST);
  format %{ "fcvt.d.l  $dst, $src\t#@convL2D_reg_reg" %}

  ins_encode %{
    __ fcvt_d_l(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
 8859 
 8860 // Convert oop into int for vectors alignment masking
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(ALU_COST * 2);
  format %{ "zext $dst, $src, 32\t# ptr -> int, #@convP2I" %}

  ins_encode %{
    // Keep the low 32 bits of the pointer, zero-extended.
    __ zext($dst$$Register, $src$$Register, 32);
  %}

  ins_pipe(ialu_reg);
%}

// Convert compressed oop into int for vectors alignment masking
// in case of 32bit oops (heap < 4Gb).
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  // Only valid when the narrow oop is the raw address (no shift).
  predicate(CompressedOops::shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(ALU_COST);
  format %{ "mv  $dst, $src\t# compressed ptr -> int, #@convN2I" %}

  ins_encode %{
    __ mv($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
 8890 
// Math.round(double): round-half-up to long; ftmp is scratch.
instruct round_double_reg(iRegLNoSp dst, fRegD src, fRegD ftmp) %{
  match(Set dst (RoundD src));

  ins_cost(XFER_COST + BRANCH_COST);
  effect(TEMP ftmp);
  format %{ "java_round_double $dst, $src\t#@round_double_reg" %}

  ins_encode %{
    __ java_round_double($dst$$Register, as_FloatRegister($src$$reg), as_FloatRegister($ftmp$$reg));
  %}

  ins_pipe(pipe_slow);
%}

// Math.round(float): round-half-up to int; ftmp is scratch.
instruct round_float_reg(iRegINoSp dst, fRegF src, fRegF ftmp) %{
  match(Set dst (RoundF src));

  ins_cost(XFER_COST + BRANCH_COST);
  effect(TEMP ftmp);
  format %{ "java_round_float $dst, $src\t#@round_float_reg" %}

  ins_encode %{
    __ java_round_float($dst$$Register, as_FloatRegister($src$$reg), as_FloatRegister($ftmp$$reg));
  %}

  ins_pipe(pipe_slow);
%}
 8918 
 8919 // Convert oop pointer into compressed form
// General case: oop may be null, so the encoder must handle it.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(ALU_COST);
  format %{ "encode_heap_oop  $dst, $src\t#@encodeHeapOop" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(pipe_class_default);
%}

// Type system proves the oop is non-null: cheaper encoding, no null check.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(ALU_COST);
  format %{ "encode_heap_oop_not_null $dst, $src\t#@encodeHeapOop_not_null" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// General case: narrow oop may be null.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));

  ins_cost(0);
  format %{ "decode_heap_oop  $dst, $src\t#@decodeHeapOop" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(pipe_class_default);
%}

// Non-null (or constant) narrow oop: cheaper decoding, no null check.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));

  ins_cost(0);
  format %{ "decode_heap_oop_not_null $dst, $src\t#@decodeHeapOop_not_null" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(pipe_class_default);
%}
 8973 
 8974 // Convert klass pointer into compressed form.
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(ALU_COST);
  format %{ "encode_klass_not_null  $dst, $src\t#@encodeKlass_not_null" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    // t0 serves as scratch; klass pointers are never null here.
    __ encode_klass_not_null(dst_reg, src_reg, t0);
  %}

   ins_pipe(pipe_class_default);
%}

instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src, iRegPNoSp tmp) %{
  match(Set dst (DecodeNKlass src));

  // dst must not overlap src (TEMP_DEF); tmp is scratch.
  effect(TEMP_DEF dst, TEMP tmp);

  ins_cost(ALU_COST);
  format %{ "decode_klass_not_null  $dst, $src\t#@decodeKlass_not_null" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    Register tmp_reg = as_Register($tmp$$reg);
    __ decode_klass_not_null(dst_reg, src_reg, tmp_reg);
  %}

   ins_pipe(pipe_class_default);
%}
 9007 
 9008 // stack <-> reg and reg <-> reg shuffles with no conversion
 9009 
// Reinterpret a spilled float's bits as an int by loading from the stack slot.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(LOAD_COST);

  format %{ "lw  $dst, $src\t#@MoveF2I_stack_reg" %}

  ins_encode %{
    __ lw(as_Register($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Reinterpret a spilled int's bits as a float by loading from the stack slot.
instruct MoveI2F_stack_reg(fRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(LOAD_COST);

  format %{ "flw  $dst, $src\t#@MoveI2F_stack_reg" %}

  ins_encode %{
    __ flw(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(fp_load_mem_s);

%}

// Reinterpret a spilled double's bits as a long by loading from the stack slot.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(LOAD_COST);

  format %{ "ld  $dst, $src\t#@MoveD2L_stack_reg" %}

  ins_encode %{
    __ ld(as_Register($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Reinterpret a spilled long's bits as a double by loading from the stack slot.
instruct MoveL2D_stack_reg(fRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(LOAD_COST);

  format %{ "fld  $dst, $src\t#@MoveL2D_stack_reg" %}

  ins_encode %{
    __ fld(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(fp_load_mem_d);

%}

// Store a float register's raw bits into an int stack slot.
instruct MoveF2I_reg_stack(stackSlotI dst, fRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(STORE_COST);

  format %{ "fsw  $src, $dst\t#@MoveF2I_reg_stack" %}

  ins_encode %{
    __ fsw(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(fp_store_reg_s);

%}

// Store an int register's raw bits into a float stack slot.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(STORE_COST);

  format %{ "sw  $src, $dst\t#@MoveI2F_reg_stack" %}

  ins_encode %{
    __ sw(as_Register($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
 9117 
 9118 instruct MoveD2L_reg_stack(stackSlotL dst, fRegD src) %{
 9119 
 9120   match(Set dst (MoveD2L src));
 9121 
 9122   effect(DEF dst, USE src);
 9123 
 9124   ins_cost(STORE_COST);
 9125 
 9126   format %{ "fsd  $dst, $src\t#@MoveD2L_reg_stack" %}
 9127 
 9128   ins_encode %{
 9129     __ fsd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
 9130   %}
 9131 
 9132   ins_pipe(fp_store_reg_d);
 9133 
 9134 %}
 9135 
// Store a long register's raw bits into a double stack slot.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(STORE_COST);

  format %{ "sd  $src, $dst\t#@MoveL2D_reg_stack" %}

  ins_encode %{
    __ sd(as_Register($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
 9153 
// Bit-pattern move FPR -> GPR, no conversion.
instruct MoveF2I_reg_reg(iRegINoSp dst, fRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(FMVX_COST);

  format %{ "fmv.x.w  $dst, $src\t#@MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmv_x_w(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// Bit-pattern move GPR -> FPR, no conversion.
instruct MoveI2F_reg_reg(fRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(FMVX_COST);

  format %{ "fmv.w.x  $dst, $src\t#@MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmv_w_x(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);

%}

// Bit-pattern move double FPR -> GPR, no conversion.
instruct MoveD2L_reg_reg(iRegLNoSp dst, fRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(FMVX_COST);

  format %{ "fmv.x.d $dst, $src\t#@MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmv_x_d(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// Bit-pattern move GPR -> double FPR, no conversion.
instruct MoveL2D_reg_reg(fRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(FMVX_COST);

  format %{ "fmv.d.x  $dst, $src\t#@MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmv_d_x(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);

%}
 9225 
 9226 // ============================================================================
 9227 // Compare Instructions which set the result float comparisons in dest register.
 9228 
// Three-way float compare (CmpF3): dst in {-1, 0, 1}; the format shows
// the pseudo-sequence emitted by float_compare().
instruct cmpF3_reg_reg(iRegINoSp dst, fRegF op1, fRegF op2)
%{
  match(Set dst (CmpF3 op1 op2));

  ins_cost(XFER_COST * 2 + BRANCH_COST + ALU_COST);
  format %{ "flt.s  $dst, $op2, $op1\t#@cmpF3_reg_reg\n\t"
            "bgtz   $dst, done\n\t"
            "feq.s  $dst, $op1, $op2\n\t"
            "addi   $dst, $dst, -1\n\t"
            "done:"
  %}

  ins_encode %{
    // we want -1 for unordered or less than, 0 for equal and 1 for greater than.
    __ float_compare(as_Register($dst$$reg), as_FloatRegister($op1$$reg),
                     as_FloatRegister($op2$$reg), -1 /*unordered_result < 0*/);
  %}

  ins_pipe(pipe_class_default);
%}

// Three-way double compare (CmpD3): dst in {-1, 0, 1}.
instruct cmpD3_reg_reg(iRegINoSp dst, fRegD op1, fRegD op2)
%{
  match(Set dst (CmpD3 op1 op2));

  ins_cost(XFER_COST * 2 + BRANCH_COST + ALU_COST);
  format %{ "flt.d  $dst, $op2, $op1\t#@cmpD3_reg_reg\n\t"
            "bgtz   $dst, done\n\t"
            "feq.d  $dst, $op1, $op2\n\t"
            "addi   $dst, $dst, -1\n\t"
            "done:"
  %}

  ins_encode %{
    // we want -1 for unordered or less than, 0 for equal and 1 for greater than.
    __ double_compare(as_Register($dst$$reg), as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg), -1 /*unordered_result < 0*/);
  %}

  ins_pipe(pipe_class_default);
%}
 9269 
// Three-way signed long compare: dst = sign(op1 - op2) in {-1, 0, 1}.
// Implemented via the cmp_l2i helper into t0, then moved to dst.
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL op1, iRegL op2)
%{
  match(Set dst (CmpL3 op1 op2));

  ins_cost(ALU_COST * 3 + BRANCH_COST);
  format %{ "slt   $dst, $op2, $op1\t#@cmpL3_reg_reg\n\t"
            "bnez  $dst, done\n\t"
            "slt   $dst, $op1, $op2\n\t"
            "neg   $dst, $dst\n\t"
            "done:"
  %}
  ins_encode %{
    __ cmp_l2i(t0, as_Register($op1$$reg), as_Register($op2$$reg));
    __ mv(as_Register($dst$$reg), t0);
  %}

  ins_pipe(pipe_class_default);
%}

// Three-way unsigned long compare: dst in {-1, 0, 1}.
instruct cmpUL3_reg_reg(iRegINoSp dst, iRegL op1, iRegL op2)
%{
  match(Set dst (CmpUL3 op1 op2));

  ins_cost(ALU_COST * 3 + BRANCH_COST);
  format %{ "sltu  $dst, $op2, $op1\t#@cmpUL3_reg_reg\n\t"
            "bnez  $dst, done\n\t"
            "sltu  $dst, $op1, $op2\n\t"
            "neg   $dst, $dst\n\t"
            "done:"
  %}
  ins_encode %{
    __ cmp_ul2i(t0, as_Register($op1$$reg), as_Register($op2$$reg));
    __ mv(as_Register($dst$$reg), t0);
  %}

  ins_pipe(pipe_class_default);
%}

// Three-way unsigned int compare: dst in {-1, 0, 1}.
instruct cmpU3_reg_reg(iRegINoSp dst, iRegI op1, iRegI op2)
%{
  match(Set dst (CmpU3 op1 op2));

  ins_cost(ALU_COST * 3 + BRANCH_COST);
  format %{ "sltu  $dst, $op2, $op1\t#@cmpU3_reg_reg\n\t"
            "bnez  $dst, done\n\t"
            "sltu  $dst, $op1, $op2\n\t"
            "neg   $dst, $dst\n\t"
            "done:"
  %}
  ins_encode %{
    __ cmp_uw2i(t0, as_Register($op1$$reg), as_Register($op2$$reg));
    __ mv(as_Register($dst$$reg), t0);
  %}

  ins_pipe(pipe_class_default);
%}
 9326 
// dst = (p < q) ? -1 : 0  (all-ones mask when p < q).
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegI p, iRegI q)
%{
  match(Set dst (CmpLTMask p q));

  ins_cost(2 * ALU_COST);

  format %{ "slt $dst, $p, $q\t#@cmpLTMask_reg_reg\n\t"
            "subw $dst, zr, $dst\t#@cmpLTMask_reg_reg"
  %}

  ins_encode %{
    // slt yields 0/1; negate to get 0/-1.
    __ slt(as_Register($dst$$reg), as_Register($p$$reg), as_Register($q$$reg));
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
 9344 
 9345 instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I op, immI0 zero)
 9346 %{
 9347   match(Set dst (CmpLTMask op zero));
 9348 
 9349   ins_cost(ALU_COST);
 9350 
 9351   format %{ "sraiw $dst, $dst, 31\t#@cmpLTMask_reg_reg" %}
 9352 
 9353   ins_encode %{
 9354     __ sraiw(as_Register($dst$$reg), as_Register($op$$reg), 31);
 9355   %}
 9356 
 9357   ins_pipe(ialu_reg_shift);
 9358 %}
 9359 
 9360 
 9361 // ============================================================================
 9362 // Max and Min
 9363 
// Two-operand form: dst = min(dst, src), using conditional move.
instruct minI_reg_reg(iRegINoSp dst, iRegI src)
%{
  match(Set dst (MinI dst src));

  ins_cost(BRANCH_COST + ALU_COST);
  format %{"minI_reg_reg $dst, $dst, $src\t#@minI_reg_reg\n\t"%}

  ins_encode %{
    // if (dst > src) dst = src
    __ cmov_gt(as_Register($dst$$reg), as_Register($src$$reg),
               as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Two-operand form: dst = max(dst, src), using conditional move.
instruct maxI_reg_reg(iRegINoSp dst, iRegI src)
%{
  match(Set dst (MaxI dst src));

  ins_cost(BRANCH_COST + ALU_COST);
  format %{"maxI_reg_reg $dst, $dst, $src\t#@maxI_reg_reg\n\t"%}

  ins_encode %{
    // if (dst < src) dst = src
    __ cmov_lt(as_Register($dst$$reg), as_Register($src$$reg),
               as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// special case for comparing with zero
// n.b. this is selected in preference to the rule above because it
// avoids loading constant 0 into a source register

instruct minI_reg_zero(iRegINoSp dst, immI0 zero)
%{
  match(Set dst (MinI dst zero));
  match(Set dst (MinI zero dst));

  ins_cost(BRANCH_COST + ALU_COST);
  format %{"minI_reg_zero $dst, $dst, zr\t#@minI_reg_zero\n\t"%}

  ins_encode %{
    // if (dst > 0) dst = 0
    __ cmov_gt(as_Register($dst$$reg), zr,
               as_Register($dst$$reg), zr);
  %}

  ins_pipe(pipe_class_compare);
%}

instruct maxI_reg_zero(iRegINoSp dst, immI0 zero)
%{
  match(Set dst (MaxI dst zero));
  match(Set dst (MaxI zero dst));

  ins_cost(BRANCH_COST + ALU_COST);
  format %{"maxI_reg_zero $dst, $dst, zr\t#@maxI_reg_zero\n\t"%}

  ins_encode %{
    // if (dst < 0) dst = 0
    __ cmov_lt(as_Register($dst$$reg), zr,
               as_Register($dst$$reg), zr);
  %}

  ins_pipe(pipe_class_compare);
%}
 9429 
// Three-operand form: dst = min(src1, src2); dst starts as src1,
// then is conditionally replaced by src2.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2);

  ins_cost(BRANCH_COST + ALU_COST * 2);
  format %{"minI_rReg $dst, $src1, $src2\t#@minI_rReg\n\t"%}

  ins_encode %{
    __ mv(as_Register($dst$$reg), as_Register($src1$$reg));
    // if (src1 > src2) dst = src2
    __ cmov_gt(as_Register($src1$$reg), as_Register($src2$$reg),
               as_Register($dst$$reg), as_Register($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Three-operand form: dst = max(src1, src2).
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2);

  ins_cost(BRANCH_COST + ALU_COST * 2);
  format %{"maxI_rReg $dst, $src1, $src2\t#@maxI_rReg\n\t"%}

  ins_encode %{
    __ mv(as_Register($dst$$reg), as_Register($src1$$reg));
    // if (src1 < src2) dst = src2
    __ cmov_lt(as_Register($src1$$reg), as_Register($src2$$reg),
               as_Register($dst$$reg), as_Register($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
 9465 
 9466 // ============================================================================
 9467 // Branch Instructions
 9468 // Direct Branch.
// Unconditional jump to lbl.
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "j  $lbl\t#@branch" %}

  ins_encode(riscv_enc_j(lbl));

  ins_pipe(pipe_branch);
%}
 9482 
 9483 // ============================================================================
 9484 // Compare and Branch Instructions
 9485 
 9486 // Patterns for short (< 12KiB) variants
 9487 
 9488 // Compare flags and branch near instructions.
// Branch on a flag register compared against zero (eq/ne only).
instruct cmpFlag_branch(cmpOpEqNe cmp, rFlagsReg cr, label lbl) %{
  match(If cmp cr);
  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b$cmp  $cr, zr, $lbl\t#@cmpFlag_branch" %}

  ins_encode %{
    __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($cr$$reg), *($lbl$$label));
  %}
  ins_pipe(pipe_cmpz_branch);
  // Short variant: target must be within the +/-4KiB conditional-branch range.
  ins_short_branch(1);
%}
 9502 
 9503 // Compare signed int and branch near instructions
instruct cmpI_branch(cmpOp cmp, iRegI op1, iRegI op2, label lbl)
%{
  // Same match rule as `far_cmpI_branch'.
  match(If cmp (CmpI op1 op2));

  effect(USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpI_branch" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Same as cmpI_branch but matched against a counted-loop back edge.
instruct cmpI_loop(cmpOp cmp, iRegI op1, iRegI op2, label lbl)
%{
  // Same match rule as `far_cmpI_loop'.
  match(CountedLoopEnd cmp (CmpI op1 op2));

  effect(USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpI_loop" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
 9541 
 9542 // Compare unsigned int and branch near instructions
instruct cmpU_branch(cmpOpU cmp, iRegI op1, iRegI op2, label lbl)
%{
  // Same match rule as `far_cmpU_branch'.
  match(If cmp (CmpU op1 op2));

  effect(USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpU_branch" %}

  ins_encode %{
    // The unsigned_branch_mask bit selects the unsigned bltu/bgeu forms.
    __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
                  as_Register($op2$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
 9562 
 9563 // Compare signed long and branch near instructions
instruct cmpL_branch(cmpOp cmp, iRegL op1, iRegL op2, label lbl)
%{
  // Same match rule as `far_cmpL_branch'.
  match(If cmp (CmpL op1 op2));

  effect(USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpL_branch" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Same as cmpL_branch but matched against a counted-loop back edge.
instruct cmpL_loop(cmpOp cmp, iRegL op1, iRegL op2, label lbl)
%{
  // Same match rule as `far_cmpL_loop'.
  match(CountedLoopEnd cmp (CmpL op1 op2));

  effect(USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpL_loop" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
 9601 
// Compare unsigned long and branch near instructions
instruct cmpUL_branch(cmpOpU cmp, iRegL op1, iRegL op2, label lbl)
%{
  // Same match rule as `far_cmpUL_branch'.
  match(If cmp (CmpUL op1 op2));

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpUL_branch" %}

  ins_encode %{
    // OR in unsigned_branch_mask so cmp_branch emits the unsigned form of the branch.
    __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
                  as_Register($op2$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_cmp_branch);
  // Short form; paired with the identically-matching far_cmpUL_branch.
  ins_short_branch(1);
%}
 9621 
// Compare pointer and branch near instructions
instruct cmpP_branch(cmpOpU cmp, iRegP op1, iRegP op2, label lbl)
%{
  // Same match rule as `far_cmpP_branch'.
  match(If cmp (CmpP op1 op2));

  effect(USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpP_branch" %}

  ins_encode %{
    // Pointers compare as unsigned values, hence unsigned_branch_mask.
    __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
                  as_Register($op2$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_cmp_branch);
  // Short form; paired with the identically-matching far_cmpP_branch.
  ins_short_branch(1);
%}
 9642 
// Compare narrow pointer and branch near instructions
instruct cmpN_branch(cmpOpU cmp, iRegN op1, iRegN op2, label lbl)
%{
  // Same match rule as `far_cmpN_branch'.
  match(If cmp (CmpN op1 op2));

  effect(USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpN_branch" %}

  ins_encode %{
    // Narrow oops compare as unsigned values, hence unsigned_branch_mask.
    __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
                  as_Register($op2$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_cmp_branch);
  // Short form; paired with the identically-matching far_cmpN_branch.
  ins_short_branch(1);
%}
 9663 
// Compare float and branch near instructions
instruct cmpF_branch(cmpOp cmp, fRegF op1, fRegF op2, label lbl)
%{
  // Same match rule as `far_cmpF_branch'.
  match(If cmp (CmpF op1 op2));

  effect(USE lbl);

  ins_cost(XFER_COST + BRANCH_COST);
  format %{ "float_b$cmp $op1, $op2, $lbl \t#@cmpF_branch"%}

  ins_encode %{
    // No double_branch_mask here: this is the single-precision compare-and-branch.
    __ float_cmp_branch($cmp$$cmpcode, as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_class_compare);
  // Short form; paired with the identically-matching far_cmpF_branch.
  ins_short_branch(1);
%}
 9682 
// Compare double and branch near instructions
instruct cmpD_branch(cmpOp cmp, fRegD op1, fRegD op2, label lbl)
%{
  // Same match rule as `far_cmpD_branch'.
  match(If cmp (CmpD op1 op2));
  effect(USE lbl);

  ins_cost(XFER_COST + BRANCH_COST);
  format %{ "double_b$cmp $op1, $op2, $lbl\t#@cmpD_branch"%}

  ins_encode %{
    // double_branch_mask selects the double-precision compare in float_cmp_branch.
    __ float_cmp_branch($cmp$$cmpcode | C2_MacroAssembler::double_branch_mask, as_FloatRegister($op1$$reg),
                        as_FloatRegister($op2$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_class_compare);
  // Short form; paired with the identically-matching far_cmpD_branch.
  ins_short_branch(1);
%}
 9701 
// Compare signed int with zero and branch near instructions
instruct cmpI_reg_imm0_branch(cmpOp cmp, iRegI op1, immI0 zero, label lbl)
%{
  // Same match rule as `far_cmpI_reg_imm0_branch'.
  match(If cmp (CmpI op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b$cmp  $op1, zr, $lbl\t#@cmpI_reg_imm0_branch" %}

  ins_encode %{
    // Compare directly against the hard-wired zero register instead of materializing 0.
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label));
  %}

  ins_pipe(pipe_cmpz_branch);
  // Short form; paired with the identically-matching far_cmpI_reg_imm0_branch.
  ins_short_branch(1);
%}
 9720 
// Compare signed int with zero and branch near, at the end of a counted loop.
instruct cmpI_reg_imm0_loop(cmpOp cmp, iRegI op1, immI0 zero, label lbl)
%{
  // Same match rule as `far_cmpI_reg_imm0_loop'.
  match(CountedLoopEnd cmp (CmpI op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp  $op1, zr, $lbl\t#@cmpI_reg_imm0_loop" %}

  ins_encode %{
    // Compare directly against the hard-wired zero register instead of materializing 0.
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label));
  %}

  ins_pipe(pipe_cmpz_branch);
  // Short form; replaced by the far variant when the target is out of branch range.
  ins_short_branch(1);
%}
 9739 
// Compare unsigned int with zero and branch near instructions.
// Only eq/ne/le/gt are handled here: against zero, unsigned le is eq and
// unsigned gt is ne, so the encoder can use the zero-compare branches.
instruct cmpUEqNeLeGt_reg_imm0_branch(cmpOpUEqNeLeGt cmp, iRegI op1, immI0 zero, label lbl)
%{
  // Same match rule as `far_cmpUEqNeLeGt_reg_imm0_branch'.
  match(If cmp (CmpU op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp  $op1, zr, $lbl\t#@cmpUEqNeLeGt_reg_imm0_branch" %}

  ins_encode %{
    __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_cmpz_branch);
  // Short form; paired with the identically-matching far variant.
  ins_short_branch(1);
%}
 9759 
// Compare signed long with zero and branch near instructions
instruct cmpL_reg_imm0_branch(cmpOp cmp, iRegL op1, immL0 zero, label lbl)
%{
  // Same match rule as `far_cmpL_reg_imm0_branch'.
  match(If cmp (CmpL op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp  $op1, zr, $lbl\t#@cmpL_reg_imm0_branch" %}

  ins_encode %{
    // Compare directly against the hard-wired zero register instead of materializing 0.
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label));
  %}

  ins_pipe(pipe_cmpz_branch);
  // Short form; paired with the identically-matching far_cmpL_reg_imm0_branch.
  ins_short_branch(1);
%}
 9779 
// Compare signed long with zero and branch near, at the end of a counted loop.
instruct cmpL_reg_imm0_loop(cmpOp cmp, iRegL op1, immL0 zero, label lbl)
%{
  // Same match rule as `far_cmpL_reg_imm0_loop'.
  match(CountedLoopEnd cmp (CmpL op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp  $op1, zr, $lbl\t#@cmpL_reg_imm0_loop" %}

  ins_encode %{
    // Compare directly against the hard-wired zero register instead of materializing 0.
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label));
  %}

  ins_pipe(pipe_cmpz_branch);
  // Short form; replaced by the far variant when the target is out of branch range.
  ins_short_branch(1);
%}
 9798 
// Compare unsigned long with zero and branch near instructions.
// Only eq/ne/le/gt are handled here: against zero, unsigned le is eq and
// unsigned gt is ne, so the encoder can use the zero-compare branches.
instruct cmpULEqNeLeGt_reg_imm0_branch(cmpOpUEqNeLeGt cmp, iRegL op1, immL0 zero, label lbl)
%{
  // Same match rule as `far_cmpULEqNeLeGt_reg_imm0_branch'.
  match(If cmp (CmpUL op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp  $op1, zr, $lbl\t#@cmpULEqNeLeGt_reg_imm0_branch" %}

  ins_encode %{
    __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_cmpz_branch);
  // Short form; paired with the identically-matching far variant.
  ins_short_branch(1);
%}
 9818 
// Compare pointer with zero and branch near instructions
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 zero, label lbl) %{
  // Same match rule as `far_cmpP_reg_imm0_branch'.
  // Only eq/ne make sense for a null check, hence cmpOpEqNe.
  match(If cmp (CmpP op1 zero));
  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b$cmp   $op1, zr, $lbl\t#@cmpP_imm0_branch" %}

  ins_encode %{
    __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_cmpz_branch);
  // Short form; paired with the identically-matching far_cmpP_imm0_branch.
  ins_short_branch(1);
%}
 9835 
// Compare narrow pointer with zero and branch near instructions
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 zero, label lbl) %{
  // Same match rule as `far_cmpN_reg_imm0_branch'.
  // Only eq/ne make sense for a null check, hence cmpOpEqNe.
  match(If cmp (CmpN op1 zero));
  effect(USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp  $op1, zr, $lbl\t#@cmpN_imm0_branch" %}

  ins_encode %{
    __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_cmpz_branch);
  // Short form; paired with the identically-matching far_cmpN_imm0_branch.
  ins_short_branch(1);
%}
 9853 
// Compare narrow pointer with pointer zero and branch near instructions.
// The DecodeN can be elided: a narrow oop is zero iff the decoded oop is null,
// so the eq/ne test is done directly on the compressed value.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN op1, immP0 zero, label lbl) %{
  // Same match rule as `far_cmpP_narrowOop_imm0_branch'.
  match(If cmp (CmpP (DecodeN op1) zero));
  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b$cmp   $op1, zr, $lbl\t#@cmpP_narrowOop_imm0_branch" %}

  ins_encode %{
    __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_cmpz_branch);
  // Short form; paired with the identically-matching far variant.
  ins_short_branch(1);
%}
 9870 
 9871 // Patterns for far (20KiB) variants
 9872 
// Branch far on a condition already materialized in the flags register:
// the flag value in $cr is tested eq/ne against zero.
instruct far_cmpFlag_branch(cmpOp cmp, rFlagsReg cr, label lbl) %{
  match(If cmp cr);
  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "far_b$cmp $cr, zr, $lbl\t#@far_cmpFlag_branch"%}

  ins_encode %{
    // is_far: reaches targets beyond conditional-branch range by branching over a jump.
    __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($cr$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmpz_branch);
%}
 9886 
// Compare signed int and branch far instructions
instruct far_cmpI_branch(cmpOp cmp, iRegI op1, iRegI op2, label lbl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);

  // The [far_b$cmp] shown in the format is emitted as two instructions by the
  // macro assembler: b$not_cmp(op1, op2, done), j($lbl), bind(done)
  format %{ "far_b$cmp  $op1, $op2, $lbl\t#@far_cmpI_branch" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmp_branch);
%}
 9904 
// Compare signed int and branch far, at the end of a counted loop.
instruct far_cmpI_loop(cmpOp cmp, iRegI op1, iRegI op2, label lbl) %{
  match(CountedLoopEnd cmp (CmpI op1 op2));
  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);
  format %{ "far_b$cmp  $op1, $op2, $lbl\t#@far_cmpI_loop" %}

  ins_encode %{
    // is_far: inverted branch over an unconditional jump, for out-of-range targets.
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmp_branch);
%}
 9918 
// Compare unsigned int and branch far instructions.
instruct far_cmpU_branch(cmpOpU cmp, iRegI op1, iRegI op2, label lbl) %{
  match(If cmp (CmpU op1 op2));
  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);
  format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpU_branch" %}

  ins_encode %{
    // OR in unsigned_branch_mask so cmp_branch emits the unsigned form of the branch.
    __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
                       as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmp_branch);
%}
 9933 
// Compare signed long and branch far instructions.
instruct far_cmpL_branch(cmpOp cmp, iRegL op1, iRegL op2, label lbl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);
  format %{ "far_b$cmp  $op1, $op2, $lbl\t#@far_cmpL_branch" %}

  ins_encode %{
    // is_far: inverted branch over an unconditional jump, for out-of-range targets.
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmp_branch);
%}
 9947 
 9948 instruct far_cmpLloop(cmpOp cmp, iRegL op1, iRegL op2, label lbl) %{
 9949   match(CountedLoopEnd cmp (CmpL op1 op2));
 9950   effect(USE lbl);
 9951 
 9952   ins_cost(BRANCH_COST * 2);
 9953   format %{ "far_b$cmp  $op1, $op2, $lbl\t#@far_cmpL_loop" %}
 9954 
 9955   ins_encode %{
 9956     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
 9957   %}
 9958 
 9959   ins_pipe(pipe_cmp_branch);
 9960 %}
 9961 
// Compare unsigned long and branch far instructions.
instruct far_cmpUL_branch(cmpOpU cmp, iRegL op1, iRegL op2, label lbl) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);
  format %{ "far_b$cmp  $op1, $op2, $lbl\t#@far_cmpUL_branch" %}

  ins_encode %{
    // OR in unsigned_branch_mask so cmp_branch emits the unsigned form of the branch.
    __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
                       as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmp_branch);
%}
 9976 
// Compare pointer and branch far instructions.
instruct far_cmpP_branch(cmpOpU cmp, iRegP op1, iRegP op2, label lbl)
%{
  match(If cmp (CmpP op1 op2));

  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);

  format %{ "far_b$cmp  $op1, $op2, $lbl\t#@far_cmpP_branch" %}

  ins_encode %{
    // Pointers compare as unsigned values, hence unsigned_branch_mask.
    __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
                       as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmp_branch);
%}
 9994 
// Compare narrow pointer and branch far instructions.
instruct far_cmpN_branch(cmpOpU cmp, iRegN op1, iRegN op2, label lbl)
%{
  match(If cmp (CmpN op1 op2));

  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);

  format %{ "far_b$cmp  $op1, $op2, $lbl\t#@far_cmpN_branch" %}

  ins_encode %{
    // Narrow oops compare as unsigned values, hence unsigned_branch_mask.
    __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
                       as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmp_branch);
%}
10012 
// Float compare and branch instructions
instruct far_cmpF_branch(cmpOp cmp, fRegF op1, fRegF op2, label lbl)
%{
  match(If cmp (CmpF op1 op2));

  effect(USE lbl);

  ins_cost(XFER_COST + BRANCH_COST * 2);
  format %{ "far_float_b$cmp $op1, $op2, $lbl\t#@far_cmpF_branch"%}

  ins_encode %{
    // Single-precision compare (no double_branch_mask), far-range branch.
    __ float_cmp_branch($cmp$$cmpcode, as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
                        *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_class_compare);
%}
10030 
// Double compare and branch instructions
instruct far_cmpD_branch(cmpOp cmp, fRegD op1, fRegD op2, label lbl)
%{
  match(If cmp (CmpD op1 op2));
  effect(USE lbl);

  ins_cost(XFER_COST + BRANCH_COST * 2);
  format %{ "far_double_b$cmp $op1, $op2, $lbl\t#@far_cmpD_branch"%}

  ins_encode %{
    // double_branch_mask selects the double-precision compare in float_cmp_branch.
    __ float_cmp_branch($cmp$$cmpcode | C2_MacroAssembler::double_branch_mask, as_FloatRegister($op1$$reg),
                        as_FloatRegister($op2$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_class_compare);
%}
10047 
// Compare signed int with zero and branch far instructions.
instruct far_cmpI_reg_imm0_branch(cmpOp cmp, iRegI op1, immI0 zero, label lbl)
%{
  match(If cmp (CmpI op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST * 2);

  format %{ "far_b$cmp  $op1, zr, $lbl\t#@far_cmpI_reg_imm0_branch" %}

  ins_encode %{
    // Compare against the hard-wired zero register; is_far for out-of-range targets.
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmpz_branch);
%}
10064 
// Compare signed int with zero and branch far, at the end of a counted loop.
instruct far_cmpI_reg_imm0_loop(cmpOp cmp, iRegI op1, immI0 zero, label lbl)
%{
  match(CountedLoopEnd cmp (CmpI op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST * 2);

  format %{ "far_b$cmp  $op1, zr, $lbl\t#@far_cmpI_reg_imm0_loop" %}

  ins_encode %{
    // Compare against the hard-wired zero register; is_far for out-of-range targets.
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmpz_branch);
%}
10081 
// Compare unsigned int (eq/ne/le/gt only) with zero and branch far instructions.
instruct far_cmpUEqNeLeGt_imm0_branch(cmpOpUEqNeLeGt cmp, iRegI op1, immI0 zero, label lbl)
%{
  match(If cmp (CmpU op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST * 2);

  format %{ "far_b$cmp  $op1, zr, $lbl\t#@far_cmpUEqNeLeGt_imm0_branch" %}

  ins_encode %{
    // Against zero, unsigned le folds to eq and unsigned gt to ne; is_far for range.
    __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmpz_branch);
%}
10098 
// Unsigned lt/ge compares against zero have no short instruct with the same
// match rule: unsigned `lt 0' is never true and `ge 0' is always true, so the
// encoder emits either nothing or one unconditional jump — never a real branch.
instruct far_cmpULtGe_reg_imm0_branch(cmpOpULtGe cmp, iRegI op1, immI0 zero, label lbl)
%{
  match(If cmp (CmpU op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "j  $lbl if $cmp == ge\t#@far_cmpULtGe_reg_imm0_branch" %}

  ins_encode(riscv_enc_far_cmpULtGe_imm0_branch(cmp, op1, lbl));

  ins_pipe(pipe_cmpz_branch);
%}
10114 
// Compare signed long with zero and branch far instructions.
instruct far_cmpL_reg_imm0_branch(cmpOp cmp, iRegL op1, immL0 zero, label lbl)
%{
  match(If cmp (CmpL op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST * 2);

  format %{ "far_b$cmp  $op1, zr, $lbl\t#@far_cmpL_reg_imm0_branch" %}

  ins_encode %{
    // Compare against the hard-wired zero register; is_far for out-of-range targets.
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmpz_branch);
%}
10131 
// Compare signed long with zero and branch far, at the end of a counted loop.
instruct far_cmpL_reg_imm0_loop(cmpOp cmp, iRegL op1, immL0 zero, label lbl)
%{
  match(CountedLoopEnd cmp (CmpL op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST * 2);

  format %{ "far_b$cmp  $op1, zr, $lbl\t#@far_cmpL_reg_imm0_loop" %}

  ins_encode %{
    // Compare against the hard-wired zero register; is_far for out-of-range targets.
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmpz_branch);
%}
10148 
// Compare unsigned long (eq/ne/le/gt only) with zero and branch far instructions.
instruct far_cmpULEqNeLeGt_reg_imm0_branch(cmpOpUEqNeLeGt cmp, iRegL op1, immL0 zero, label lbl)
%{
  match(If cmp (CmpUL op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST * 2);

  format %{ "far_b$cmp  $op1, zr, $lbl\t#@far_cmpULEqNeLeGt_reg_imm0_branch" %}

  ins_encode %{
    // Against zero, unsigned le folds to eq and unsigned gt to ne; is_far for range.
    __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmpz_branch);
%}
10165 
// Unsigned lt/ge compares against zero have no short instruct with the same
// match rule: unsigned `lt 0' is never true and `ge 0' is always true, so the
// encoder emits either nothing or one unconditional jump — never a real branch.
instruct far_cmpULLtGe_reg_imm0_branch(cmpOpULtGe cmp, iRegL op1, immL0 zero, label lbl)
%{
  match(If cmp (CmpUL op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "j  $lbl if $cmp == ge\t#@far_cmpULLtGe_reg_imm0_branch" %}

  ins_encode(riscv_enc_far_cmpULtGe_imm0_branch(cmp, op1, lbl));

  ins_pipe(pipe_cmpz_branch);
%}
10181 
// Compare pointer with zero (eq/ne null check) and branch far instructions.
instruct far_cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 zero, label lbl) %{
  match(If cmp (CmpP op1 zero));
  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);
  format %{ "far_b$cmp   $op1, zr, $lbl\t#@far_cmpP_imm0_branch" %}

  ins_encode %{
    // is_far: reaches targets beyond conditional-branch range by branching over a jump.
    __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmpz_branch);
%}
10195 
// Compare narrow pointer with zero (eq/ne null check) and branch far instructions.
instruct far_cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 zero, label lbl) %{
  match(If cmp (CmpN op1 zero));
  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);

  format %{ "far_b$cmp  $op1, zr, $lbl\t#@far_cmpN_imm0_branch" %}

  ins_encode %{
    // is_far: reaches targets beyond conditional-branch range by branching over a jump.
    __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmpz_branch);
%}
10210 
// Null-check a decoded narrow oop and branch far. The DecodeN is elided:
// the compressed value is zero iff the decoded oop is null.
instruct far_cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN op1, immP0 zero, label lbl) %{
  match(If cmp (CmpP (DecodeN op1) zero));
  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);
  format %{ "far_b$cmp   $op1, zr, $lbl\t#@far_cmpP_narrowOop_imm0_branch" %}

  ins_encode %{
    // is_far: reaches targets beyond conditional-branch range by branching over a jump.
    __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmpz_branch);
%}
10224 
10225 // ============================================================================
10226 // Conditional Move Instructions
10227 
10228 // --------- CMoveI ---------
10229 
// Conditional move of an int selected by a signed int compare:
// dst = (op1 cop op2) ? src : dst (dst is the false value in the Binary).
instruct cmovI_cmpI(iRegINoSp dst, iRegI src, iRegI op1, iRegI op2, cmpOp cop) %{
  match(Set dst (CMoveI (Binary cop (CmpI op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpI\n\t"
  %}

  ins_encode %{
    __ enc_cmove($cop$$cmpcode,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
10246 
// Conditional move of an int selected by an unsigned int compare:
// dst = (op1 cop op2) ? src : dst.
instruct cmovI_cmpU(iRegINoSp dst, iRegI src, iRegI op1, iRegI op2, cmpOpU cop) %{
  match(Set dst (CMoveI (Binary cop (CmpU op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpU\n\t"
  %}

  ins_encode %{
    // unsigned_branch_mask makes enc_cmove test the condition unsigned.
    __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
10263 
// Conditional move of an int selected by a signed long compare:
// dst = (op1 cop op2) ? src : dst.
instruct cmovI_cmpL(iRegINoSp dst, iRegI src, iRegL op1, iRegL op2, cmpOp cop) %{
  match(Set dst (CMoveI (Binary cop (CmpL op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpL\n\t"
  %}

  ins_encode %{
    __ enc_cmove($cop$$cmpcode,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
10280 
// Conditional move of an int selected by an unsigned long compare:
// dst = (op1 cop op2) ? src : dst.
instruct cmovI_cmpUL(iRegINoSp dst, iRegI src, iRegL op1, iRegL op2, cmpOpU cop) %{
  match(Set dst (CMoveI (Binary cop (CmpUL op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpUL\n\t"
  %}

  ins_encode %{
    // unsigned_branch_mask makes enc_cmove test the condition unsigned.
    __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
10297 
// Conditional move of an int selected by a float compare:
// dst = (op1 cop op2) ? src : dst.
instruct cmovI_cmpF(iRegINoSp dst, iRegI src, fRegF op1, fRegF op2, cmpOp cop) %{
  match(Set dst (CMoveI (Binary cop (CmpF op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpF\n\t"
  %}

  ins_encode %{
    // FP compare selecting between integer registers; single-precision operands.
    __ enc_cmove_cmp_fp($cop$$cmpcode,
                        as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
                        as_Register($dst$$reg), as_Register($src$$reg), true /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}
10314 
// Conditional move of an int selected by a double compare:
// dst = (op1 cop op2) ? src : dst.
instruct cmovI_cmpD(iRegINoSp dst, iRegI src, fRegD op1, fRegD op2, cmpOp cop) %{
  match(Set dst (CMoveI (Binary cop (CmpD op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpD\n\t"
  %}

  ins_encode %{
    // double_branch_mask + is_single=false select the double-precision compare.
    __ enc_cmove_cmp_fp($cop$$cmpcode | C2_MacroAssembler::double_branch_mask,
                        as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
                        as_Register($dst$$reg), as_Register($src$$reg), false /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}
10331 
// Conditional move of an int selected by a narrow-oop compare (unsigned):
// dst = (op1 cop op2) ? src : dst.
instruct cmovI_cmpN(iRegINoSp dst, iRegI src, iRegN op1, iRegN op2, cmpOpU cop) %{
  match(Set dst (CMoveI (Binary cop (CmpN op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpN\n\t"
  %}

  ins_encode %{
    // Narrow oops compare as unsigned values, hence unsigned_branch_mask.
    __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
10348 
// Conditional move of an int selected by a pointer compare (unsigned):
// dst = (op1 cop op2) ? src : dst.
instruct cmovI_cmpP(iRegINoSp dst, iRegI src, iRegP op1, iRegP op2, cmpOpU cop) %{
  match(Set dst (CMoveI (Binary cop (CmpP op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpP\n\t"
  %}

  ins_encode %{
    // Pointers compare as unsigned values, hence unsigned_branch_mask.
    __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
10365 
10366 // --------- CMoveL ---------
10367 
// Conditional move of a long selected by a signed long compare:
// dst = (op1 cop op2) ? src : dst.
instruct cmovL_cmpL(iRegLNoSp dst, iRegL src, iRegL op1, iRegL op2, cmpOp cop) %{
  match(Set dst (CMoveL (Binary cop (CmpL op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpL\n\t"
  %}

  ins_encode %{
    __ enc_cmove($cop$$cmpcode,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
10384 
// Conditional move of a long selected by an unsigned long compare:
// dst = (op1 cop op2) ? src : dst.
instruct cmovL_cmpUL(iRegLNoSp dst, iRegL src, iRegL op1, iRegL op2, cmpOpU cop) %{
  match(Set dst (CMoveL (Binary cop (CmpUL op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpUL\n\t"
  %}

  ins_encode %{
    // unsigned_branch_mask makes enc_cmove test the condition unsigned.
    __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
10401 
// Conditional move of a long selected by a signed int compare:
// dst = (op1 cop op2) ? src : dst.
instruct cmovL_cmpI(iRegLNoSp dst, iRegL src, iRegI op1, iRegI op2, cmpOp cop) %{
  match(Set dst (CMoveL (Binary cop (CmpI op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpI\n\t"
  %}

  ins_encode %{
    __ enc_cmove($cop$$cmpcode,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
10418 
// Conditional move of a long selected by an unsigned int compare:
// dst = (op1 cop op2) ? src : dst.
instruct cmovL_cmpU(iRegLNoSp dst, iRegL src, iRegI op1, iRegI op2, cmpOpU cop) %{
  match(Set dst (CMoveL (Binary cop (CmpU op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpU\n\t"
  %}

  ins_encode %{
    // unsigned_branch_mask makes enc_cmove test the condition unsigned.
    __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
10435 
// Conditional move of a long selected by a float compare:
// dst = (op1 cop op2) ? src : dst.
instruct cmovL_cmpF(iRegLNoSp dst, iRegL src, fRegF op1, fRegF op2, cmpOp cop) %{
  match(Set dst (CMoveL (Binary cop (CmpF op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpF\n\t"
  %}

  ins_encode %{
    // FP compare selecting between integer registers; single-precision operands.
    __ enc_cmove_cmp_fp($cop$$cmpcode,
                        as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
                        as_Register($dst$$reg), as_Register($src$$reg), true /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}
10452 
// Conditional move of a long selected by a double compare:
// dst = (op1 cop op2) ? src : dst.
instruct cmovL_cmpD(iRegLNoSp dst, iRegL src, fRegD op1, fRegD op2, cmpOp cop) %{
  match(Set dst (CMoveL (Binary cop (CmpD op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpD\n\t"
  %}

  ins_encode %{
    // double_branch_mask + is_single=false select the double-precision compare.
    __ enc_cmove_cmp_fp($cop$$cmpcode | C2_MacroAssembler::double_branch_mask,
                        as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
                        as_Register($dst$$reg), as_Register($src$$reg), false /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}
10469 
// Conditional move of a long selected by a narrow-oop compare (unsigned):
// dst = (op1 cop op2) ? src : dst.
instruct cmovL_cmpN(iRegLNoSp dst, iRegL src, iRegN op1, iRegN op2, cmpOpU cop) %{
  match(Set dst (CMoveL (Binary cop (CmpN op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpN\n\t"
  %}

  ins_encode %{
    // Narrow oops compare as unsigned values, hence unsigned_branch_mask.
    __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
10486 
// Conditional move of a long selected by a pointer compare (unsigned):
// dst = (op1 cop op2) ? src : dst.
instruct cmovL_cmpP(iRegLNoSp dst, iRegL src, iRegP op1, iRegP op2, cmpOpU cop) %{
  match(Set dst (CMoveL (Binary cop (CmpP op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpP\n\t"
  %}

  ins_encode %{
    // Pointers compare as unsigned values, hence unsigned_branch_mask.
    __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
10503 
10504 // --------- CMoveF ---------
10505 
// Conditional move of a float selected by a signed int compare:
// dst = (op1 cop op2) ? src : dst.
instruct cmovF_cmpI(fRegF dst, fRegF src, iRegI op1, iRegI op2, cmpOp cop) %{
  match(Set dst (CMoveF (Binary cop (CmpI op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpI\n\t"
  %}

  ins_encode %{
    // Integer compare selecting between FP registers; is_single: dst/src are floats.
    __ enc_cmove_fp_cmp($cop$$cmpcode,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), true /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}
10522 
// Conditional move of a float selected by an unsigned int compare:
// dst = (op1 cop op2) ? src : dst.
instruct cmovF_cmpU(fRegF dst, fRegF src, iRegI op1, iRegI op2, cmpOpU cop) %{
  match(Set dst (CMoveF (Binary cop (CmpU op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpU\n\t"
  %}

  ins_encode %{
    // unsigned_branch_mask makes the integer compare unsigned; is_single: float dst/src.
    __ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), true /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}
10539 
// Conditional move of float $src into $dst, selected by a signed long
// comparison performed in GP registers.
instruct cmovF_cmpL(fRegF dst, fRegF src, iRegL op1, iRegL op2, cmpOp cop) %{
  match(Set dst (CMoveF (Binary cop (CmpL op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpL\n\t"
  %}

  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), true /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}
10556 
// Conditional move of float $src into $dst, selected by an unsigned long
// comparison; unsigned_branch_mask makes the emitted branch unsigned.
instruct cmovF_cmpUL(fRegF dst, fRegF src, iRegL op1, iRegL op2, cmpOpU cop) %{
  match(Set dst (CMoveF (Binary cop (CmpUL op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpUL\n\t"
  %}

  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), true /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}
10573 
// Conditional move of float $src into $dst, selected by a single-precision
// FP comparison; both the compare and the move operate on FP registers.
instruct cmovF_cmpF(fRegF dst, fRegF src, fRegF op1, fRegF op2, cmpOp cop) %{
  match(Set dst (CMoveF (Binary cop (CmpF op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpF\n\t"
  %}

  ins_encode %{
    // cmp_single: compare operands are single precision;
    // cmov_single: moved value is single precision.
    __ enc_cmove_fp_cmp_fp($cop$$cmpcode,
                    as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
                    as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg),
                    true /* cmp_single */, true /* cmov_single */);
  %}

  ins_pipe(pipe_class_compare);
%}
10591 
// Conditional move of float $src into $dst, selected by a double-precision
// FP comparison (double_branch_mask marks the compare as double).
instruct cmovF_cmpD(fRegF dst, fRegF src, fRegD op1, fRegD op2, cmpOp cop) %{
  match(Set dst (CMoveF (Binary cop (CmpD op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpD\n\t"
  %}

  ins_encode %{
    __ enc_cmove_fp_cmp_fp($cop$$cmpcode | C2_MacroAssembler::double_branch_mask,
                    as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
                    as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg),
                    false /* cmp_single */, true /* cmov_single */);
  %}

  ins_pipe(pipe_class_compare);
%}
10609 
// Conditional move of float $src into $dst, selected by a narrow-oop
// (compressed pointer) comparison. Narrow-oop compares are unsigned, so the
// condition operand must be cmpOpU — consistent with the other CmpN/CmpP
// cmov rules in this file — matching the unsigned_branch_mask applied in
// the encoding below.
instruct cmovF_cmpN(fRegF dst, fRegF src, iRegN op1, iRegN op2, cmpOpU cop) %{
  match(Set dst (CMoveF (Binary cop (CmpN op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpN\n\t"
  %}

  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), true /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}
10626 
// Conditional move of float $src into $dst, selected by a pointer
// comparison. Pointer compares are always unsigned, so the condition
// operand must be cmpOpU (as in cmovL_cmpP), matching the
// unsigned_branch_mask applied in the encoding below.
instruct cmovF_cmpP(fRegF dst, fRegF src, iRegP op1, iRegP op2, cmpOpU cop) %{
  match(Set dst (CMoveF (Binary cop (CmpP op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpP\n\t"
  %}

  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), true /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}
10643 
10644 // --------- CMoveD ---------
10645 
// Conditional move of double $src into $dst, selected by a signed int
// comparison performed in GP registers.
instruct cmovD_cmpI(fRegD dst, fRegD src, iRegI op1, iRegI op2, cmpOp cop) %{
  match(Set dst (CMoveD (Binary cop (CmpI op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpI\n\t"
  %}

  ins_encode %{
    // is_single == false: the moved value is double precision.
    __ enc_cmove_fp_cmp($cop$$cmpcode,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), false /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}
10662 
// Conditional move of double $src into $dst, selected by an unsigned int
// comparison; unsigned_branch_mask makes the emitted branch unsigned.
instruct cmovD_cmpU(fRegD dst, fRegD src, iRegI op1, iRegI op2, cmpOpU cop) %{
  match(Set dst (CMoveD (Binary cop (CmpU op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpU\n\t"
  %}

  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), false /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}
10679 
// Conditional move of double $src into $dst, selected by a signed long
// comparison performed in GP registers.
instruct cmovD_cmpL(fRegD dst, fRegD src, iRegL op1, iRegL op2, cmpOp cop) %{
  match(Set dst (CMoveD (Binary cop (CmpL op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpL\n\t"
  %}

  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), false /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}
10696 
// Conditional move of double $src into $dst, selected by an unsigned long
// comparison; unsigned_branch_mask makes the emitted branch unsigned.
instruct cmovD_cmpUL(fRegD dst, fRegD src, iRegL op1, iRegL op2, cmpOpU cop) %{
  match(Set dst (CMoveD (Binary cop (CmpUL op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpUL\n\t"
  %}

  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), false /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}
10713 
// Conditional move of double $src into $dst, selected by a single-precision
// FP comparison.
instruct cmovD_cmpF(fRegD dst, fRegD src, fRegF op1, fRegF op2, cmpOp cop) %{
  match(Set dst (CMoveD (Binary cop (CmpF op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpF\n\t"
  %}

  ins_encode %{
    // cmp_single: compare is single precision; cmov_single == false: the
    // moved value is double precision.
    __ enc_cmove_fp_cmp_fp($cop$$cmpcode,
                    as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
                    as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg),
                    true /* cmp_single */, false /* cmov_single */);
  %}

  ins_pipe(pipe_class_compare);
%}
10731 
// Conditional move of double $src into $dst, selected by a double-precision
// FP comparison (double_branch_mask marks the compare as double).
instruct cmovD_cmpD(fRegD dst, fRegD src, fRegD op1, fRegD op2, cmpOp cop) %{
  match(Set dst (CMoveD (Binary cop (CmpD op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpD\n\t"
  %}

  ins_encode %{
    __ enc_cmove_fp_cmp_fp($cop$$cmpcode | C2_MacroAssembler::double_branch_mask,
                    as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
                    as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg),
                    false /* cmp_single */, false /* cmov_single */);
  %}

  ins_pipe(pipe_class_compare);
%}
10749 
// Conditional move of double $src into $dst, selected by a narrow-oop
// (compressed pointer) comparison. Narrow-oop compares are unsigned, so the
// condition operand must be cmpOpU — consistent with the other CmpN/CmpP
// cmov rules in this file — matching the unsigned_branch_mask applied in
// the encoding below.
instruct cmovD_cmpN(fRegD dst, fRegD src, iRegN op1, iRegN op2, cmpOpU cop) %{
  match(Set dst (CMoveD (Binary cop (CmpN op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpN\n\t"
  %}

  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), false /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}
10766 
// Conditional move of double $src into $dst, selected by a pointer
// comparison. Pointer compares are always unsigned, so the condition
// operand must be cmpOpU (as in cmovL_cmpP), matching the
// unsigned_branch_mask applied in the encoding below.
instruct cmovD_cmpP(fRegD dst, fRegD src, iRegP op1, iRegP op2, cmpOpU cop) %{
  match(Set dst (CMoveD (Binary cop (CmpP op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpP\n\t"
  %}

  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), false /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}
10783 
10784 // ============================================================================
10785 // Procedure Call/Return Instructions
10786 
10787 // Call Java Static Instruction
10788 // Note: If this code changes, the corresponding ret_addr_offset() and
10789 //       compute_padding() functions will have to be adjusted.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(BRANCH_COST);

  format %{ "CALL,static $meth\t#@CallStaticJavaDirect" %}

  // Direct static call; the epilog encoding follows the call site.
  ins_encode(riscv_enc_java_static_call(meth),
             riscv_enc_call_epilog);

  ins_pipe(pipe_class_call);
  // 4-byte alignment keeps the call site patchable as a unit.
  ins_alignment(4);
%}
10806 
10807 // TO HERE
10808 
10809 // Call Java Dynamic Instruction
10810 // Note: If this code changes, the corresponding ret_addr_offset() and
10811 //       compute_padding() functions will have to be adjusted.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  // Extra ALU cost over the static call covers the inline-cache setup.
  ins_cost(BRANCH_COST + ALU_COST * 5);

  format %{ "CALL,dynamic $meth\t#@CallDynamicJavaDirect" %}

  ins_encode(riscv_enc_java_dynamic_call(meth),
             riscv_enc_call_epilog);

  ins_pipe(pipe_class_call);
  ins_alignment(4);
%}
10828 
10829 // Call Runtime Instruction
10830 
// Direct call from compiled Java code into the VM runtime.
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(BRANCH_COST);

  format %{ "CALL, runtime $meth\t#@CallRuntimeDirect" %}

  ins_encode(riscv_enc_java_to_runtime(meth));

  ins_pipe(pipe_class_call);
  ins_alignment(4);
%}
10846 
10847 // Call Runtime Instruction
10848 
// Direct call to a runtime leaf routine (no safepoint, no Java frame walk).
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(BRANCH_COST);

  format %{ "CALL, runtime leaf $meth\t#@CallLeafDirect" %}

  ins_encode(riscv_enc_java_to_runtime(meth));

  ins_pipe(pipe_class_call);
  ins_alignment(4);
%}
10864 
10865 // Call Runtime Instruction without safepoint and with vector arguments
10866 
// Direct call to a runtime leaf routine that takes vector arguments;
// uses the same java-to-runtime encoding as CallLeafDirect.
instruct CallLeafDirectVector(method meth)
%{
  match(CallLeafVector);

  effect(USE meth);

  ins_cost(BRANCH_COST);

  format %{ "CALL, runtime leaf vector $meth" %}

  ins_encode(riscv_enc_java_to_runtime(meth));

  ins_pipe(pipe_class_call);
  ins_alignment(4);
%}
10882 
10883 // Call Runtime Instruction
10884 
// Direct call to a runtime leaf routine that does not use FP arguments.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(BRANCH_COST);

  format %{ "CALL, runtime leaf nofp $meth\t#@CallLeafNoFPDirect" %}

  ins_encode(riscv_enc_java_to_runtime(meth));

  ins_pipe(pipe_class_call);
  ins_alignment(4);
%}
10900 
10901 // ============================================================================
10902 // Partial Subtype Check
10903 //
10904 // superklass array for an instance of the superklass.  Set a hidden
10905 // internal cache on a hit (cache is checked with exposed code in
10906 // gen_subtype_check()).  Return zero for a hit.  The encoding
10907 // ALSO sets flags.
10908 
// Linear-scan secondary-supers check, used only when the hashed
// secondary-supers table is disabled (!UseSecondarySupersTable).
// Fixed registers R10/R12/R14/R15 match the stub calling convention.
instruct partialSubtypeCheck(iRegP_R15 result, iRegP_R14 sub, iRegP_R10 super, iRegP_R12 tmp, rFlagsReg cr)
%{
  predicate(!UseSecondarySupersTable);
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL tmp, KILL cr);

  ins_cost(20 * DEFAULT_COST);
  format %{ "partialSubtypeCheck $result, $sub, $super\t#@partialSubtypeCheck" %}

  ins_encode(riscv_enc_partial_subtype_check(sub, super, tmp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
10924 
10925 // Two versions of partialSubtypeCheck, both used when we need to
10926 // search for a super class in the secondary supers array. The first
10927 // is used when we don't know _a priori_ the class being searched
10928 // for. The second, far more common, is used when we do know: this is
10929 // used for instanceof, checkcast, and any case where C2 can determine
10930 // it by constant propagation.
10931 
// Hashed secondary-supers lookup when the super class is NOT known at
// compile time (variable super). Falls through with $result set; no
// success label is taken (nullptr L_success).
instruct partialSubtypeCheckVarSuper(iRegP_R14 sub, iRegP_R10 super, iRegP_R15 result,
                                     iRegP_R11 tmpR11, iRegP_R12 tmpR12, iRegP_R13 tmpR13,
                                     iRegP_R16 tmpR16, rFlagsReg cr)
%{
  predicate(UseSecondarySupersTable);
  match(Set result (PartialSubtypeCheck sub super));
  effect(TEMP tmpR11, TEMP tmpR12, TEMP tmpR13, TEMP tmpR16, KILL cr);

  ins_cost(10 * DEFAULT_COST);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode %{
    __ lookup_secondary_supers_table_var($sub$$Register, $super$$Register, $result$$Register,
                                         $tmpR11$$Register, $tmpR12$$Register, $tmpR13$$Register,
                                         $tmpR16$$Register, nullptr /*L_success*/);
  %}

  ins_pipe(pipe_class_memory);
%}
10951 
// Hashed secondary-supers lookup when the super class IS a compile-time
// constant (instanceof/checkcast fast path). Either inlines the table
// probe or calls the per-slot stub, depending on InlineSecondarySupersTest.
instruct partialSubtypeCheckConstSuper(iRegP_R14 sub, iRegP_R10 super_reg, immP super_con, iRegP_R15 result,
                                       iRegP_R11 tmpR11, iRegP_R12 tmpR12, iRegP_R13 tmpR13, iRegP_R16 tmpR16, rFlagsReg cr)
%{
  predicate(UseSecondarySupersTable);
  match(Set result (PartialSubtypeCheck sub (Binary super_reg super_con)));
  effect(TEMP tmpR11, TEMP tmpR12, TEMP tmpR13, TEMP tmpR16, KILL cr);

  ins_cost(5 * DEFAULT_COST); // needs to be less than competing nodes
  format %{ "partialSubtypeCheck $result, $sub, $super_reg, $super_con" %}

  ins_encode %{
    bool success = false;
    // The constant super's hash slot selects the table bucket / stub.
    u1 super_klass_slot = ((Klass*)$super_con$$constant)->hash_slot();
    if (InlineSecondarySupersTest) {
      success = __ lookup_secondary_supers_table_const($sub$$Register, $super_reg$$Register, $result$$Register,
                                                       $tmpR11$$Register, $tmpR12$$Register, $tmpR13$$Register,
                                                       $tmpR16$$Register, super_klass_slot);
    } else {
      // Out-of-line stub call; reloc_call returns nullptr if the code
      // cache has no room for the relocation.
      address call = __ reloc_call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_stub(super_klass_slot)));
      success = (call != nullptr);
    }
    if (!success) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
10981 
// Scalar (non-RVV) String.compareTo for two UTF-16 strings (UU encoding).
instruct string_compareU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
                         iRegI_R10 result, iRegP_R28 tmp1, iRegL_R29 tmp2, iRegL_R30 tmp3, rFlagsReg cr)
%{
  predicate(!UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL tmp3, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareU" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                      StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
10999 
// Scalar (non-RVV) String.compareTo for two Latin-1 strings (LL encoding).
instruct string_compareL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
                         iRegI_R10 result, iRegP_R28 tmp1, iRegL_R29 tmp2, iRegL_R30 tmp3, rFlagsReg cr)
%{
  predicate(!UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL tmp3, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareL" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                      StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
11016 
// Scalar (non-RVV) String.compareTo, UTF-16 str1 vs Latin-1 str2 (UL).
instruct string_compareUL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
                          iRegI_R10 result, iRegP_R28 tmp1, iRegL_R29 tmp2, iRegL_R30 tmp3, rFlagsReg cr)
%{
  predicate(!UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL tmp3, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{"String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareUL" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                      StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
11033 
// Scalar (non-RVV) String.compareTo, Latin-1 str1 vs UTF-16 str2 (LU).
instruct string_compareLU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
                          iRegI_R10 result, iRegP_R28 tmp1, iRegL_R29 tmp2, iRegL_R30 tmp3,
                          rFlagsReg cr)
%{
  predicate(!UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL tmp3, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareLU" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                      StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
11051 
// String.indexOf with a variable-length needle, both strings UTF-16 (UU).
instruct string_indexofUU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
                          iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);

  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}
  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
11072 
// String.indexOf with a variable-length needle, both strings Latin-1 (LL).
instruct string_indexofLL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
                          iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);

  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}
  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
11093 
// String.indexOf with a variable-length needle, UTF-16 haystack and
// Latin-1 needle (UL).
instruct string_indexofUL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
                          iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
11114 
// String.indexOf with a small constant-length needle (<= 4 chars), both
// strings UTF-16; uses the linear-scan helper with the needle length
// passed as an immediate (zr for the needle-count register).
instruct string_indexof_conUU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2,
                              immI_le_4 int_cnt2, iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);

  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof_linearscan($str1$$Register, $str2$$Register,
                                 $cnt1$$Register, zr,
                                 $tmp1$$Register, $tmp2$$Register,
                                 $tmp3$$Register, $tmp4$$Register,
                                 icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
11136 
// String.indexOf with a small constant-length needle (<= 4 chars), both
// strings Latin-1; linear-scan helper, needle length as immediate.
instruct string_indexof_conLL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2,
                              immI_le_4 int_cnt2, iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);

  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}
  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof_linearscan($str1$$Register, $str2$$Register,
                                 $cnt1$$Register, zr,
                                 $tmp1$$Register, $tmp2$$Register,
                                 $tmp3$$Register, $tmp4$$Register,
                                 icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
11157 
// String.indexOf, UTF-16 haystack with a constant single-char Latin-1
// needle (immI_1); linear-scan helper, needle length as immediate.
instruct string_indexof_conUL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2,
                              immI_1 int_cnt2, iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);

  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}
  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof_linearscan($str1$$Register, $str2$$Register,
                                 $cnt1$$Register, zr,
                                 $tmp1$$Register, $tmp2$$Register,
                                 $tmp3$$Register, $tmp4$$Register,
                                 icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
11178 
// Scalar (non-RVV) indexOf(char) over a UTF-16 string. The last argument
// of string_indexof_char is the Latin-1 flag (isL): false here because the
// string is UTF-16.
instruct stringU_indexof_char(iRegP_R11 str1, iRegI_R12 cnt1, iRegI_R13 ch,
                              iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  predicate(!UseRVV && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch, TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);

  format %{ "StringUTF16 IndexOf char[] $str1, $cnt1, $ch -> $result" %}
  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register, $tmp4$$Register, false /* isL */);
  %}
  ins_pipe(pipe_class_memory);
%}
11196 
11197 
// Scalar (non-RVV) indexOf(char) over a Latin-1 string (isL == true).
instruct stringL_indexof_char(iRegP_R11 str1, iRegI_R12 cnt1, iRegI_R13 ch,
                              iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  predicate(!UseRVV && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch, TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);

  format %{ "StringLatin1 IndexOf char[] $str1, $cnt1, $ch -> $result" %}
  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register, $tmp4$$Register, true /* isL */);
  %}
  ins_pipe(pipe_class_memory);
%}
11215 
11216 // clearing of an array
11217 instruct clearArray_reg_reg(iRegL_R29 cnt, iRegP_R28 base, iRegP_R30 tmp1,
11218                             iRegP_R31 tmp2, rFlagsReg cr, Universe dummy)
11219 %{
11220   // temp registers must match the one used in StubGenerator::generate_zero_blocks()
11221   predicate(UseBlockZeroing || !UseRVV);
11222   match(Set dummy (ClearArray cnt base));
11223   effect(USE_KILL cnt, USE_KILL base, TEMP tmp1, TEMP tmp2, KILL cr);
11224 
11225   ins_cost(4 * DEFAULT_COST);
11226   format %{ "ClearArray $cnt, $base\t#@clearArray_reg_reg" %}
11227 
11228   ins_encode %{
11229     address tpc = __ zero_words($base$$Register, $cnt$$Register);
11230     if (tpc == nullptr) {
11231       ciEnv::current()->record_failure("CodeCache is full");
11232       return;
11233     }
11234   %}
11235 
11236   ins_pipe(pipe_class_memory);
11237 %}
11238 
// Clear an array whose word count is a small compile-time constant
// (below the block-zeroing threshold), using inline stores only.
instruct clearArray_imm_reg(immL cnt, iRegP_R28 base, Universe dummy, rFlagsReg cr)
%{
  predicate(!UseRVV && (uint64_t)n->in(2)->get_long()
            < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL base, KILL cr);

  ins_cost(4 * DEFAULT_COST);
  format %{ "ClearArray $cnt, $base\t#@clearArray_imm_reg" %}

  ins_encode %{
    __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
  %}

  ins_pipe(pipe_class_memory);
%}
11255 
// Scalar (non-RVV) String.equals for Latin-1 byte arrays (LL encoding).
instruct string_equalsL(iRegP_R11 str1, iRegP_R13 str2, iRegI_R14 cnt,
                        iRegI_R10 result, rFlagsReg cr)
%{
  predicate(!UseRVV && ((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1, $str2, $cnt -> $result\t#@string_equalsL" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
11271 
// Intrinsic for Arrays.equals on byte arrays (LL encoding).  The final
// argument to arrays_equals is the element size in bytes (1 here).
// Scalar fallback: only selected when RVV is not available.
instruct array_equalsB(iRegP_R11 ary1, iRegP_R12 ary2, iRegI_R10 result,
                       iRegP_R13 tmp1, iRegP_R14 tmp2, iRegP_R15 tmp3)
%{
  predicate(!UseRVV && ((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  // Both array pointers are advanced/destroyed; three scratch regs needed.
  effect(USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{ "Array Equals $ary1, $ary2 -> $result\t#@array_equalsB // KILL all" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
11287 
// Intrinsic for Arrays.equals on char arrays (UU encoding, 16-bit
// elements).  Identical shape to array_equalsB above except the element
// size passed to arrays_equals is 2 bytes.
instruct array_equalsC(iRegP_R11 ary1, iRegP_R12 ary2, iRegI_R10 result,
                       iRegP_R13 tmp1, iRegP_R14 tmp2, iRegP_R15 tmp3)
%{
  predicate(!UseRVV && ((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  // Both array pointers are advanced/destroyed; three scratch regs needed.
  effect(USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{ "Array Equals $ary1, $ary2 -> $result\t#@array_equalsC // KILL all" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
11303 
11304 // fast ArraysSupport.vectorizedHashCode
// fast ArraysSupport.vectorizedHashCode
// Computes the polynomial (31-based) hash of an array in one intrinsic.
// basic_type is a compile-time constant selecting the element type
// interpretation inside MacroAssembler::arrays_hashcode.  result is both
// an input (the initial hash value) and the output register.
instruct arrays_hashcode(iRegP_R11 ary, iRegI_R12 cnt, iRegI_R10 result, immI basic_type,
                         iRegLNoSp tmp1, iRegLNoSp tmp2,
                         iRegLNoSp tmp3, iRegLNoSp tmp4,
                         iRegLNoSp tmp5, iRegLNoSp tmp6, rFlagsReg cr)
%{
  // Scalar fallback; presumably an RVV variant exists elsewhere -- this
  // one is only selected when RVV is off.
  predicate(!UseRVV);
  match(Set result (VectorizedHashCode (Binary ary cnt) (Binary result basic_type)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
         USE_KILL ary, USE_KILL cnt, USE basic_type, KILL cr);

  format %{ "Array HashCode array[] $ary,$cnt,$result,$basic_type -> $result   // KILL all" %}
  ins_encode %{
    __ arrays_hashcode($ary$$Register, $cnt$$Register, $result$$Register,
                       $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                       $tmp4$$Register, $tmp5$$Register, $tmp6$$Register,
                       (BasicType)$basic_type$$constant);
  %}
  ins_pipe(pipe_class_memory);
%}
11324 
11325 // ============================================================================
11326 // Safepoint Instructions
11327 
// Safepoint poll: a load from the thread-local polling page.  The page is
// armed (made unreadable) by the VM to trap threads at a safepoint; the
// poll_type relocation lets the signal handler identify this load.
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  ins_cost(2 * LOAD_COST);
  format %{
    "lwu zr, [$poll]\t# Safepoint: poll for GC, #@safePoint"
  %}
  ins_encode %{
    // Load is discarded; only the access (and its possible fault) matters.
    __ read_polling_page(as_Register($poll$$reg), 0, relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
11341 
11342 // ============================================================================
11343 // This name is KNOWN by the ADLC and cannot be changed.
11344 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
11345 // for this guy.
// ============================================================================
// This name is KNOWN by the ADLC and cannot be changed.
// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
// for this guy.
// Zero-size, zero-cost: the javaThread_RegP operand class pins dst to the
// dedicated thread register, which already holds the current JavaThread,
// so no instruction needs to be emitted.
instruct tlsLoadP(javaThread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty, #@tlsLoadP" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
11360 
11361 // inlined locking and unlocking
11362 // using t1 as the 'flag' register to bridge the BoolNode producers and consumers
// inlined locking and unlocking
// using t1 as the 'flag' register to bridge the BoolNode producers and consumers
// Fast-path monitor enter: MacroAssembler::fast_lock attempts the inline
// lock and communicates success/failure through cr for the consuming
// branch; the slow path is taken elsewhere when this fails.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box,
                     iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegPNoSp tmp4)
%{
  match(Set cr (FastLock object box));
  // Four scratch registers are required by the lock sequence.
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4);

  ins_cost(10 * DEFAULT_COST);
  format %{ "fastlock $object,$box\t! kills $tmp1,$tmp2,$tmp3,$tmp4 #@cmpFastLock" %}

  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register,
                 $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register);
  %}

  ins_pipe(pipe_serial);
%}
11379 
11380 // using t1 as the 'flag' register to bridge the BoolNode producers and consumers
// using t1 as the 'flag' register to bridge the BoolNode producers and consumers
// Fast-path monitor exit, the counterpart of cmpFastLock above; needs one
// fewer scratch register than the lock sequence.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box,
                       iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3);

  ins_cost(10 * DEFAULT_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp1,$tmp2,$tmp3 #@cmpFastUnlock" %}

  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register,
                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_serial);
%}
11397 
11398 // Tail Call; Jump from runtime stub to Java code.
11399 // Also known as an 'interprocedural jump'.
11400 // Target of jump will eventually return to caller.
11401 // TailJump below removes the return address.
11402 // Don't use fp for 'jump_target' because a MachEpilogNode has already been
11403 // emitted just above the TailCall which has reset fp to the caller state.
// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
// Don't use fp for 'jump_target' because a MachEpilogNode has already been
// emitted just above the TailCall which has reset fp to the caller state.
// method_oop is pinned to the inline-cache register so the callee can
// find its Method*; only jump_target feeds the actual encoding.
instruct TailCalljmpInd(iRegPNoSpNoFp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(BRANCH_COST);

  format %{ "jalr $jump_target\t# $method_oop holds method oop, #@TailCalljmpInd." %}

  ins_encode(riscv_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
11416 
// Interprocedural jump carrying an exception oop (pinned to R10) to an
// exception handler; unlike TailCall above, the return address is
// discarded (see comment on TailCalljmpInd).
instruct TailjmpInd(iRegPNoSpNoFp jump_target, iRegP_R10 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(ALU_COST + BRANCH_COST);

  format %{ "jalr $jump_target\t# $ex_oop holds exception oop, #@TailjmpInd." %}

  ins_encode(riscv_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
11429 
11430 // Forward exception.
// Forward exception.
// Unconditional far jump to the shared forward_exception stub, which
// dispatches the pending exception to the proper handler.
instruct ForwardExceptionjmp()
%{
  match(ForwardException);

  ins_cost(BRANCH_COST);

  format %{ "j forward_exception_stub\t#@ForwardException" %}

  ins_encode %{
    __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
  %}

  ins_pipe(pipe_class_call);
%}
11445 
11446 // Create exception oop: created by stack-crawling runtime code.
11447 // Created exception is now available to this handler, and is setup
11448 // just prior to jumping to this handler. No code emitted.
// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
// Zero-size: merely tells the register allocator the exception oop
// arrives in R10.
instruct CreateException(iRegP_R10 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  ins_cost(0);
  format %{ " -- \t// exception oop; no code emitted, #@CreateException" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
11462 
11463 // Rethrow exception: The exception oop will come in the first
11464 // argument position. Then JUMP (not call) to the rethrow stub code.
// Rethrow exception: The exception oop will come in the first
// argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException()
%{
  match(Rethrow);

  ins_cost(BRANCH_COST);

  format %{ "j rethrow_stub\t#@RethrowException" %}

  // Encoding is shared via the riscv_enc_rethrow encode class.
  ins_encode(riscv_enc_rethrow());

  ins_pipe(pipe_class_call);
%}
11477 
11478 // Return Instruction
11479 // epilog node loads ret address into ra as part of frame pop
// Return Instruction
// epilog node loads ret address into ra as part of frame pop
instruct Ret()
%{
  match(Return);

  ins_cost(BRANCH_COST);
  format %{ "ret\t// return register, #@Ret" %}

  ins_encode(riscv_enc_ret());

  ins_pipe(pipe_branch);
%}
11491 
11492 // Die now.
// Die now.
// Matches the Halt node.  When the compiler can prove the Halt is
// unreachable (is_reachable() false) nothing is emitted; otherwise a
// stop() with the textual halt reason is generated to abort the VM.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(BRANCH_COST);

  format %{ "#@ShouldNotReachHere" %}

  ins_encode %{
    if (is_reachable()) {
      const char* str = __ code_string(_halt_reason);
      __ stop(str);
    }
  %}

  ins_pipe(pipe_class_default);
%}
11509 
11510 
11511 //----------PEEPHOLE RULES-----------------------------------------------------
11512 // These must follow all instruction definitions as they use the names
11513 // defined in the instructions definitions.
11514 //
11515 // peepmatch ( root_instr_name [preceding_instruction]* );
11516 //
11517 // peepconstraint %{
11518 // (instruction_number.operand_name relational_op instruction_number.operand_name
11519 //  [, ...] );
11520 // // instruction numbers are zero-based using left to right order in peepmatch
11521 //
11522 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
11523 // // provide an instruction_number.operand_name for each operand that appears
11524 // // in the replacement instruction's match rule
11525 //
11526 // ---------VM FLAGS---------------------------------------------------------
11527 //
11528 // All peephole optimizations can be turned off using -XX:-OptoPeephole
11529 //
11530 // Each peephole rule is given an identifying number starting with zero and
11531 // increasing by one in the order seen by the parser.  An individual peephole
11532 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
11533 // on the command-line.
11534 //
11535 // ---------CURRENT LIMITATIONS----------------------------------------------
11536 //
11537 // Only match adjacent instructions in same basic block
11538 // Only equality constraints
11539 // Only constraints between operands, not (0.dest_reg == RAX_enc)
11540 // Only one replacement instruction
11541 //
11542 //----------SMARTSPILL RULES---------------------------------------------------
11543 // These must follow all instruction definitions as they use the names
11544 // defined in the instructions definitions.
11545 
11546 // Local Variables:
11547 // mode: c++
11548 // End: