1 //
    2 // Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
    3 // Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
    4 // Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved.
    5 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    6 //
    7 // This code is free software; you can redistribute it and/or modify it
    8 // under the terms of the GNU General Public License version 2 only, as
    9 // published by the Free Software Foundation.
   10 //
   11 // This code is distributed in the hope that it will be useful, but WITHOUT
   12 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   13 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   14 // version 2 for more details (a copy is included in the LICENSE file that
   15 // accompanied this code).
   16 //
   17 // You should have received a copy of the GNU General Public License version
   18 // 2 along with this work; if not, write to the Free Software Foundation,
   19 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   20 //
   21 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   22 // or visit www.oracle.com if you need additional information or have any
   23 // questions.
   24 //
   25 //
   26 
   27 // RISCV Architecture Description File
   28 
   29 //----------REGISTER DEFINITION BLOCK------------------------------------------
   30 // This information is used by the matcher and the register allocator to
   31 // describe individual registers and classes of registers within the target
   32 // architecture.
   33 
   34 register %{
   35 //----------Architecture Description Register Definitions----------------------
   36 // General Registers
   37 // "reg_def"  name ( register save type, C convention save type,
   38 //                   ideal register type, encoding );
   39 // Register Save Types:
   40 //
   41 // NS  = No-Save:       The register allocator assumes that these registers
   42 //                      can be used without saving upon entry to the method, &
   43 //                      that they do not need to be saved at call sites.
   44 //
   45 // SOC = Save-On-Call:  The register allocator assumes that these registers
   46 //                      can be used without saving upon entry to the method,
   47 //                      but that they must be saved at call sites.
   48 //
   49 // SOE = Save-On-Entry: The register allocator assumes that these registers
   50 //                      must be saved before using them upon entry to the
   51 //                      method, but they do not need to be saved at call
   52 //                      sites.
   53 //
   54 // AS  = Always-Save:   The register allocator assumes that these registers
   55 //                      must be saved before using them upon entry to the
   56 //                      method, & that they must be saved at call sites.
   57 //
   58 // Ideal Register Type is used to determine how to save & restore a
   59 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
   60 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
   61 //
   62 // The encoding number is the actual bit-pattern placed into the opcodes.
   63 
// We must define the 64 bit int registers in two 32 bit halves, the
// real lower register and a virtual upper half register. Upper halves
// are used by the register allocator but are not actually supplied as
// operands to memory ops.
//
// We follow the C1 compiler in classifying registers:
//
//   x7, x9-x17, x27-x31 volatile (caller save)
//   x0-x4, x8, x23 system (no save, no allocate)
//   x5-x6 non-allocatable (so we can use them as temporary regs)

//
// As regards Java usage, we don't use any callee save registers
// because this makes it difficult to de-optimise a frame (see comment
// in x86 implementation of Deoptimization::unwind_callee_save_values).
//
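// Illustration of the half-register scheme (descriptive only): x10 is
// modelled below as the pair R10/R10_H; the allocator reasons about both
// 32-bit halves, but only the real lower half R10 is ever supplied as an
// operand to memory ops.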
   80 
   81 // General Registers
   82 
   83 reg_def R0      ( NS,  NS,  Op_RegI, 0,  x0->as_VMReg()         ); // zr
   84 reg_def R0_H    ( NS,  NS,  Op_RegI, 0,  x0->as_VMReg()->next() );
   85 reg_def R1      ( NS,  SOC, Op_RegI, 1,  x1->as_VMReg()         ); // ra
   86 reg_def R1_H    ( NS,  SOC, Op_RegI, 1,  x1->as_VMReg()->next() );
   87 reg_def R2      ( NS,  NS,  Op_RegI, 2,  x2->as_VMReg()         ); // sp
   88 reg_def R2_H    ( NS,  NS,  Op_RegI, 2,  x2->as_VMReg()->next() );
   89 reg_def R3      ( NS,  NS,  Op_RegI, 3,  x3->as_VMReg()         ); // gp
   90 reg_def R3_H    ( NS,  NS,  Op_RegI, 3,  x3->as_VMReg()->next() );
   91 reg_def R4      ( NS,  NS,  Op_RegI, 4,  x4->as_VMReg()         ); // tp
   92 reg_def R4_H    ( NS,  NS,  Op_RegI, 4,  x4->as_VMReg()->next() );
   93 reg_def R7      ( SOC, SOC, Op_RegI, 7,  x7->as_VMReg()         );
   94 reg_def R7_H    ( SOC, SOC, Op_RegI, 7,  x7->as_VMReg()->next() );
   95 reg_def R8      ( NS,  SOE, Op_RegI, 8,  x8->as_VMReg()         ); // fp
   96 reg_def R8_H    ( NS,  SOE, Op_RegI, 8,  x8->as_VMReg()->next() );
   97 reg_def R9      ( SOC, SOE, Op_RegI, 9,  x9->as_VMReg()         );
   98 reg_def R9_H    ( SOC, SOE, Op_RegI, 9,  x9->as_VMReg()->next() );
   99 reg_def R10     ( SOC, SOC, Op_RegI, 10, x10->as_VMReg()        );
  100 reg_def R10_H   ( SOC, SOC, Op_RegI, 10, x10->as_VMReg()->next());
  101 reg_def R11     ( SOC, SOC, Op_RegI, 11, x11->as_VMReg()        );
  102 reg_def R11_H   ( SOC, SOC, Op_RegI, 11, x11->as_VMReg()->next());
  103 reg_def R12     ( SOC, SOC, Op_RegI, 12, x12->as_VMReg()        );
  104 reg_def R12_H   ( SOC, SOC, Op_RegI, 12, x12->as_VMReg()->next());
  105 reg_def R13     ( SOC, SOC, Op_RegI, 13, x13->as_VMReg()        );
  106 reg_def R13_H   ( SOC, SOC, Op_RegI, 13, x13->as_VMReg()->next());
  107 reg_def R14     ( SOC, SOC, Op_RegI, 14, x14->as_VMReg()        );
  108 reg_def R14_H   ( SOC, SOC, Op_RegI, 14, x14->as_VMReg()->next());
  109 reg_def R15     ( SOC, SOC, Op_RegI, 15, x15->as_VMReg()        );
  110 reg_def R15_H   ( SOC, SOC, Op_RegI, 15, x15->as_VMReg()->next());
  111 reg_def R16     ( SOC, SOC, Op_RegI, 16, x16->as_VMReg()        );
  112 reg_def R16_H   ( SOC, SOC, Op_RegI, 16, x16->as_VMReg()->next());
  113 reg_def R17     ( SOC, SOC, Op_RegI, 17, x17->as_VMReg()        );
  114 reg_def R17_H   ( SOC, SOC, Op_RegI, 17, x17->as_VMReg()->next());
  115 reg_def R18     ( SOC, SOE, Op_RegI, 18, x18->as_VMReg()        );
  116 reg_def R18_H   ( SOC, SOE, Op_RegI, 18, x18->as_VMReg()->next());
  117 reg_def R19     ( SOC, SOE, Op_RegI, 19, x19->as_VMReg()        );
  118 reg_def R19_H   ( SOC, SOE, Op_RegI, 19, x19->as_VMReg()->next());
  119 reg_def R20     ( SOC, SOE, Op_RegI, 20, x20->as_VMReg()        ); // caller esp
  120 reg_def R20_H   ( SOC, SOE, Op_RegI, 20, x20->as_VMReg()->next());
  121 reg_def R21     ( SOC, SOE, Op_RegI, 21, x21->as_VMReg()        );
  122 reg_def R21_H   ( SOC, SOE, Op_RegI, 21, x21->as_VMReg()->next());
  123 reg_def R22     ( SOC, SOE, Op_RegI, 22, x22->as_VMReg()        );
  124 reg_def R22_H   ( SOC, SOE, Op_RegI, 22, x22->as_VMReg()->next());
  125 reg_def R23     ( NS,  SOE, Op_RegI, 23, x23->as_VMReg()        ); // java thread
  126 reg_def R23_H   ( NS,  SOE, Op_RegI, 23, x23->as_VMReg()->next());
  127 reg_def R24     ( SOC, SOE, Op_RegI, 24, x24->as_VMReg()        );
  128 reg_def R24_H   ( SOC, SOE, Op_RegI, 24, x24->as_VMReg()->next());
  129 reg_def R25     ( SOC, SOE, Op_RegI, 25, x25->as_VMReg()        );
  130 reg_def R25_H   ( SOC, SOE, Op_RegI, 25, x25->as_VMReg()->next());
  131 reg_def R26     ( SOC, SOE, Op_RegI, 26, x26->as_VMReg()        );
  132 reg_def R26_H   ( SOC, SOE, Op_RegI, 26, x26->as_VMReg()->next());
  133 reg_def R27     ( SOC, SOE, Op_RegI, 27, x27->as_VMReg()        ); // heapbase
  134 reg_def R27_H   ( SOC, SOE, Op_RegI, 27, x27->as_VMReg()->next());
  135 reg_def R28     ( SOC, SOC, Op_RegI, 28, x28->as_VMReg()        );
  136 reg_def R28_H   ( SOC, SOC, Op_RegI, 28, x28->as_VMReg()->next());
  137 reg_def R29     ( SOC, SOC, Op_RegI, 29, x29->as_VMReg()        );
  138 reg_def R29_H   ( SOC, SOC, Op_RegI, 29, x29->as_VMReg()->next());
  139 reg_def R30     ( SOC, SOC, Op_RegI, 30, x30->as_VMReg()        );
  140 reg_def R30_H   ( SOC, SOC, Op_RegI, 30, x30->as_VMReg()->next());
  141 reg_def R31     ( SOC, SOC, Op_RegI, 31, x31->as_VMReg()        );
  142 reg_def R31_H   ( SOC, SOC, Op_RegI, 31, x31->as_VMReg()->next());
  143 
  144 // ----------------------------
  145 // Float/Double Registers
  146 // ----------------------------
  147 
  148 // Double Registers
  149 
  150 // The rules of ADL require that double registers be defined in pairs.
  151 // Each pair must be two 32-bit values, but not necessarily a pair of
  152 // single float registers. In each pair, ADLC-assigned register numbers
  153 // must be adjacent, with the lower number even. Finally, when the
  154 // CPU stores such a register pair to memory, the word associated with
  155 // the lower ADLC-assigned number must be stored to the lower address.
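// For example, F0/F0_H below form such a pair: their ADLC-assigned numbers
// are adjacent with the lower one even, and the F0 word is the one stored
// at the lower address.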
  156 
  157 // RISCV has 32 floating-point registers. Each can store a single
  158 // or double precision floating-point value.
  159 
// For Java use, float registers f0-f31 are always save-on-call, whereas
// the platform ABI treats f8-f9 and f18-f27 as callee-save. Other
// float registers are SOC as per the platform spec.
  163 
  164 reg_def F0    ( SOC, SOC, Op_RegF,  0,  f0->as_VMReg()          );
  165 reg_def F0_H  ( SOC, SOC, Op_RegF,  0,  f0->as_VMReg()->next()  );
  166 reg_def F1    ( SOC, SOC, Op_RegF,  1,  f1->as_VMReg()          );
  167 reg_def F1_H  ( SOC, SOC, Op_RegF,  1,  f1->as_VMReg()->next()  );
  168 reg_def F2    ( SOC, SOC, Op_RegF,  2,  f2->as_VMReg()          );
  169 reg_def F2_H  ( SOC, SOC, Op_RegF,  2,  f2->as_VMReg()->next()  );
  170 reg_def F3    ( SOC, SOC, Op_RegF,  3,  f3->as_VMReg()          );
  171 reg_def F3_H  ( SOC, SOC, Op_RegF,  3,  f3->as_VMReg()->next()  );
  172 reg_def F4    ( SOC, SOC, Op_RegF,  4,  f4->as_VMReg()          );
  173 reg_def F4_H  ( SOC, SOC, Op_RegF,  4,  f4->as_VMReg()->next()  );
  174 reg_def F5    ( SOC, SOC, Op_RegF,  5,  f5->as_VMReg()          );
  175 reg_def F5_H  ( SOC, SOC, Op_RegF,  5,  f5->as_VMReg()->next()  );
  176 reg_def F6    ( SOC, SOC, Op_RegF,  6,  f6->as_VMReg()          );
  177 reg_def F6_H  ( SOC, SOC, Op_RegF,  6,  f6->as_VMReg()->next()  );
  178 reg_def F7    ( SOC, SOC, Op_RegF,  7,  f7->as_VMReg()          );
  179 reg_def F7_H  ( SOC, SOC, Op_RegF,  7,  f7->as_VMReg()->next()  );
  180 reg_def F8    ( SOC, SOE, Op_RegF,  8,  f8->as_VMReg()          );
  181 reg_def F8_H  ( SOC, SOE, Op_RegF,  8,  f8->as_VMReg()->next()  );
  182 reg_def F9    ( SOC, SOE, Op_RegF,  9,  f9->as_VMReg()          );
  183 reg_def F9_H  ( SOC, SOE, Op_RegF,  9,  f9->as_VMReg()->next()  );
  184 reg_def F10   ( SOC, SOC, Op_RegF,  10, f10->as_VMReg()         );
  185 reg_def F10_H ( SOC, SOC, Op_RegF,  10, f10->as_VMReg()->next() );
  186 reg_def F11   ( SOC, SOC, Op_RegF,  11, f11->as_VMReg()         );
  187 reg_def F11_H ( SOC, SOC, Op_RegF,  11, f11->as_VMReg()->next() );
  188 reg_def F12   ( SOC, SOC, Op_RegF,  12, f12->as_VMReg()         );
  189 reg_def F12_H ( SOC, SOC, Op_RegF,  12, f12->as_VMReg()->next() );
  190 reg_def F13   ( SOC, SOC, Op_RegF,  13, f13->as_VMReg()         );
  191 reg_def F13_H ( SOC, SOC, Op_RegF,  13, f13->as_VMReg()->next() );
  192 reg_def F14   ( SOC, SOC, Op_RegF,  14, f14->as_VMReg()         );
  193 reg_def F14_H ( SOC, SOC, Op_RegF,  14, f14->as_VMReg()->next() );
  194 reg_def F15   ( SOC, SOC, Op_RegF,  15, f15->as_VMReg()         );
  195 reg_def F15_H ( SOC, SOC, Op_RegF,  15, f15->as_VMReg()->next() );
  196 reg_def F16   ( SOC, SOC, Op_RegF,  16, f16->as_VMReg()         );
  197 reg_def F16_H ( SOC, SOC, Op_RegF,  16, f16->as_VMReg()->next() );
  198 reg_def F17   ( SOC, SOC, Op_RegF,  17, f17->as_VMReg()         );
  199 reg_def F17_H ( SOC, SOC, Op_RegF,  17, f17->as_VMReg()->next() );
  200 reg_def F18   ( SOC, SOE, Op_RegF,  18, f18->as_VMReg()         );
  201 reg_def F18_H ( SOC, SOE, Op_RegF,  18, f18->as_VMReg()->next() );
  202 reg_def F19   ( SOC, SOE, Op_RegF,  19, f19->as_VMReg()         );
  203 reg_def F19_H ( SOC, SOE, Op_RegF,  19, f19->as_VMReg()->next() );
  204 reg_def F20   ( SOC, SOE, Op_RegF,  20, f20->as_VMReg()         );
  205 reg_def F20_H ( SOC, SOE, Op_RegF,  20, f20->as_VMReg()->next() );
  206 reg_def F21   ( SOC, SOE, Op_RegF,  21, f21->as_VMReg()         );
  207 reg_def F21_H ( SOC, SOE, Op_RegF,  21, f21->as_VMReg()->next() );
  208 reg_def F22   ( SOC, SOE, Op_RegF,  22, f22->as_VMReg()         );
  209 reg_def F22_H ( SOC, SOE, Op_RegF,  22, f22->as_VMReg()->next() );
  210 reg_def F23   ( SOC, SOE, Op_RegF,  23, f23->as_VMReg()         );
  211 reg_def F23_H ( SOC, SOE, Op_RegF,  23, f23->as_VMReg()->next() );
  212 reg_def F24   ( SOC, SOE, Op_RegF,  24, f24->as_VMReg()         );
  213 reg_def F24_H ( SOC, SOE, Op_RegF,  24, f24->as_VMReg()->next() );
  214 reg_def F25   ( SOC, SOE, Op_RegF,  25, f25->as_VMReg()         );
  215 reg_def F25_H ( SOC, SOE, Op_RegF,  25, f25->as_VMReg()->next() );
  216 reg_def F26   ( SOC, SOE, Op_RegF,  26, f26->as_VMReg()         );
  217 reg_def F26_H ( SOC, SOE, Op_RegF,  26, f26->as_VMReg()->next() );
  218 reg_def F27   ( SOC, SOE, Op_RegF,  27, f27->as_VMReg()         );
  219 reg_def F27_H ( SOC, SOE, Op_RegF,  27, f27->as_VMReg()->next() );
  220 reg_def F28   ( SOC, SOC, Op_RegF,  28, f28->as_VMReg()         );
  221 reg_def F28_H ( SOC, SOC, Op_RegF,  28, f28->as_VMReg()->next() );
  222 reg_def F29   ( SOC, SOC, Op_RegF,  29, f29->as_VMReg()         );
  223 reg_def F29_H ( SOC, SOC, Op_RegF,  29, f29->as_VMReg()->next() );
  224 reg_def F30   ( SOC, SOC, Op_RegF,  30, f30->as_VMReg()         );
  225 reg_def F30_H ( SOC, SOC, Op_RegF,  30, f30->as_VMReg()->next() );
  226 reg_def F31   ( SOC, SOC, Op_RegF,  31, f31->as_VMReg()         );
  227 reg_def F31_H ( SOC, SOC, Op_RegF,  31, f31->as_VMReg()->next() );
  228 
  229 // ----------------------------
  230 // Vector Registers
  231 // ----------------------------
  232 
  233 // For RVV vector registers, we simply extend vector register size to 4
  234 // 'logical' slots. This is nominally 128 bits but it actually covers
  235 // all possible 'physical' RVV vector register lengths from 128 ~ 1024
  236 // bits. The 'physical' RVV vector register length is detected during
  237 // startup, so the register allocator is able to identify the correct
  238 // number of bytes needed for an RVV spill/unspill.
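// Illustrative example (numbers follow the description above): with a
// detected VLEN of 256 bits each vector register is still modelled as the
// same 4 logical slots, but a spill or unspill moves the real 32 bytes;
// with the minimum VLEN of 128 bits it moves 16 bytes.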
  239 
  240 reg_def V0    ( SOC, SOC, Op_VecA, 0,  v0->as_VMReg()           );
  241 reg_def V0_H  ( SOC, SOC, Op_VecA, 0,  v0->as_VMReg()->next()   );
  242 reg_def V0_J  ( SOC, SOC, Op_VecA, 0,  v0->as_VMReg()->next(2)  );
  243 reg_def V0_K  ( SOC, SOC, Op_VecA, 0,  v0->as_VMReg()->next(3)  );
  244 
  245 reg_def V1    ( SOC, SOC, Op_VecA, 1,  v1->as_VMReg()           );
  246 reg_def V1_H  ( SOC, SOC, Op_VecA, 1,  v1->as_VMReg()->next()   );
  247 reg_def V1_J  ( SOC, SOC, Op_VecA, 1,  v1->as_VMReg()->next(2)  );
  248 reg_def V1_K  ( SOC, SOC, Op_VecA, 1,  v1->as_VMReg()->next(3)  );
  249 
  250 reg_def V2    ( SOC, SOC, Op_VecA, 2,  v2->as_VMReg()           );
  251 reg_def V2_H  ( SOC, SOC, Op_VecA, 2,  v2->as_VMReg()->next()   );
  252 reg_def V2_J  ( SOC, SOC, Op_VecA, 2,  v2->as_VMReg()->next(2)  );
  253 reg_def V2_K  ( SOC, SOC, Op_VecA, 2,  v2->as_VMReg()->next(3)  );
  254 
  255 reg_def V3    ( SOC, SOC, Op_VecA, 3,  v3->as_VMReg()           );
  256 reg_def V3_H  ( SOC, SOC, Op_VecA, 3,  v3->as_VMReg()->next()   );
  257 reg_def V3_J  ( SOC, SOC, Op_VecA, 3,  v3->as_VMReg()->next(2)  );
  258 reg_def V3_K  ( SOC, SOC, Op_VecA, 3,  v3->as_VMReg()->next(3)  );
  259 
  260 reg_def V4    ( SOC, SOC, Op_VecA, 4,  v4->as_VMReg()           );
  261 reg_def V4_H  ( SOC, SOC, Op_VecA, 4,  v4->as_VMReg()->next()   );
  262 reg_def V4_J  ( SOC, SOC, Op_VecA, 4,  v4->as_VMReg()->next(2)  );
  263 reg_def V4_K  ( SOC, SOC, Op_VecA, 4,  v4->as_VMReg()->next(3)  );
  264 
  265 reg_def V5    ( SOC, SOC, Op_VecA, 5,  v5->as_VMReg()           );
  266 reg_def V5_H  ( SOC, SOC, Op_VecA, 5,  v5->as_VMReg()->next()   );
  267 reg_def V5_J  ( SOC, SOC, Op_VecA, 5,  v5->as_VMReg()->next(2)  );
  268 reg_def V5_K  ( SOC, SOC, Op_VecA, 5,  v5->as_VMReg()->next(3)  );
  269 
  270 reg_def V6    ( SOC, SOC, Op_VecA, 6,  v6->as_VMReg()           );
  271 reg_def V6_H  ( SOC, SOC, Op_VecA, 6,  v6->as_VMReg()->next()   );
  272 reg_def V6_J  ( SOC, SOC, Op_VecA, 6,  v6->as_VMReg()->next(2)  );
  273 reg_def V6_K  ( SOC, SOC, Op_VecA, 6,  v6->as_VMReg()->next(3)  );
  274 
  275 reg_def V7    ( SOC, SOC, Op_VecA, 7,  v7->as_VMReg()           );
  276 reg_def V7_H  ( SOC, SOC, Op_VecA, 7,  v7->as_VMReg()->next()   );
  277 reg_def V7_J  ( SOC, SOC, Op_VecA, 7,  v7->as_VMReg()->next(2)  );
  278 reg_def V7_K  ( SOC, SOC, Op_VecA, 7,  v7->as_VMReg()->next(3)  );
  279 
  280 reg_def V8    ( SOC, SOC, Op_VecA, 8,  v8->as_VMReg()           );
  281 reg_def V8_H  ( SOC, SOC, Op_VecA, 8,  v8->as_VMReg()->next()   );
  282 reg_def V8_J  ( SOC, SOC, Op_VecA, 8,  v8->as_VMReg()->next(2)  );
  283 reg_def V8_K  ( SOC, SOC, Op_VecA, 8,  v8->as_VMReg()->next(3)  );
  284 
  285 reg_def V9    ( SOC, SOC, Op_VecA, 9,  v9->as_VMReg()           );
  286 reg_def V9_H  ( SOC, SOC, Op_VecA, 9,  v9->as_VMReg()->next()   );
  287 reg_def V9_J  ( SOC, SOC, Op_VecA, 9,  v9->as_VMReg()->next(2)  );
  288 reg_def V9_K  ( SOC, SOC, Op_VecA, 9,  v9->as_VMReg()->next(3)  );
  289 
  290 reg_def V10   ( SOC, SOC, Op_VecA, 10, v10->as_VMReg()          );
  291 reg_def V10_H ( SOC, SOC, Op_VecA, 10, v10->as_VMReg()->next()  );
  292 reg_def V10_J ( SOC, SOC, Op_VecA, 10, v10->as_VMReg()->next(2) );
  293 reg_def V10_K ( SOC, SOC, Op_VecA, 10, v10->as_VMReg()->next(3) );
  294 
  295 reg_def V11   ( SOC, SOC, Op_VecA, 11, v11->as_VMReg()          );
  296 reg_def V11_H ( SOC, SOC, Op_VecA, 11, v11->as_VMReg()->next()  );
  297 reg_def V11_J ( SOC, SOC, Op_VecA, 11, v11->as_VMReg()->next(2) );
  298 reg_def V11_K ( SOC, SOC, Op_VecA, 11, v11->as_VMReg()->next(3) );
  299 
  300 reg_def V12   ( SOC, SOC, Op_VecA, 12, v12->as_VMReg()          );
  301 reg_def V12_H ( SOC, SOC, Op_VecA, 12, v12->as_VMReg()->next()  );
  302 reg_def V12_J ( SOC, SOC, Op_VecA, 12, v12->as_VMReg()->next(2) );
  303 reg_def V12_K ( SOC, SOC, Op_VecA, 12, v12->as_VMReg()->next(3) );
  304 
  305 reg_def V13   ( SOC, SOC, Op_VecA, 13, v13->as_VMReg()          );
  306 reg_def V13_H ( SOC, SOC, Op_VecA, 13, v13->as_VMReg()->next()  );
  307 reg_def V13_J ( SOC, SOC, Op_VecA, 13, v13->as_VMReg()->next(2) );
  308 reg_def V13_K ( SOC, SOC, Op_VecA, 13, v13->as_VMReg()->next(3) );
  309 
  310 reg_def V14   ( SOC, SOC, Op_VecA, 14, v14->as_VMReg()          );
  311 reg_def V14_H ( SOC, SOC, Op_VecA, 14, v14->as_VMReg()->next()  );
  312 reg_def V14_J ( SOC, SOC, Op_VecA, 14, v14->as_VMReg()->next(2) );
  313 reg_def V14_K ( SOC, SOC, Op_VecA, 14, v14->as_VMReg()->next(3) );
  314 
  315 reg_def V15   ( SOC, SOC, Op_VecA, 15, v15->as_VMReg()          );
  316 reg_def V15_H ( SOC, SOC, Op_VecA, 15, v15->as_VMReg()->next()  );
  317 reg_def V15_J ( SOC, SOC, Op_VecA, 15, v15->as_VMReg()->next(2) );
  318 reg_def V15_K ( SOC, SOC, Op_VecA, 15, v15->as_VMReg()->next(3) );
  319 
  320 reg_def V16   ( SOC, SOC, Op_VecA, 16, v16->as_VMReg()          );
  321 reg_def V16_H ( SOC, SOC, Op_VecA, 16, v16->as_VMReg()->next()  );
  322 reg_def V16_J ( SOC, SOC, Op_VecA, 16, v16->as_VMReg()->next(2) );
  323 reg_def V16_K ( SOC, SOC, Op_VecA, 16, v16->as_VMReg()->next(3) );
  324 
  325 reg_def V17   ( SOC, SOC, Op_VecA, 17, v17->as_VMReg()          );
  326 reg_def V17_H ( SOC, SOC, Op_VecA, 17, v17->as_VMReg()->next()  );
  327 reg_def V17_J ( SOC, SOC, Op_VecA, 17, v17->as_VMReg()->next(2) );
  328 reg_def V17_K ( SOC, SOC, Op_VecA, 17, v17->as_VMReg()->next(3) );
  329 
  330 reg_def V18   ( SOC, SOC, Op_VecA, 18, v18->as_VMReg()          );
  331 reg_def V18_H ( SOC, SOC, Op_VecA, 18, v18->as_VMReg()->next()  );
  332 reg_def V18_J ( SOC, SOC, Op_VecA, 18, v18->as_VMReg()->next(2) );
  333 reg_def V18_K ( SOC, SOC, Op_VecA, 18, v18->as_VMReg()->next(3) );
  334 
  335 reg_def V19   ( SOC, SOC, Op_VecA, 19, v19->as_VMReg()          );
  336 reg_def V19_H ( SOC, SOC, Op_VecA, 19, v19->as_VMReg()->next()  );
  337 reg_def V19_J ( SOC, SOC, Op_VecA, 19, v19->as_VMReg()->next(2) );
  338 reg_def V19_K ( SOC, SOC, Op_VecA, 19, v19->as_VMReg()->next(3) );
  339 
  340 reg_def V20   ( SOC, SOC, Op_VecA, 20, v20->as_VMReg()          );
  341 reg_def V20_H ( SOC, SOC, Op_VecA, 20, v20->as_VMReg()->next()  );
  342 reg_def V20_J ( SOC, SOC, Op_VecA, 20, v20->as_VMReg()->next(2) );
  343 reg_def V20_K ( SOC, SOC, Op_VecA, 20, v20->as_VMReg()->next(3) );
  344 
  345 reg_def V21   ( SOC, SOC, Op_VecA, 21, v21->as_VMReg()          );
  346 reg_def V21_H ( SOC, SOC, Op_VecA, 21, v21->as_VMReg()->next()  );
  347 reg_def V21_J ( SOC, SOC, Op_VecA, 21, v21->as_VMReg()->next(2) );
  348 reg_def V21_K ( SOC, SOC, Op_VecA, 21, v21->as_VMReg()->next(3) );
  349 
  350 reg_def V22   ( SOC, SOC, Op_VecA, 22, v22->as_VMReg()          );
  351 reg_def V22_H ( SOC, SOC, Op_VecA, 22, v22->as_VMReg()->next()  );
  352 reg_def V22_J ( SOC, SOC, Op_VecA, 22, v22->as_VMReg()->next(2) );
  353 reg_def V22_K ( SOC, SOC, Op_VecA, 22, v22->as_VMReg()->next(3) );
  354 
  355 reg_def V23   ( SOC, SOC, Op_VecA, 23, v23->as_VMReg()          );
  356 reg_def V23_H ( SOC, SOC, Op_VecA, 23, v23->as_VMReg()->next()  );
  357 reg_def V23_J ( SOC, SOC, Op_VecA, 23, v23->as_VMReg()->next(2) );
  358 reg_def V23_K ( SOC, SOC, Op_VecA, 23, v23->as_VMReg()->next(3) );
  359 
  360 reg_def V24   ( SOC, SOC, Op_VecA, 24, v24->as_VMReg()          );
  361 reg_def V24_H ( SOC, SOC, Op_VecA, 24, v24->as_VMReg()->next()  );
  362 reg_def V24_J ( SOC, SOC, Op_VecA, 24, v24->as_VMReg()->next(2) );
  363 reg_def V24_K ( SOC, SOC, Op_VecA, 24, v24->as_VMReg()->next(3) );
  364 
  365 reg_def V25   ( SOC, SOC, Op_VecA, 25, v25->as_VMReg()          );
  366 reg_def V25_H ( SOC, SOC, Op_VecA, 25, v25->as_VMReg()->next()  );
  367 reg_def V25_J ( SOC, SOC, Op_VecA, 25, v25->as_VMReg()->next(2) );
  368 reg_def V25_K ( SOC, SOC, Op_VecA, 25, v25->as_VMReg()->next(3) );
  369 
  370 reg_def V26   ( SOC, SOC, Op_VecA, 26, v26->as_VMReg()          );
  371 reg_def V26_H ( SOC, SOC, Op_VecA, 26, v26->as_VMReg()->next()  );
  372 reg_def V26_J ( SOC, SOC, Op_VecA, 26, v26->as_VMReg()->next(2) );
  373 reg_def V26_K ( SOC, SOC, Op_VecA, 26, v26->as_VMReg()->next(3) );
  374 
  375 reg_def V27   ( SOC, SOC, Op_VecA, 27, v27->as_VMReg()          );
  376 reg_def V27_H ( SOC, SOC, Op_VecA, 27, v27->as_VMReg()->next()  );
  377 reg_def V27_J ( SOC, SOC, Op_VecA, 27, v27->as_VMReg()->next(2) );
  378 reg_def V27_K ( SOC, SOC, Op_VecA, 27, v27->as_VMReg()->next(3) );
  379 
  380 reg_def V28   ( SOC, SOC, Op_VecA, 28, v28->as_VMReg()          );
  381 reg_def V28_H ( SOC, SOC, Op_VecA, 28, v28->as_VMReg()->next()  );
  382 reg_def V28_J ( SOC, SOC, Op_VecA, 28, v28->as_VMReg()->next(2) );
  383 reg_def V28_K ( SOC, SOC, Op_VecA, 28, v28->as_VMReg()->next(3) );
  384 
  385 reg_def V29   ( SOC, SOC, Op_VecA, 29, v29->as_VMReg()          );
  386 reg_def V29_H ( SOC, SOC, Op_VecA, 29, v29->as_VMReg()->next()  );
  387 reg_def V29_J ( SOC, SOC, Op_VecA, 29, v29->as_VMReg()->next(2) );
  388 reg_def V29_K ( SOC, SOC, Op_VecA, 29, v29->as_VMReg()->next(3) );
  389 
  390 reg_def V30   ( SOC, SOC, Op_VecA, 30, v30->as_VMReg()          );
  391 reg_def V30_H ( SOC, SOC, Op_VecA, 30, v30->as_VMReg()->next()  );
  392 reg_def V30_J ( SOC, SOC, Op_VecA, 30, v30->as_VMReg()->next(2) );
  393 reg_def V30_K ( SOC, SOC, Op_VecA, 30, v30->as_VMReg()->next(3) );
  394 
  395 reg_def V31   ( SOC, SOC, Op_VecA, 31, v31->as_VMReg()          );
  396 reg_def V31_H ( SOC, SOC, Op_VecA, 31, v31->as_VMReg()->next()  );
  397 reg_def V31_J ( SOC, SOC, Op_VecA, 31, v31->as_VMReg()->next(2) );
  398 reg_def V31_K ( SOC, SOC, Op_VecA, 31, v31->as_VMReg()->next(3) );
  399 
  400 // ----------------------------
  401 // Special Registers
  402 // ----------------------------
  403 
// On riscv there is no physical flag register, so we use t1 instead
// to bridge the RegFlags semantics in share/opto.
  406 
  407 reg_def RFLAGS   (SOC, SOC, Op_RegFlags, 6, x6->as_VMReg()        );
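// Note: encoding 6 is x6, which is t1 in the standard RISC-V register
// naming, so RFLAGS here is simply an alias for the t1 scratch register.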
  408 
  409 // Specify priority of register selection within phases of register
  410 // allocation.  Highest priority is first.  A useful heuristic is to
  411 // give registers a low priority when they are required by machine
  412 // instructions, like EAX and EDX on I486, and choose no-save registers
  413 // before save-on-call, & save-on-call before save-on-entry.  Registers
  414 // which participate in fixed calling sequences should come last.
  415 // Registers which are used as pairs must fall on an even boundary.
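// chunk0 below follows this heuristic: the caller-saved temporaries and the
// argument registers come first, the callee-saved set next, and the
// fixed-role, non-allocatable registers (java thread, heapbase, tp, fp,
// zr, ra, sp, gp) come last.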
  416 
  417 alloc_class chunk0(
  418     // volatiles
  419     R7,  R7_H,
  420     R28, R28_H,
  421     R29, R29_H,
  422     R30, R30_H,
  423     R31, R31_H,
  424 
  425     // arg registers
  426     R10, R10_H,
  427     R11, R11_H,
  428     R12, R12_H,
  429     R13, R13_H,
  430     R14, R14_H,
  431     R15, R15_H,
  432     R16, R16_H,
  433     R17, R17_H,
  434 
  435     // non-volatiles
  436     R9,  R9_H,
  437     R18, R18_H,
  438     R19, R19_H,
  439     R20, R20_H,
  440     R21, R21_H,
  441     R22, R22_H,
  442     R24, R24_H,
  443     R25, R25_H,
  444     R26, R26_H,
  445 
  446     // non-allocatable registers
  447     R23, R23_H, // java thread
  448     R27, R27_H, // heapbase
  449     R4,  R4_H,  // thread
  450     R8,  R8_H,  // fp
  451     R0,  R0_H,  // zero
  452     R1,  R1_H,  // ra
  453     R2,  R2_H,  // sp
  454     R3,  R3_H,  // gp
  455 );
  456 
  457 alloc_class chunk1(
  458 
  459     // no save
  460     F0,  F0_H,
  461     F1,  F1_H,
  462     F2,  F2_H,
  463     F3,  F3_H,
  464     F4,  F4_H,
  465     F5,  F5_H,
  466     F6,  F6_H,
  467     F7,  F7_H,
  468     F28, F28_H,
  469     F29, F29_H,
  470     F30, F30_H,
  471     F31, F31_H,
  472 
  473     // arg registers
  474     F10, F10_H,
  475     F11, F11_H,
  476     F12, F12_H,
  477     F13, F13_H,
  478     F14, F14_H,
  479     F15, F15_H,
  480     F16, F16_H,
  481     F17, F17_H,
  482 
  483     // non-volatiles
  484     F8,  F8_H,
  485     F9,  F9_H,
  486     F18, F18_H,
  487     F19, F19_H,
  488     F20, F20_H,
  489     F21, F21_H,
  490     F22, F22_H,
  491     F23, F23_H,
  492     F24, F24_H,
  493     F25, F25_H,
  494     F26, F26_H,
  495     F27, F27_H,
  496 );
  497 
  498 alloc_class chunk2(
  499     V0, V0_H, V0_J, V0_K,
  500     V1, V1_H, V1_J, V1_K,
  501     V2, V2_H, V2_J, V2_K,
  502     V3, V3_H, V3_J, V3_K,
  503     V4, V4_H, V4_J, V4_K,
  504     V5, V5_H, V5_J, V5_K,
  505     V6, V6_H, V6_J, V6_K,
  506     V7, V7_H, V7_J, V7_K,
  507     V8, V8_H, V8_J, V8_K,
  508     V9, V9_H, V9_J, V9_K,
  509     V10, V10_H, V10_J, V10_K,
  510     V11, V11_H, V11_J, V11_K,
  511     V12, V12_H, V12_J, V12_K,
  512     V13, V13_H, V13_J, V13_K,
  513     V14, V14_H, V14_J, V14_K,
  514     V15, V15_H, V15_J, V15_K,
  515     V16, V16_H, V16_J, V16_K,
  516     V17, V17_H, V17_J, V17_K,
  517     V18, V18_H, V18_J, V18_K,
  518     V19, V19_H, V19_J, V19_K,
  519     V20, V20_H, V20_J, V20_K,
  520     V21, V21_H, V21_J, V21_K,
  521     V22, V22_H, V22_J, V22_K,
  522     V23, V23_H, V23_J, V23_K,
  523     V24, V24_H, V24_J, V24_K,
  524     V25, V25_H, V25_J, V25_K,
  525     V26, V26_H, V26_J, V26_K,
  526     V27, V27_H, V27_J, V27_K,
  527     V28, V28_H, V28_J, V28_K,
  528     V29, V29_H, V29_J, V29_K,
  529     V30, V30_H, V30_J, V30_K,
  530     V31, V31_H, V31_J, V31_K,
  531 );
  532 
  533 alloc_class chunk3(RFLAGS);
  534 
  535 //----------Architecture Description Register Classes--------------------------
  536 // Several register classes are automatically defined based upon information in
  537 // this architecture description.
  538 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
  539 // 2) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
  540 //
  541 
  542 // Class for all 32 bit general purpose registers
  543 reg_class all_reg32(
  544     R0,
  545     R1,
  546     R2,
  547     R3,
  548     R4,
  549     R7,
  550     R8,
  551     R9,
  552     R10,
  553     R11,
  554     R12,
  555     R13,
  556     R14,
  557     R15,
  558     R16,
  559     R17,
  560     R18,
  561     R19,
  562     R20,
  563     R21,
  564     R22,
  565     R23,
  566     R24,
  567     R25,
  568     R26,
  569     R27,
  570     R28,
  571     R29,
  572     R30,
  573     R31
  574 );
  575 
  576 // Class for any 32 bit integer registers (excluding zr)
  577 reg_class any_reg32 %{
  578   return _ANY_REG32_mask;
  579 %}
  580 
  581 // Singleton class for R10 int register
  582 reg_class int_r10_reg(R10);
  583 
  584 // Singleton class for R12 int register
  585 reg_class int_r12_reg(R12);
  586 
  587 // Singleton class for R13 int register
  588 reg_class int_r13_reg(R13);
  589 
  590 // Singleton class for R14 int register
  591 reg_class int_r14_reg(R14);
  592 
  593 // Class for all long integer registers
  594 reg_class all_reg(
  595     R0,  R0_H,
  596     R1,  R1_H,
  597     R2,  R2_H,
  598     R3,  R3_H,
  599     R4,  R4_H,
  600     R7,  R7_H,
  601     R8,  R8_H,
  602     R9,  R9_H,
  603     R10, R10_H,
  604     R11, R11_H,
  605     R12, R12_H,
  606     R13, R13_H,
  607     R14, R14_H,
  608     R15, R15_H,
  609     R16, R16_H,
  610     R17, R17_H,
  611     R18, R18_H,
  612     R19, R19_H,
  613     R20, R20_H,
  614     R21, R21_H,
  615     R22, R22_H,
  616     R23, R23_H,
  617     R24, R24_H,
  618     R25, R25_H,
  619     R26, R26_H,
  620     R27, R27_H,
  621     R28, R28_H,
  622     R29, R29_H,
  623     R30, R30_H,
  624     R31, R31_H
  625 );
  626 
  627 // Class for all long integer registers (excluding zr)
  628 reg_class any_reg %{
  629   return _ANY_REG_mask;
  630 %}
  631 
  632 // Class for non-allocatable 32 bit registers
  633 reg_class non_allocatable_reg32(
  634     R0,                       // zr
  635     R1,                       // ra
  636     R2,                       // sp
  637     R3,                       // gp
  638     R4,                       // tp
  639     R23                       // java thread
  640 );
  641 
  642 // Class for non-allocatable 64 bit registers
  643 reg_class non_allocatable_reg(
  644     R0,  R0_H,                // zr
  645     R1,  R1_H,                // ra
  646     R2,  R2_H,                // sp
  647     R3,  R3_H,                // gp
  648     R4,  R4_H,                // tp
  649     R23, R23_H                // java thread
  650 );
  651 
  652 // Class for all non-special integer registers
  653 reg_class no_special_reg32 %{
  654   return _NO_SPECIAL_REG32_mask;
  655 %}
  656 
  657 // Class for all non-special long integer registers
  658 reg_class no_special_reg %{
  659   return _NO_SPECIAL_REG_mask;
  660 %}
  661 
  662 reg_class ptr_reg %{
  663   return _PTR_REG_mask;
  664 %}
  665 
  666 // Class for all non_special pointer registers
  667 reg_class no_special_ptr_reg %{
  668   return _NO_SPECIAL_PTR_REG_mask;
  669 %}
  670 
  671 // Class for all non_special pointer registers (excluding fp)
  672 reg_class no_special_no_fp_ptr_reg %{
  673   return _NO_SPECIAL_NO_FP_PTR_REG_mask;
  674 %}
  675 
  676 // Class for 64 bit register r10
  677 reg_class r10_reg(
  678     R10, R10_H
  679 );
  680 
  681 // Class for 64 bit register r11
  682 reg_class r11_reg(
  683     R11, R11_H
  684 );
  685 
  686 // Class for 64 bit register r12
  687 reg_class r12_reg(
  688     R12, R12_H
  689 );
  690 
  691 // Class for 64 bit register r13
  692 reg_class r13_reg(
  693     R13, R13_H
  694 );
  695 
  696 // Class for 64 bit register r14
  697 reg_class r14_reg(
  698     R14, R14_H
  699 );
  700 
  701 // Class for 64 bit register r15
  702 reg_class r15_reg(
  703     R15, R15_H
  704 );
  705 
  706 // Class for 64 bit register r16
  707 reg_class r16_reg(
  708     R16, R16_H
  709 );
  710 
  711 // Class for method register
  712 reg_class method_reg(
  713     R31, R31_H
  714 );
  715 
  716 // Class for java thread register
  717 reg_class java_thread_reg(
  718     R23, R23_H
  719 );
  720 
  721 reg_class r28_reg(
  722     R28, R28_H
  723 );
  724 
  725 reg_class r29_reg(
  726     R29, R29_H
  727 );
  728 
  729 reg_class r30_reg(
  730     R30, R30_H
  731 );
  732 
  733 reg_class r31_reg(
  734     R31, R31_H
  735 );
  736 
// Class for the zero register
  738 reg_class zr_reg(
  739     R0, R0_H
  740 );
  741 
  742 // Class for thread register
  743 reg_class thread_reg(
  744     R4, R4_H
  745 );
  746 
  747 // Class for frame pointer register
  748 reg_class fp_reg(
  749     R8, R8_H
  750 );
  751 
  752 // Class for link register
  753 reg_class ra_reg(
  754     R1, R1_H
  755 );
  756 
  757 // Class for long sp register
  758 reg_class sp_reg(
  759     R2, R2_H
  760 );
  761 
  762 // Class for all float registers
  763 reg_class float_reg(
  764     F0,
  765     F1,
  766     F2,
  767     F3,
  768     F4,
  769     F5,
  770     F6,
  771     F7,
  772     F8,
  773     F9,
  774     F10,
  775     F11,
  776     F12,
  777     F13,
  778     F14,
  779     F15,
  780     F16,
  781     F17,
  782     F18,
  783     F19,
  784     F20,
  785     F21,
  786     F22,
  787     F23,
  788     F24,
  789     F25,
  790     F26,
  791     F27,
  792     F28,
  793     F29,
  794     F30,
  795     F31
  796 );
  797 
  798 // Double precision float registers have virtual `high halves' that
  799 // are needed by the allocator.
  800 // Class for all double registers
  801 reg_class double_reg(
  802     F0,  F0_H,
  803     F1,  F1_H,
  804     F2,  F2_H,
  805     F3,  F3_H,
  806     F4,  F4_H,
  807     F5,  F5_H,
  808     F6,  F6_H,
  809     F7,  F7_H,
  810     F8,  F8_H,
  811     F9,  F9_H,
  812     F10, F10_H,
  813     F11, F11_H,
  814     F12, F12_H,
  815     F13, F13_H,
  816     F14, F14_H,
  817     F15, F15_H,
  818     F16, F16_H,
  819     F17, F17_H,
  820     F18, F18_H,
  821     F19, F19_H,
  822     F20, F20_H,
  823     F21, F21_H,
  824     F22, F22_H,
  825     F23, F23_H,
  826     F24, F24_H,
  827     F25, F25_H,
  828     F26, F26_H,
  829     F27, F27_H,
  830     F28, F28_H,
  831     F29, F29_H,
  832     F30, F30_H,
  833     F31, F31_H
  834 );
  835 
  836 // Class for RVV vector registers
  837 // Note: v0, v30 and v31 are used as mask registers.
  838 reg_class vectora_reg(
  839     V1, V1_H, V1_J, V1_K,
  840     V2, V2_H, V2_J, V2_K,
  841     V3, V3_H, V3_J, V3_K,
  842     V4, V4_H, V4_J, V4_K,
  843     V5, V5_H, V5_J, V5_K,
  844     V6, V6_H, V6_J, V6_K,
  845     V7, V7_H, V7_J, V7_K,
  846     V8, V8_H, V8_J, V8_K,
  847     V9, V9_H, V9_J, V9_K,
  848     V10, V10_H, V10_J, V10_K,
  849     V11, V11_H, V11_J, V11_K,
  850     V12, V12_H, V12_J, V12_K,
  851     V13, V13_H, V13_J, V13_K,
  852     V14, V14_H, V14_J, V14_K,
  853     V15, V15_H, V15_J, V15_K,
  854     V16, V16_H, V16_J, V16_K,
  855     V17, V17_H, V17_J, V17_K,
  856     V18, V18_H, V18_J, V18_K,
  857     V19, V19_H, V19_J, V19_K,
  858     V20, V20_H, V20_J, V20_K,
  859     V21, V21_H, V21_J, V21_K,
  860     V22, V22_H, V22_J, V22_K,
  861     V23, V23_H, V23_J, V23_K,
  862     V24, V24_H, V24_J, V24_K,
  863     V25, V25_H, V25_J, V25_K,
  864     V26, V26_H, V26_J, V26_K,
  865     V27, V27_H, V27_J, V27_K,
  866     V28, V28_H, V28_J, V28_K,
  867     V29, V29_H, V29_J, V29_K
  868 );
  869 
  870 // Class for 64 bit register f0
  871 reg_class f0_reg(
  872     F0, F0_H
  873 );
  874 
  875 // Class for 64 bit register f1
  876 reg_class f1_reg(
  877     F1, F1_H
  878 );
  879 
  880 // Class for 64 bit register f2
  881 reg_class f2_reg(
  882     F2, F2_H
  883 );
  884 
  885 // Class for 64 bit register f3
  886 reg_class f3_reg(
  887     F3, F3_H
  888 );
  889 
  890 // class for vector register v1
  891 reg_class v1_reg(
  892     V1, V1_H, V1_J, V1_K
  893 );
  894 
  895 // class for vector register v2
  896 reg_class v2_reg(
  897     V2, V2_H, V2_J, V2_K
  898 );
  899 
  900 // class for vector register v3
  901 reg_class v3_reg(
  902     V3, V3_H, V3_J, V3_K
  903 );
  904 
  905 // class for vector register v4
  906 reg_class v4_reg(
  907     V4, V4_H, V4_J, V4_K
  908 );
  909 
  910 // class for vector register v5
  911 reg_class v5_reg(
  912     V5, V5_H, V5_J, V5_K
  913 );
  914 
  915 // class for vector register v6
  916 reg_class v6_reg(
  917     V6, V6_H, V6_J, V6_K
  918 );
  919 
  920 // class for vector register v7
  921 reg_class v7_reg(
  922     V7, V7_H, V7_J, V7_K
  923 );
  924 
  925 // class for vector register v8
  926 reg_class v8_reg(
  927     V8, V8_H, V8_J, V8_K
  928 );
  929 
  930 // class for vector register v9
  931 reg_class v9_reg(
  932     V9, V9_H, V9_J, V9_K
  933 );
  934 
  935 // class for vector register v10
  936 reg_class v10_reg(
  937     V10, V10_H, V10_J, V10_K
  938 );
  939 
  940 // class for vector register v11
  941 reg_class v11_reg(
  942     V11, V11_H, V11_J, V11_K
  943 );
  944 
  945 // class for condition codes
  946 reg_class reg_flags(RFLAGS);
  947 
  948 // Class for RVV v0 mask register
  949 // https://github.com/riscv/riscv-v-spec/blob/master/v-spec.adoc#53-vector-masking
  950 // The mask value used to control execution of a masked vector
  951 // instruction is always supplied by vector register v0.
  952 reg_class vmask_reg_v0 (
  953     V0
  954 );
  955 
  956 // Class for RVV mask registers
// We need two more vmask registers to do the vector mask logical ops,
// so define v30 and v31 as mask registers too.
  959 reg_class vmask_reg (
  960     V0,
  961     V30,
  962     V31
  963 );
  964 %}
  965 
  966 //----------DEFINITION BLOCK---------------------------------------------------
  967 // Define name --> value mappings to inform the ADLC of an integer valued name
  968 // Current support includes integer values in the range [0, 0x7FFFFFFF]
  969 // Format:
  970 //        int_def  <name>         ( <int_value>, <expression>);
  971 // Generated Code in ad_<arch>.hpp
  972 //        #define  <name>   (<expression>)
  973 //        // value == <int_value>
  974 // Generated code in ad_<arch>.cpp adlc_verification()
  975 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
  976 //
  977 
// We follow the ppc-aix port in using a simple cost model which ranks
// register operations as cheap, memory ops as more expensive and
// branches as most expensive. The first two have a low as well as a
// normal cost. A huge cost appears to be a way of saying don't do
// something.
  983 
  984 definitions %{
  985   // The default cost (of a register move instruction).
  986   int_def DEFAULT_COST         (  100,               100);
  987   int_def ALU_COST             (  100,  1 * DEFAULT_COST);          // unknown, const, arith, shift, slt,
  988                                                                     // multi, auipc, nop, logical, move
  989   int_def LOAD_COST            (  300,  3 * DEFAULT_COST);          // load, fpload
  990   int_def STORE_COST           (  100,  1 * DEFAULT_COST);          // store, fpstore
  991   int_def XFER_COST            (  300,  3 * DEFAULT_COST);          // mfc, mtc, fcvt, fmove, fcmp
  992   int_def FMVX_COST            (  100,  1 * DEFAULT_COST);          // shuffles with no conversion
  993   int_def BRANCH_COST          (  200,  2 * DEFAULT_COST);          // branch, jmp, call
  994   int_def IMUL_COST            ( 1000, 10 * DEFAULT_COST);          // imul
  995   int_def IDIVSI_COST          ( 3400, 34 * DEFAULT_COST);          // idivsi
  996   int_def IDIVDI_COST          ( 6600, 66 * DEFAULT_COST);          // idivdi
  997   int_def FMUL_SINGLE_COST     (  500,  5 * DEFAULT_COST);          // fmul, fmadd
  998   int_def FMUL_DOUBLE_COST     (  700,  7 * DEFAULT_COST);          // fmul, fmadd
  999   int_def FDIV_COST            ( 2000, 20 * DEFAULT_COST);          // fdiv
 1000   int_def FSQRT_COST           ( 2500, 25 * DEFAULT_COST);          // fsqrt
 1001   int_def VOLATILE_REF_COST    ( 1000, 10 * DEFAULT_COST);
  int_def CACHE_MISS_COST      ( 2000, 20 * DEFAULT_COST);          // typical cache miss penalty
 1003 %}
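// Worked example of how these costs combine (illustrative only): a rule
// that issues a load followed by a branch costs LOAD_COST + BRANCH_COST =
// 500, whereas a single 64-bit integer divide alone costs IDIVDI_COST =
// 6600, so the matcher will strongly prefer alternatives that avoid the
// divide.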
 1004 
 1005 
 1006 
 1007 //----------SOURCE BLOCK-------------------------------------------------------
 1008 // This is a block of C++ code which provides values, functions, and
 1009 // definitions necessary in the rest of the architecture description
 1010 
 1011 source_hpp %{
 1012 
 1013 #include "asm/macroAssembler.hpp"
 1014 #include "gc/shared/barrierSetAssembler.hpp"
 1015 #include "gc/shared/cardTable.hpp"
 1016 #include "gc/shared/cardTableBarrierSet.hpp"
 1017 #include "gc/shared/collectedHeap.hpp"
 1018 #include "opto/addnode.hpp"
 1019 #include "opto/convertnode.hpp"
 1020 #include "runtime/objectMonitor.hpp"
 1021 
 1022 extern RegMask _ANY_REG32_mask;
 1023 extern RegMask _ANY_REG_mask;
 1024 extern RegMask _PTR_REG_mask;
 1025 extern RegMask _NO_SPECIAL_REG32_mask;
 1026 extern RegMask _NO_SPECIAL_REG_mask;
 1027 extern RegMask _NO_SPECIAL_PTR_REG_mask;
 1028 extern RegMask _NO_SPECIAL_NO_FP_PTR_REG_mask;
 1029 
 1030 class CallStubImpl {
 1031 
 1032   //--------------------------------------------------------------
 1033   //---<  Used for optimization in Compile::shorten_branches  >---
 1034   //--------------------------------------------------------------
 1035 
 1036  public:
 1037   // Size of call trampoline stub.
 1038   static uint size_call_trampoline() {
 1039     return 0; // no call trampolines on this platform
 1040   }
 1041 
 1042   // number of relocations needed by a call trampoline stub
 1043   static uint reloc_call_trampoline() {
 1044     return 0; // no call trampolines on this platform
 1045   }
 1046 };
 1047 
 1048 class HandlerImpl {
 1049 
 1050  public:
 1051 
 1052   static int emit_exception_handler(C2_MacroAssembler *masm);
 1053   static int emit_deopt_handler(C2_MacroAssembler* masm);
 1054 
 1055   static uint size_exception_handler() {
 1056     return MacroAssembler::far_branch_size();
 1057   }
 1058 
 1059   static uint size_deopt_handler() {
 1060     // count auipc + far branch
 1061     return NativeInstruction::instruction_size + MacroAssembler::far_branch_size();
 1062   }
 1063 };
 1064 
 1065 class Node::PD {
 1066 public:
 1067   enum NodeFlags {
 1068     _last_flag = Node::_last_flag
 1069   };
 1070 };
 1071 
 1072 bool is_CAS(int opcode, bool maybe_volatile);
 1073 
 1074 // predicate controlling translation of CompareAndSwapX
 1075 bool needs_acquiring_load_reserved(const Node *load);
 1076 
 1077 // predicate controlling addressing modes
 1078 bool size_fits_all_mem_uses(AddPNode* addp, int shift);
 1079 %}
 1080 
 1081 source %{
 1082 
 1083 // Derived RegMask with conditionally allocatable registers
 1084 
 1085 RegMask _ANY_REG32_mask;
 1086 RegMask _ANY_REG_mask;
 1087 RegMask _PTR_REG_mask;
 1088 RegMask _NO_SPECIAL_REG32_mask;
 1089 RegMask _NO_SPECIAL_REG_mask;
 1090 RegMask _NO_SPECIAL_PTR_REG_mask;
 1091 RegMask _NO_SPECIAL_NO_FP_PTR_REG_mask;
 1092 
 1093 void reg_mask_init() {
 1094 
 1095   _ANY_REG32_mask = _ALL_REG32_mask;
 1096   _ANY_REG32_mask.Remove(OptoReg::as_OptoReg(x0->as_VMReg()));
 1097 
 1098   _ANY_REG_mask = _ALL_REG_mask;
 1099   _ANY_REG_mask.SUBTRACT(_ZR_REG_mask);
 1100 
 1101   _PTR_REG_mask = _ALL_REG_mask;
 1102   _PTR_REG_mask.SUBTRACT(_ZR_REG_mask);
 1103 
 1104   _NO_SPECIAL_REG32_mask = _ALL_REG32_mask;
 1105   _NO_SPECIAL_REG32_mask.SUBTRACT(_NON_ALLOCATABLE_REG32_mask);
 1106 
 1107   _NO_SPECIAL_REG_mask = _ALL_REG_mask;
 1108   _NO_SPECIAL_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);
 1109 
 1110   _NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask;
 1111   _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);
 1112 
 1113   // x27 is not allocatable when compressed oops is on
 1114   if (UseCompressedOops) {
 1115     _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(x27->as_VMReg()));
 1116     _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(x27->as_VMReg()));
 1117     _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(x27->as_VMReg()));
 1118   }
 1119 
 1120   // x8 is not allocatable when PreserveFramePointer is on
 1121   if (PreserveFramePointer) {
 1122     _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(x8->as_VMReg()));
 1123     _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(x8->as_VMReg()));
 1124     _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(x8->as_VMReg()));
 1125   }
 1126 
 1127   _NO_SPECIAL_NO_FP_PTR_REG_mask = _NO_SPECIAL_PTR_REG_mask;
 1128   _NO_SPECIAL_NO_FP_PTR_REG_mask.Remove(OptoReg::as_OptoReg(x8->as_VMReg()));
 1129 }
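// Descriptive note: the masks built above back the dynamically-computed
// register classes declared in the register block (any_reg32, any_reg,
// ptr_reg and the no_special_* classes), whose bodies simply return these
// RegMask values; hence e.g. no_special_reg automatically excludes x27
// under UseCompressedOops and x8 under PreserveFramePointer.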
 1130 
 1131 void PhaseOutput::pd_perform_mach_node_analysis() {
 1132 }
 1133 
 1134 int MachNode::pd_alignment_required() const {
 1135   return 1;
 1136 }
 1137 
 1138 int MachNode::compute_padding(int current_offset) const {
 1139   return 0;
 1140 }
 1141 
// is_CAS(int opcode, bool maybe_volatile)
//
// Returns true if opcode is one of the possible CompareAndSwapX
// values, otherwise false.
 1146 bool is_CAS(int opcode, bool maybe_volatile)
 1147 {
 1148   switch (opcode) {
 1149     // We handle these
 1150     case Op_CompareAndSwapI:
 1151     case Op_CompareAndSwapL:
 1152     case Op_CompareAndSwapP:
 1153     case Op_CompareAndSwapN:
 1154     case Op_ShenandoahCompareAndSwapP:
 1155     case Op_ShenandoahCompareAndSwapN:
 1156     case Op_CompareAndSwapB:
 1157     case Op_CompareAndSwapS:
 1158     case Op_GetAndSetI:
 1159     case Op_GetAndSetL:
 1160     case Op_GetAndSetP:
 1161     case Op_GetAndSetN:
 1162     case Op_GetAndAddI:
 1163     case Op_GetAndAddL:
 1164       return true;
 1165     case Op_CompareAndExchangeI:
 1166     case Op_CompareAndExchangeN:
 1167     case Op_CompareAndExchangeB:
 1168     case Op_CompareAndExchangeS:
 1169     case Op_CompareAndExchangeL:
 1170     case Op_CompareAndExchangeP:
 1171     case Op_WeakCompareAndSwapB:
 1172     case Op_WeakCompareAndSwapS:
 1173     case Op_WeakCompareAndSwapI:
 1174     case Op_WeakCompareAndSwapL:
 1175     case Op_WeakCompareAndSwapP:
 1176     case Op_WeakCompareAndSwapN:
 1177     case Op_ShenandoahWeakCompareAndSwapP:
 1178     case Op_ShenandoahWeakCompareAndSwapN:
 1179     case Op_ShenandoahCompareAndExchangeP:
 1180     case Op_ShenandoahCompareAndExchangeN:
 1181       return maybe_volatile;
 1182     default:
 1183       return false;
 1184   }
 1185 }
 1186 
// predicate controlling translation of CAS
//
// Returns true if the CAS needs to use an acquiring load, otherwise false.
 1190 bool needs_acquiring_load_reserved(const Node *n)
 1191 {
 1192   assert(n != nullptr && is_CAS(n->Opcode(), true), "expecting a compare and swap");
 1193 
 1194   LoadStoreNode* ldst = n->as_LoadStore();
 1195   if (n != nullptr && is_CAS(n->Opcode(), false)) {
 1196     assert(ldst != nullptr && ldst->trailing_membar() != nullptr, "expected trailing membar");
 1197   } else {
 1198     return ldst != nullptr && ldst->trailing_membar() != nullptr;
 1199   }
  // Strong CAS: the trailing membar was asserted above,
  // so we can just return true here
 1201   return true;
 1202 }
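// Sketch of the intended use (rule shape illustrative, not a new rule): an
// acquire-flavoured CAS pattern guards itself with something like
//   predicate(needs_acquiring_load_reserved(n));
// so that only LoadStore nodes carrying a trailing membar match the
// acquiring encodings, while the plain rules handle the rest.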
 1203 #define __ masm->
 1204 
 1205 // advance declarations for helper functions to convert register
 1206 // indices to register objects
 1207 
 1208 // the ad file has to provide implementations of certain methods
 1209 // expected by the generic code
 1210 //
 1211 // REQUIRED FUNCTIONALITY
 1212 
 1213 //=============================================================================
 1214 
 1215 // !!!!! Special hack to get all types of calls to specify the byte offset
 1216 //       from the start of the call to the point where the return address
 1217 //       will point.
 1218 
 1219 int MachCallStaticJavaNode::ret_addr_offset()
 1220 {
 1221   return 3 * NativeInstruction::instruction_size; // auipc + ld + jalr
 1222 }
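// With 4-byte RISC-V instructions this is 12 bytes: the return address
// points just past the auipc + ld + jalr sequence noted above.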
 1223 
 1224 int MachCallDynamicJavaNode::ret_addr_offset()
 1225 {
 1226   return NativeMovConstReg::movptr2_instruction_size + (3 * NativeInstruction::instruction_size); // movptr2, auipc + ld + jal
 1227 }
 1228 
 1229 int MachCallRuntimeNode::ret_addr_offset() {
 1230   // For address inside the code cache the call will be:
 1231   //   auipc + jalr
 1232   // For real runtime callouts it will be 8 instructions
 1233   // see riscv_enc_java_to_runtime
 1234   //   la(t0, retaddr)                                             ->  auipc + addi
 1235   //   sd(t0, Address(xthread, JavaThread::last_Java_pc_offset())) ->  sd
 1236   //   movptr(t1, addr, offset, t0)                                ->  lui + lui + slli + add
 1237   //   jalr(t1, offset)                                            ->  jalr
 1238   if (CodeCache::contains(_entry_point)) {
 1239     return 2 * NativeInstruction::instruction_size;
 1240   } else {
 1241     return 8 * NativeInstruction::instruction_size;
 1242   }
 1243 }
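// In byte terms (4-byte instructions): 8 bytes for the in-code-cache
// auipc + jalr case, 32 bytes for the 8-instruction runtime callout
// sequence listed above.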
 1244 
 1245 //
 1246 // Compute padding required for nodes which need alignment
 1247 //
 1248 
 1249 // With RVC a call instruction may get 2-byte aligned.
 1250 // The address of the call instruction needs to be 4-byte aligned to
 1251 // ensure that it does not span a cache line so that it can be patched.
 1252 int CallStaticJavaDirectNode::compute_padding(int current_offset) const
 1253 {
  // to make sure the address of the jal is 4-byte aligned.
 1255   return align_up(current_offset, alignment_required()) - current_offset;
 1256 }
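// For example (illustrative): if RVC has left current_offset 2-byte but
// not 4-byte aligned, this returns 2, and that padding is emitted as a
// single compressed nop ahead of the call.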
 1257 
 1258 // With RVC a call instruction may get 2-byte aligned.
 1259 // The address of the call instruction needs to be 4-byte aligned to
 1260 // ensure that it does not span a cache line so that it can be patched.
 1261 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
 1262 {
  // Skip the movptr2 in MacroAssembler::ic_call():
  //   lui, lui, slli, add, addi
  // Though movptr2() is already 4-byte aligned with or without RVC,
  // we guard against future changes by explicitly accounting for its size.
  current_offset += NativeMovConstReg::movptr2_instruction_size;
  // to make sure the address of the jal is 4-byte aligned.
 1269   return align_up(current_offset, alignment_required()) - current_offset;
 1270 }
 1271 
 1272 //=============================================================================
 1273 
 1274 #ifndef PRODUCT
 1275 void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1276   assert_cond(st != nullptr);
 1277   st->print("BREAKPOINT");
 1278 }
 1279 #endif
 1280 
 1281 void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
 1282   __ ebreak();
 1283 }
 1284 
 1285 uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
 1286   return MachNode::size(ra_);
 1287 }
 1288 
 1289 //=============================================================================
 1290 
 1291 #ifndef PRODUCT
 1292   void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
 1293     st->print("nop \t# %d bytes pad for loops and calls", _count);
 1294   }
 1295 #endif
 1296 
 1297   void MachNopNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc*) const {
 1298     Assembler::CompressibleRegion cr(masm); // nops shall be 2-byte under RVC for alignment purposes.
 1299     for (int i = 0; i < _count; i++) {
 1300       __ nop();
 1301     }
 1302   }
 1303 
 1304   uint MachNopNode::size(PhaseRegAlloc*) const {
 1305     return _count * (UseRVC ? NativeInstruction::compressed_instruction_size : NativeInstruction::instruction_size);
 1306   }
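  // For example, _count == 3 pads 6 bytes when RVC is on (three 2-byte
  // compressed nops) and 12 bytes otherwise.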
 1307 
 1308 //=============================================================================
 1309 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
 1310 
 1311 int ConstantTable::calculate_table_base_offset() const {
 1312   return 0;  // absolute addressing, no offset
 1313 }
 1314 
 1315 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
 1316 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
 1317   ShouldNotReachHere();
 1318 }
 1319 
 1320 void MachConstantBaseNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {
 1321   // Empty encoding
 1322 }
 1323 
 1324 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
 1325   return 0;
 1326 }
 1327 
 1328 #ifndef PRODUCT
 1329 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
 1330   assert_cond(st != nullptr);
 1331   st->print("-- \t// MachConstantBaseNode (empty encoding)");
 1332 }
 1333 #endif
 1334 
 1335 #ifndef PRODUCT
 1336 void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1337   assert_cond(st != nullptr && ra_ != nullptr);
 1338   Compile* C = ra_->C;
 1339 
 1340   int framesize = C->output()->frame_slots() << LogBytesPerInt;
 1341 
 1342   if (C->output()->need_stack_bang(framesize)) {
 1343     st->print("# stack bang size=%d\n\t", framesize);
 1344   }
 1345 
 1346   st->print("sd  fp, [sp, #%d]\n\t", - 2 * wordSize);
 1347   st->print("sd  ra, [sp, #%d]\n\t", - wordSize);
 1348   if (PreserveFramePointer) { st->print("sub  fp, sp, #%d\n\t", 2 * wordSize); }
 1349   st->print("sub sp, sp, #%d\n\t", framesize);
 1350 
 1351   if (C->stub_function() == nullptr && BarrierSet::barrier_set()->barrier_set_nmethod() != nullptr) {
 1352     st->print("ld  t0, [guard]\n\t");
 1353     st->print("membar LoadLoad\n\t");
 1354     st->print("ld  t1, [xthread, #thread_disarmed_guard_value_offset]\n\t");
 1355     st->print("beq t0, t1, skip\n\t");
 1356     st->print("jalr #nmethod_entry_barrier_stub\n\t");
 1357     st->print("j skip\n\t");
 1358     st->print("guard: int\n\t");
 1359     st->print("skip:\n\t");
 1360   }
 1361 }
 1362 #endif
 1363 
 1364 void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
 1365   assert_cond(ra_ != nullptr);
 1366   Compile* C = ra_->C;
 1367 
 1368   // n.b. frame size includes space for return pc and fp
 1369   const int framesize = C->output()->frame_size_in_bytes();
 1370 
 1371   // insert a nop at the start of the prolog so we can patch in a
 1372   // branch if we need to invalidate the method later
 1373   {
 1374     Assembler::IncompressibleRegion ir(masm);  // keep the nop as 4 bytes for patching.
 1375     MacroAssembler::assert_alignment(__ pc());
 1376     __ nop();  // 4 bytes
 1377   }
 1378 
 1379   assert_cond(C != nullptr);
 1380 
 1381   if (C->clinit_barrier_on_entry()) {
 1382     assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");
 1383 
 1384     Label L_skip_barrier;
 1385 
 1386     __ mov_metadata(t1, C->method()->holder()->constant_encoding());
 1387     __ clinit_barrier(t1, t0, &L_skip_barrier);
 1388     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
 1389     __ bind(L_skip_barrier);
 1390   }
 1391 
 1392   int bangsize = C->output()->bang_size_in_bytes();
 1393   if (C->output()->need_stack_bang(bangsize)) {
 1394     __ generate_stack_overflow_check(bangsize);
 1395   }
 1396 
 1397   __ build_frame(framesize);
 1398 
 1399   if (C->stub_function() == nullptr) {
 1400     BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 1401     if (BarrierSet::barrier_set()->barrier_set_nmethod() != nullptr) {
 1402       // Dummy labels for just measuring the code size
 1403       Label dummy_slow_path;
 1404       Label dummy_continuation;
 1405       Label dummy_guard;
 1406       Label* slow_path = &dummy_slow_path;
 1407       Label* continuation = &dummy_continuation;
 1408       Label* guard = &dummy_guard;
 1409       if (!Compile::current()->output()->in_scratch_emit_size()) {
 1410         // Use real labels from actual stub when not emitting code for purpose of measuring its size
 1411         C2EntryBarrierStub* stub = new (Compile::current()->comp_arena()) C2EntryBarrierStub();
 1412         Compile::current()->output()->add_stub(stub);
 1413         slow_path = &stub->entry();
 1414         continuation = &stub->continuation();
 1415         guard = &stub->guard();
 1416       }
 1417       // In the C2 code, we move the non-hot part of nmethod entry barriers out-of-line to a stub.
 1418       bs->nmethod_entry_barrier(masm, slow_path, continuation, guard);
 1419     }
 1420   }
 1421 
 1422   if (VerifyStackAtCalls) {
 1423     Unimplemented();
 1424   }
 1425 
 1426   C->output()->set_frame_complete(__ offset());
 1427 
 1428   if (C->has_mach_constant_base_node()) {
 1429     // NOTE: We set the constant table base offset here because its users
 1430     // might be emitted before MachConstantBaseNode.
 1431     ConstantTable& constant_table = C->output()->constant_table();
 1432     constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
 1433   }
 1434 }
 1435 
 1436 uint MachPrologNode::size(PhaseRegAlloc* ra_) const
 1437 {
 1438   assert_cond(ra_ != nullptr);
 1439   return MachNode::size(ra_); // too many variables; just compute it
 1440                               // the hard way
 1441 }
 1442 
 1443 int MachPrologNode::reloc() const
 1444 {
 1445   return 0;
 1446 }
 1447 
 1448 //=============================================================================
 1449 
 1450 #ifndef PRODUCT
 1451 void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1452   assert_cond(st != nullptr && ra_ != nullptr);
 1453   Compile* C = ra_->C;
 1454   assert_cond(C != nullptr);
 1455   int framesize = C->output()->frame_size_in_bytes();
 1456 
 1457   st->print("# pop frame %d\n\t", framesize);
 1458 
 1459   if (framesize == 0) {
 1460     st->print("ld  ra, [sp,#%d]\n\t", (2 * wordSize));
 1461     st->print("ld  fp, [sp,#%d]\n\t", (3 * wordSize));
 1462     st->print("add sp, sp, #%d\n\t", (2 * wordSize));
 1463   } else {
 1464     st->print("add  sp, sp, #%d\n\t", framesize);
 1465     st->print("ld  ra, [sp,#%d]\n\t", - 2 * wordSize);
 1466     st->print("ld  fp, [sp,#%d]\n\t", - wordSize);
 1467   }
 1468 
 1469   if (do_polling() && C->is_method_compilation()) {
 1470     st->print("# test polling word\n\t");
 1471     st->print("ld t0, [xthread,#%d]\n\t", in_bytes(JavaThread::polling_word_offset()));
 1472     st->print("bgtu sp, t0, #slow_path");
 1473   }
 1474 }
 1475 #endif
 1476 
 1477 void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
 1478   assert_cond(ra_ != nullptr);
 1479   Compile* C = ra_->C;
 1480   assert_cond(C != nullptr);
 1481   int framesize = C->output()->frame_size_in_bytes();
 1482 
 1483   __ remove_frame(framesize);
 1484 
 1485   if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
 1486     __ reserved_stack_check();
 1487   }
 1488 
 1489   if (do_polling() && C->is_method_compilation()) {
 1490     Label dummy_label;
 1491     Label* code_stub = &dummy_label;
 1492     if (!C->output()->in_scratch_emit_size()) {
 1493       C2SafepointPollStub* stub = new (C->comp_arena()) C2SafepointPollStub(__ offset());
 1494       C->output()->add_stub(stub);
 1495       code_stub = &stub->entry();
 1496     }
 1497     __ relocate(relocInfo::poll_return_type);
 1498     __ safepoint_poll(*code_stub, true /* at_return */, false /* acquire */, true /* in_nmethod */);
 1499   }
 1500 }
 1501 
 1502 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
 1503   assert_cond(ra_ != nullptr);
 1504   // Variable size. Determine dynamically.
 1505   return MachNode::size(ra_);
 1506 }
 1507 
 1508 int MachEpilogNode::reloc() const {
 1509   // Return number of relocatable values contained in this instruction.
 1510   return 1; // 1 for polling page.
 1511 }
 1512 const Pipeline * MachEpilogNode::pipeline() const {
 1513   return MachNode::pipeline_class();
 1514 }
 1515 
 1516 //=============================================================================
 1517 
 1518 // Figure out which register class each belongs in: rc_int, rc_float,
 1519 // rc_vector or rc_stack.
 1520 enum RC { rc_bad, rc_int, rc_float, rc_vector, rc_stack };
 1521 
 1522 static enum RC rc_class(OptoReg::Name reg) {
 1523 
 1524   if (reg == OptoReg::Bad) {
 1525     return rc_bad;
 1526   }
 1527 
 1528   // we have 30 int registers * 2 halves
 1529   // (t0 and t1 are omitted)
 1530   int slots_of_int_registers = Register::max_slots_per_register * (Register::number_of_registers - 2);
 1531   if (reg < slots_of_int_registers) {
 1532     return rc_int;
 1533   }
 1534 
 1535   // we have 32 float registers * 2 halves
 1536   int slots_of_float_registers = FloatRegister::max_slots_per_register * FloatRegister::number_of_registers;
 1537   if (reg < slots_of_int_registers + slots_of_float_registers) {
 1538     return rc_float;
 1539   }
 1540 
 1541   // we have 32 vector registers * 4 slots each
 1542   int slots_of_vector_registers = VectorRegister::max_slots_per_register * VectorRegister::number_of_registers;
 1543   if (reg < slots_of_int_registers + slots_of_float_registers + slots_of_vector_registers) {
 1544     return rc_vector;
 1545   }
 1546 
 1547   // Between the vector regs & stack are the flags regs.
 1548   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
 1549 
 1550   return rc_stack;
 1551 }
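
      // A worked example of the slot arithmetic above, assuming the per-register
      // slot counts stated in the comments (2 slots per int/float register, 4 per
      // vector register, 30 allocatable int registers, 32 float and 32 vector
      // registers):
      //   int slots:    [0, 60)    -> rc_int
      //   float slots:  [60, 124)  -> rc_float
      //   vector slots: [124, 252) -> rc_vector
      //   anything for which OptoReg::is_stack(reg) holds -> rc_stack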
 1552 
 1553 uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
 1554   assert_cond(ra_ != nullptr);
 1555   Compile* C = ra_->C;
 1556 
 1557   // Get registers to move.
 1558   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
 1559   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
 1560   OptoReg::Name dst_hi = ra_->get_reg_second(this);
 1561   OptoReg::Name dst_lo = ra_->get_reg_first(this);
 1562 
 1563   enum RC src_hi_rc = rc_class(src_hi);
 1564   enum RC src_lo_rc = rc_class(src_lo);
 1565   enum RC dst_hi_rc = rc_class(dst_hi);
 1566   enum RC dst_lo_rc = rc_class(dst_lo);
 1567 
 1568   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
 1569 
 1570   if (src_hi != OptoReg::Bad && !bottom_type()->isa_vectmask()) {
 1571     assert((src_lo & 1) == 0 && src_lo + 1 == src_hi &&
 1572            (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi,
 1573            "expected aligned-adjacent pairs");
 1574   }
 1575 
 1576   if (src_lo == dst_lo && src_hi == dst_hi) {
 1577     return 0;            // Self copy, no move.
 1578   }
 1579 
 1580   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
 1581               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
 1582   int src_offset = ra_->reg2offset(src_lo);
 1583   int dst_offset = ra_->reg2offset(dst_lo);
 1584 
 1585   if (bottom_type()->isa_vect() != nullptr) {
 1586     uint ireg = ideal_reg();
 1587     if (ireg == Op_VecA && masm) {
 1588       int vector_reg_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
 1589       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
 1590         // stack to stack
 1591         __ spill_copy_vector_stack_to_stack(src_offset, dst_offset,
 1592                                             vector_reg_size_in_bytes);
 1593       } else if (src_lo_rc == rc_vector && dst_lo_rc == rc_stack) {
 1594         // vpr to stack
 1595         __ spill(as_VectorRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo));
 1596       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_vector) {
 1597         // stack to vpr
 1598         __ unspill(as_VectorRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo));
 1599       } else if (src_lo_rc == rc_vector && dst_lo_rc == rc_vector) {
 1600         // vpr to vpr
 1601         __ vmv1r_v(as_VectorRegister(Matcher::_regEncode[dst_lo]), as_VectorRegister(Matcher::_regEncode[src_lo]));
 1602       } else {
 1603         ShouldNotReachHere();
 1604       }
 1605     } else if (bottom_type()->isa_vectmask() && masm) {
 1606       int vmask_size_in_bytes = Matcher::scalable_predicate_reg_slots() * 32 / 8;
 1607       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
 1608         // stack to stack
 1609         __ spill_copy_vmask_stack_to_stack(src_offset, dst_offset,
 1610                                            vmask_size_in_bytes);
 1611       } else if (src_lo_rc == rc_vector && dst_lo_rc == rc_stack) {
 1612         // vmask to stack
 1613         __ spill_vmask(as_VectorRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo));
 1614       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_vector) {
 1615         // stack to vmask
 1616         __ unspill_vmask(as_VectorRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo));
 1617       } else if (src_lo_rc == rc_vector && dst_lo_rc == rc_vector) {
 1618         // vmask to vmask
 1619         __ vmv1r_v(as_VectorRegister(Matcher::_regEncode[dst_lo]), as_VectorRegister(Matcher::_regEncode[src_lo]));
 1620       } else {
 1621         ShouldNotReachHere();
 1622       }
 1623     }
 1624   } else if (masm != nullptr) {
 1625     switch (src_lo_rc) {
 1626       case rc_int:
 1627         if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
 1628           if (!is64 && this->ideal_reg() != Op_RegI) { // zero extended for narrow oop or klass
 1629             __ zero_extend(as_Register(Matcher::_regEncode[dst_lo]), as_Register(Matcher::_regEncode[src_lo]), 32);
 1630           } else {
 1631             __ mv(as_Register(Matcher::_regEncode[dst_lo]), as_Register(Matcher::_regEncode[src_lo]));
 1632           }
 1633         } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
 1634           if (is64) {
 1635             __ fmv_d_x(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 1636                        as_Register(Matcher::_regEncode[src_lo]));
 1637           } else {
 1638             __ fmv_w_x(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 1639                        as_Register(Matcher::_regEncode[src_lo]));
 1640           }
 1641         } else {                    // gpr --> stack spill
 1642           assert(dst_lo_rc == rc_stack, "spill to bad register class");
 1643           __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
 1644         }
 1645         break;
 1646       case rc_float:
 1647         if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
 1648           if (is64) {
 1649             __ fmv_x_d(as_Register(Matcher::_regEncode[dst_lo]),
 1650                        as_FloatRegister(Matcher::_regEncode[src_lo]));
 1651           } else {
 1652             __ fmv_x_w(as_Register(Matcher::_regEncode[dst_lo]),
 1653                        as_FloatRegister(Matcher::_regEncode[src_lo]));
 1654           }
 1655         } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
 1656           if (is64) {
 1657             __ fmv_d(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 1658                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 1659           } else {
 1660             __ fmv_s(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 1661                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 1662           }
 1663         } else {                    // fpr --> stack spill
 1664           assert(dst_lo_rc == rc_stack, "spill to bad register class");
 1665           __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
 1666                    is64, dst_offset);
 1667         }
 1668         break;
 1669       case rc_stack:
 1670         if (dst_lo_rc == rc_int) {  // stack --> gpr load
 1671           if (this->ideal_reg() == Op_RegI) {
 1672             __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
 1673           } else { // zero extended for narrow oop or klass
 1674             __ unspillu(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
 1675           }
 1676         } else if (dst_lo_rc == rc_float) { // stack --> fpr load
 1677           __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 1678                      is64, src_offset);
 1679         } else {                    // stack --> stack copy
 1680           assert(dst_lo_rc == rc_stack, "spill to bad register class");
 1681           if (this->ideal_reg() == Op_RegI) {
 1682             __ unspill(t0, is64, src_offset);
 1683           } else { // zero extended for narrow oop or klass
 1684             __ unspillu(t0, is64, src_offset);
 1685           }
 1686           __ spill(t0, is64, dst_offset);
 1687         }
 1688         break;
 1689       default:
 1690         ShouldNotReachHere();
 1691     }
 1692   }
 1693 
 1694   if (st != nullptr) {
 1695     st->print("spill ");
 1696     if (src_lo_rc == rc_stack) {
 1697       st->print("[sp, #%d] -> ", src_offset);
 1698     } else {
 1699       st->print("%s -> ", Matcher::regName[src_lo]);
 1700     }
 1701     if (dst_lo_rc == rc_stack) {
 1702       st->print("[sp, #%d]", dst_offset);
 1703     } else {
 1704       st->print("%s", Matcher::regName[dst_lo]);
 1705     }
 1706     if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
 1707       int vsize = 0;
 1708       if (ideal_reg() == Op_VecA) {
 1709         vsize = Matcher::scalable_vector_reg_size(T_BYTE) * 8;
 1710       } else {
 1711         ShouldNotReachHere();
 1712       }
 1713       st->print("\t# vector spill size = %d", vsize);
 1714     } else if (ideal_reg() == Op_RegVectMask) {
 1715       assert(Matcher::supports_scalable_vector(), "bad register type for spill");
 1716       int vsize = Matcher::scalable_predicate_reg_slots() * 32;
 1717       st->print("\t# vmask spill size = %d", vsize);
 1718     } else {
 1719       st->print("\t# spill size = %d", is64 ? 64 : 32);
 1720     }
 1721   }
 1722 
 1723   return 0;
 1724 }
 1725 
 1726 #ifndef PRODUCT
 1727 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1728   if (ra_ == nullptr) {
 1729     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
 1730   } else {
 1731     implementation(nullptr, ra_, false, st);
 1732   }
 1733 }
 1734 #endif
 1735 
 1736 void MachSpillCopyNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
 1737   implementation(masm, ra_, false, nullptr);
 1738 }
 1739 
 1740 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
 1741   return MachNode::size(ra_);
 1742 }
 1743 
 1744 //=============================================================================
 1745 
 1746 #ifndef PRODUCT
 1747 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1748   assert_cond(ra_ != nullptr && st != nullptr);
 1749   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 1750   int reg = ra_->get_reg_first(this);
 1751   st->print("add %s, sp, #%d\t# box lock",
 1752             Matcher::regName[reg], offset);
 1753 }
 1754 #endif
 1755 
 1756 void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
 1757   Assembler::IncompressibleRegion ir(masm);  // Fixed length: see BoxLockNode::size()
 1758 
 1759   assert_cond(ra_ != nullptr);
 1760   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 1761   int reg    = ra_->get_encode(this);
 1762 
 1763   if (Assembler::is_simm12(offset)) {
 1764     __ addi(as_Register(reg), sp, offset);
 1765   } else {
 1766     __ li32(t0, offset);
 1767     __ add(as_Register(reg), sp, t0);
 1768   }
 1769 }
 1770 
 1771 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
 1772   // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
 1773   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 1774 
 1775   if (Assembler::is_simm12(offset)) {
 1776     return NativeInstruction::instruction_size;
 1777   } else {
 1778     return 3 * NativeInstruction::instruction_size; // lui + addiw + add;
 1779   }
 1780 }
 1781 
 1782 //=============================================================================
 1783 
 1784 #ifndef PRODUCT
 1785 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 1786 {
 1787   assert_cond(st != nullptr);
 1788   st->print_cr("# MachUEPNode");
 1789   if (UseCompressedClassPointers) {
 1790     st->print_cr("\tlwu t1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 1791     st->print_cr("\tlwu t2, [t0      + CompiledICData::speculated_klass_offset()]\t# compressed klass");
 1792   } else {
 1793     st->print_cr("\tld t1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# klass");
 1794     st->print_cr("\tld t2, [t0      + CompiledICData::speculated_klass_offset()]\t# klass");
 1795   }
 1796   st->print_cr("\tbeq t1, t2, ic_hit");
 1797   st->print_cr("\tj  SharedRuntime::_ic_miss_stub\t # Inline cache check");
 1798   st->print_cr("\tic_hit:");
 1799 }
 1800 #endif
 1801 
 1802 void MachUEPNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
 1803 {
 1804   // This is the unverified entry point.
 1805   __ ic_check(CodeEntryAlignment);
 1806 
 1807   // The verified entry point must be 4-byte aligned for patching by NativeJump::patch_verified_entry().
 1808   // ic_check() aligns to CodeEntryAlignment >= InteriorEntryAlignment (min 16) > NativeInstruction::instruction_size (4).
 1809   assert(((__ offset()) % CodeEntryAlignment) == 0, "Misaligned verified entry point");
 1810 }
 1811 
 1812 uint MachUEPNode::size(PhaseRegAlloc* ra_) const
 1813 {
 1814   assert_cond(ra_ != nullptr);
 1815   return MachNode::size(ra_);
 1816 }
 1817 
 1818 // REQUIRED EMIT CODE
 1819 
 1820 //=============================================================================
 1821 
 1822 // Emit exception handler code.
 1823 int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm)
 1824 {
 1825   // auipc t1, #exception_blob_entry_point
 1826   // jr (offset)t1
 1827   // Note that the code buffer's insts_mark is always relative to insts.
 1828   // That's why we must use the macroassembler to generate a handler.
 1829   address base = __ start_a_stub(size_exception_handler());
 1830   if (base == nullptr) {
 1831     ciEnv::current()->record_failure("CodeCache is full");
 1832     return 0;  // CodeBuffer::expand failed
 1833   }
 1834   int offset = __ offset();
 1835   __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
 1836   assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
 1837   __ end_a_stub();
 1838   return offset;
 1839 }
 1840 
 1841 // Emit deopt handler code.
 1842 int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm)
 1843 {
 1844   address base = __ start_a_stub(size_deopt_handler());
 1845   if (base == nullptr) {
 1846     ciEnv::current()->record_failure("CodeCache is full");
 1847     return 0;  // CodeBuffer::expand failed
 1848   }
 1849   int offset = __ offset();
 1850 
 1851   __ auipc(ra, 0);
 1852   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 1853 
 1854   assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
 1855   __ end_a_stub();
 1856   return offset;
 1857 
 1858 }
 1859 // REQUIRED MATCHER CODE
 1860 
 1861 //=============================================================================
 1862 
 1863 bool Matcher::match_rule_supported(int opcode) {
 1864   if (!has_match_rule(opcode)) {
 1865     return false;
 1866   }
 1867 
 1868   switch (opcode) {
 1869     case Op_OnSpinWait:
 1870       return VM_Version::supports_on_spin_wait();
 1871     case Op_CacheWB:           // fall through
 1872     case Op_CacheWBPreSync:    // fall through
 1873     case Op_CacheWBPostSync:
 1874       if (!VM_Version::supports_data_cache_line_flush()) {
 1875         return false;
 1876       }
 1877       break;
 1878 
 1879     case Op_StrCompressedCopy: // fall through
 1880     case Op_StrInflatedCopy:   // fall through
 1881     case Op_CountPositives:    // fall through
 1882     case Op_EncodeISOArray:
 1883       return UseRVV;
 1884 
 1885     // Current tests show a performance gain when MaxVectorSize >= 32, but a
 1886     // regression when MaxVectorSize == 16. So only enable the intrinsic when MaxVectorSize >= 32.
 1887     case Op_RoundVF:
 1888       return UseRVV && MaxVectorSize >= 32;
 1889 
 1890     // For double, current tests show some regression even with MaxVectorSize == 32.
 1891     // Although there is no hardware to verify this yet, the trend of the performance data on existing
 1892     // hardware (vlenb == 16 and 32 respectively) suggests better performance rather than regression
 1893     // for double once MaxVectorSize >= 64. So only enable the intrinsic when MaxVectorSize >= 64.
 1894     case Op_RoundVD:
 1895       return UseRVV && MaxVectorSize >= 64;
 1896 
 1897     case Op_PopCountI:
 1898     case Op_PopCountL:
 1899       return UsePopCountInstruction;
 1900 
 1901     case Op_ReverseBytesI:
 1902     case Op_ReverseBytesL:
 1903     case Op_ReverseBytesS:
 1904     case Op_ReverseBytesUS:
 1905     case Op_RotateRight:
 1906     case Op_RotateLeft:
 1907     case Op_CountLeadingZerosI:
 1908     case Op_CountLeadingZerosL:
 1909     case Op_CountTrailingZerosI:
 1910     case Op_CountTrailingZerosL:
 1911       return UseZbb;
 1912 
 1913     case Op_FmaF:
 1914     case Op_FmaD:
 1915     case Op_FmaVF:
 1916     case Op_FmaVD:
 1917       return UseFMA;
 1918 
 1919     case Op_ConvHF2F:
 1920     case Op_ConvF2HF:
 1921       return UseZfh;
 1922   }
 1923 
 1924   return true; // Per default match rules are supported.
 1925 }
 1926 
 1927 const RegMask* Matcher::predicate_reg_mask(void) {
 1928   return &_VMASK_REG_mask;
 1929 }
 1930 
 1931 // Vector calling convention not yet implemented.
 1932 bool Matcher::supports_vector_calling_convention(void) {
 1933   return EnableVectorSupport && UseVectorStubs;
 1934 }
 1935 
 1936 OptoRegPair Matcher::vector_return_value(uint ideal_reg) {
 1937   assert(EnableVectorSupport && UseVectorStubs, "sanity");
 1938   assert(ideal_reg == Op_VecA, "sanity");
 1939   // check more info at https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc
 1940   int lo = V8_num;
 1941   int hi = V8_K_num;
 1942   return OptoRegPair(hi, lo);
 1943 }
 1944 
 1945 // Is this branch offset short enough that a short branch can be used?
 1946 //
 1947 // NOTE: If the platform does not provide any short branch variants, then
 1948 //       this method should return false for offset 0.
 1949 // |---label(L1)-----|
 1950 // |-----------------|
 1951 // |-----------------|----------eq: float-------------------
 1952 // |-----------------| // far_cmpD_branch   |   cmpD_branch
 1953 // |-----------------|    feq;              |      feq;
 1954 // |-far_cmpD_branch-|    beqz done;        |      bnez L;
 1955 // |-----------------|    j L;              |
 1956 // |-----------------|    bind(done);       |
 1957 // |-----------------|--------------------------------------
 1958 // |-----------------| // so shortBrSize = br_size - 4;
 1959 // |-----------------| // so offs = offset - shortBrSize + 4;
 1960 // |---label(L2)-----|
 1961 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
 1962   // The passed offset is relative to address of the branch.
 1963   int shortBrSize = br_size - 4;
 1964   int offs = offset - shortBrSize + 4;
 1965   return (-4096 <= offs && offs < 4096);
 1966 }
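
      // A worked example of the adjustment above (not authoritative, just plugging
      // numbers into the formula): for a far_cmpD_branch emitting feq + beqz + j,
      // br_size == 12, so shortBrSize == 8 and offs == offset - 4; an offset of
      // 4096 then gives offs == 4092, which still fits the [-4096, 4096) window,
      // while an offset of 4100 gives offs == 4096 and keeps the far variant.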
 1967 
 1968 // Vector width in bytes.
 1969 int Matcher::vector_width_in_bytes(BasicType bt) {
 1970   if (UseRVV) {
 1971     // MaxVectorSize should have been set by detecting the RVV max vector register size when UseRVV was checked.
 1972     // MaxVectorSize == VM_Version::_initial_vector_length
 1973     int size = MaxVectorSize;
 1974     // Minimum 2 values in vector
 1975     if (size < 2 * type2aelembytes(bt)) size = 0;
 1976     // But never < 4
 1977     if (size < 4) size = 0;
 1978     return size;
 1979   }
 1980   return 0;
 1981 }
 1982 
 1983 // Limits on vector size (number of elements) loaded into vector.
 1984 int Matcher::max_vector_size(const BasicType bt) {
 1985   return vector_width_in_bytes(bt) / type2aelembytes(bt);
 1986 }
 1987 
 1988 int Matcher::min_vector_size(const BasicType bt) {
 1989   int max_size = max_vector_size(bt);
 1990   // Limit the min vector size to 8 bytes.
 1991   int size = 8 / type2aelembytes(bt);
 1992   if (bt == T_BYTE) {
 1993     // To support vector api shuffle/rearrange.
 1994     size = 4;
 1995   } else if (bt == T_BOOLEAN) {
 1996     // To support vector api load/store mask.
 1997     size = 2;
 1998   }
 1999   if (size < 2) size = 2;
 2000   return MIN2(size, max_size);
 2001 }
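
      // For illustration, assuming UseRVV and MaxVectorSize == 16 bytes, the rules
      // above give:
      //   T_INT     (4 bytes): max_vector_size == 4,  min_vector_size == 2
      //   T_BYTE    (1 byte):  max_vector_size == 16, min_vector_size == 4
      //   T_BOOLEAN (1 byte):  max_vector_size == 16, min_vector_size == 2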
 2002 
 2003 int Matcher::max_vector_size_auto_vectorization(const BasicType bt) {
 2004   return Matcher::max_vector_size(bt);
 2005 }
 2006 
 2007 // Vector ideal reg.
 2008 uint Matcher::vector_ideal_reg(int len) {
 2009   assert(MaxVectorSize >= len, "");
 2010   if (UseRVV) {
 2011     return Op_VecA;
 2012   }
 2013 
 2014   ShouldNotReachHere();
 2015   return 0;
 2016 }
 2017 
 2018 int Matcher::scalable_vector_reg_size(const BasicType bt) {
 2019   return Matcher::max_vector_size(bt);
 2020 }
 2021 
 2022 MachOper* Matcher::pd_specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg, bool is_temp) {
 2023   ShouldNotReachHere(); // generic vector operands not supported
 2024   return nullptr;
 2025 }
 2026 
 2027 bool Matcher::is_reg2reg_move(MachNode* m) {
 2028   ShouldNotReachHere(); // generic vector operands not supported
 2029   return false;
 2030 }
 2031 
 2032 bool Matcher::is_generic_vector(MachOper* opnd) {
 2033   ShouldNotReachHere(); // generic vector operands not supported
 2034   return false;
 2035 }
 2036 
 2037 // Return whether or not this register is ever used as an argument.
 2038 // This function is used on startup to build the trampoline stubs in
 2039 // generateOptoStub.  Registers not mentioned will be killed by the VM
 2040 // call in the trampoline, and arguments in those registers will not be
 2041 // available to the callee.
 2042 bool Matcher::can_be_java_arg(int reg)
 2043 {
 2044   return
 2045     reg ==  R10_num || reg == R10_H_num ||
 2046     reg ==  R11_num || reg == R11_H_num ||
 2047     reg ==  R12_num || reg == R12_H_num ||
 2048     reg ==  R13_num || reg == R13_H_num ||
 2049     reg ==  R14_num || reg == R14_H_num ||
 2050     reg ==  R15_num || reg == R15_H_num ||
 2051     reg ==  R16_num || reg == R16_H_num ||
 2052     reg ==  R17_num || reg == R17_H_num ||
 2053     reg ==  F10_num || reg == F10_H_num ||
 2054     reg ==  F11_num || reg == F11_H_num ||
 2055     reg ==  F12_num || reg == F12_H_num ||
 2056     reg ==  F13_num || reg == F13_H_num ||
 2057     reg ==  F14_num || reg == F14_H_num ||
 2058     reg ==  F15_num || reg == F15_H_num ||
 2059     reg ==  F16_num || reg == F16_H_num ||
 2060     reg ==  F17_num || reg == F17_H_num;
 2061 }
 2062 
 2063 bool Matcher::is_spillable_arg(int reg)
 2064 {
 2065   return can_be_java_arg(reg);
 2066 }
 2067 
 2068 uint Matcher::int_pressure_limit()
 2069 {
 2070   // A derived pointer is live at CallNode and then is flagged by RA
 2071   // as a spilled LRG. Spilling heuristics (Spill-USE) explicitly skip
 2072   // derived pointers and eventually fail to spill after reaching the maximum
 2073   // number of iterations. Lowering the default pressure threshold to
 2074   // (_NO_SPECIAL_REG32_mask.Size() minus 1) forces CallNode to become
 2075   // a high register pressure area of the code so that split_DEF can
 2076   // generate DefinitionSpillCopy for the derived pointer.
 2077   uint default_int_pressure_threshold = _NO_SPECIAL_REG32_mask.Size() - 1;
 2078   if (!PreserveFramePointer) {
 2079     // When PreserveFramePointer is off, frame pointer is allocatable,
 2080     // but different from other SOC registers, it is excluded from
 2081     // fatproj's mask because its save type is No-Save. Decrease 1 to
 2082     // ensure high pressure at fatproj when PreserveFramePointer is off.
 2083     // See check_pressure_at_fatproj().
 2084     default_int_pressure_threshold--;
 2085   }
 2086   return (INTPRESSURE == -1) ? default_int_pressure_threshold : INTPRESSURE;
 2087 }
 2088 
 2089 uint Matcher::float_pressure_limit()
 2090 {
 2091   // _FLOAT_REG_mask is generated by adlc from the float_reg register class.
 2092   return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.Size() : FLOATPRESSURE;
 2093 }
 2094 
 2095 bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
 2096   return false;
 2097 }
 2098 
      // Register for DIVI projection of divmodI.
 2099 RegMask Matcher::divI_proj_mask() {
 2100   ShouldNotReachHere();
 2101   return RegMask();
 2102 }
 2103 
 2104 // Register for MODI projection of divmodI.
 2105 RegMask Matcher::modI_proj_mask() {
 2106   ShouldNotReachHere();
 2107   return RegMask();
 2108 }
 2109 
 2110 // Register for DIVL projection of divmodL.
 2111 RegMask Matcher::divL_proj_mask() {
 2112   ShouldNotReachHere();
 2113   return RegMask();
 2114 }
 2115 
 2116 // Register for MODL projection of divmodL.
 2117 RegMask Matcher::modL_proj_mask() {
 2118   ShouldNotReachHere();
 2119   return RegMask();
 2120 }
 2121 
 2122 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
 2123   return FP_REG_mask();
 2124 }
 2125 
 2126 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
 2127   assert_cond(addp != nullptr);
 2128   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
 2129     Node* u = addp->fast_out(i);
 2130     if (u != nullptr && u->is_Mem()) {
 2131       int opsize = u->as_Mem()->memory_size();
 2132       assert(opsize > 0, "unexpected memory operand size");
 2133       if (opsize != (1 << shift)) {
 2134         return false;
 2135       }
 2136     }
 2137   }
 2138   return true;
 2139 }
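
      // For example, an AddP with shift == 2 (index scaled by 4) whose only memory
      // users are LoadI/StoreI (memory_size() == 4 == 1 << 2) passes this check and
      // the scaled index can be folded into every use; if the same AddP also fed a
      // LoadB (memory_size() == 1), this would return false.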
 2140 
 2141 // Binary src (Replicate scalar/immediate)
 2142 static bool is_vector_scalar_bitwise_pattern(Node* n, Node* m) {
 2143   if (n == nullptr || m == nullptr) {
 2144     return false;
 2145   }
 2146 
 2147   if (m->Opcode() != Op_Replicate) {
 2148     return false;
 2149   }
 2150 
 2151   switch (n->Opcode()) {
 2152     case Op_AndV:
 2153     case Op_OrV:
 2154     case Op_XorV:
 2155     case Op_AddVB:
 2156     case Op_AddVS:
 2157     case Op_AddVI:
 2158     case Op_AddVL:
 2159     case Op_SubVB:
 2160     case Op_SubVS:
 2161     case Op_SubVI:
 2162     case Op_SubVL:
 2163     case Op_MulVB:
 2164     case Op_MulVS:
 2165     case Op_MulVI:
 2166     case Op_MulVL: {
 2167       return true;
 2168     }
 2169     default:
 2170       return false;
 2171   }
 2172 }
 2173 
 2174 // (XorV src (Replicate m1))
 2175 // (XorVMask src (MaskAll m1))
 2176 static bool is_vector_bitwise_not_pattern(Node* n, Node* m) {
 2177   if (n != nullptr && m != nullptr) {
 2178     return (n->Opcode() == Op_XorV || n->Opcode() == Op_XorVMask) &&
 2179            VectorNode::is_all_ones_vector(m);
 2180   }
 2181   return false;
 2182 }
 2183 
 2184 // Should the Matcher clone input 'm' of node 'n'?
 2185 bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
 2186   assert_cond(m != nullptr);
 2187   if (is_vshift_con_pattern(n, m) || // ShiftV src (ShiftCntV con)
 2188       is_vector_bitwise_not_pattern(n, m) ||
 2189       is_vector_scalar_bitwise_pattern(n, m) ||
 2190       is_encode_and_store_pattern(n, m)) {
 2191     mstack.push(m, Visit);
 2192     return true;
 2193   }
 2194   return false;
 2195 }
 2196 
 2197 // Should the Matcher clone shifts on addressing modes, expecting them
 2198 // to be subsumed into complex addressing expressions or compute them
 2199 // into registers?
 2200 bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
 2201   return clone_base_plus_offset_address(m, mstack, address_visited);
 2202 }
 2203 
 2204 %}
 2205 
 2206 
 2207 
 2208 //----------ENCODING BLOCK-----------------------------------------------------
 2209 // This block specifies the encoding classes used by the compiler to
 2210 // output byte streams.  Encoding classes are parameterized macros
 2211 // used by Machine Instruction Nodes in order to generate the bit
 2212 // encoding of the instruction.  Operands specify their base encoding
 2213 // interface with the interface keyword.  Four interfaces are currently
 2214 // supported: REG_INTER, CONST_INTER, MEMORY_INTER, &
 2215 // COND_INTER.  REG_INTER causes an operand to generate a function
 2216 // which returns its register number when queried.  CONST_INTER causes
 2217 // an operand to generate a function which returns the value of the
 2218 // constant when queried.  MEMORY_INTER causes an operand to generate
 2219 // four functions which return the Base Register, the Index Register,
 2220 // the Scale Value, and the Offset Value of the operand when queried.
 2221 // COND_INTER causes an operand to generate six functions which return
 2222 // the encoding code (i.e. the encoding bits for the instruction)
 2223 // associated with each basic boolean condition for a conditional
 2224 // instruction.
 2225 //
 2226 // Instructions specify two basic values for encoding.  Again, a
 2227 // function is available to check if the constant displacement is an
 2228 // oop. They use the ins_encode keyword to specify their encoding
 2229 // classes (which must be a sequence of enc_class names, and their
 2230 // parameters, specified in the encoding block), and they use the
 2231 // opcode keyword to specify, in order, their primary, secondary, and
 2232 // tertiary opcode.  Only the opcode sections which a particular
 2233 // instruction needs for encoding need to be specified.
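      //
      // For illustration only (a schematic sketch, not one of this file's actual
      // rules), an instruct can request one of the encoding classes defined below
      // via ins_encode:
      //
      //   instruct loadConL_sketch(iRegLNoSp dst, immL src) %{  // hypothetical name
      //     match(Set dst src);
      //     ins_encode(riscv_enc_mov_imm(dst, src));
      //   %}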
 2234 encode %{
 2235   // BEGIN Non-volatile memory access
 2236 
 2237   enc_class riscv_enc_mov_imm(iRegIorL dst, immIorL src) %{
 2238     int64_t con = (int64_t)$src$$constant;
 2239     Register dst_reg = as_Register($dst$$reg);
 2240     __ mv(dst_reg, con);
 2241   %}
 2242 
 2243   enc_class riscv_enc_mov_p(iRegP dst, immP src) %{
 2244     Register dst_reg = as_Register($dst$$reg);
 2245     address con = (address)$src$$constant;
 2246     if (con == nullptr || con == (address)1) {
 2247       ShouldNotReachHere();
 2248     } else {
 2249       relocInfo::relocType rtype = $src->constant_reloc();
 2250       if (rtype == relocInfo::oop_type) {
 2251         __ movoop(dst_reg, (jobject)con);
 2252       } else if (rtype == relocInfo::metadata_type) {
 2253         __ mov_metadata(dst_reg, (Metadata*)con);
 2254       } else {
 2255         assert(rtype == relocInfo::none, "unexpected reloc type");
 2256         __ mv(dst_reg, $src$$constant);
 2257       }
 2258     }
 2259   %}
 2260 
 2261   enc_class riscv_enc_mov_p1(iRegP dst) %{
 2262     Register dst_reg = as_Register($dst$$reg);
 2263     __ mv(dst_reg, 1);
 2264   %}
 2265 
 2266   enc_class riscv_enc_mov_byte_map_base(iRegP dst) %{
 2267     __ load_byte_map_base($dst$$Register);
 2268   %}
 2269 
 2270   enc_class riscv_enc_mov_n(iRegN dst, immN src) %{
 2271     Register dst_reg = as_Register($dst$$reg);
 2272     address con = (address)$src$$constant;
 2273     if (con == nullptr) {
 2274       ShouldNotReachHere();
 2275     } else {
 2276       relocInfo::relocType rtype = $src->constant_reloc();
 2277       assert(rtype == relocInfo::oop_type, "unexpected reloc type");
 2278       __ set_narrow_oop(dst_reg, (jobject)con);
 2279     }
 2280   %}
 2281 
 2282   enc_class riscv_enc_mov_zero(iRegNorP dst) %{
 2283     Register dst_reg = as_Register($dst$$reg);
 2284     __ mv(dst_reg, zr);
 2285   %}
 2286 
 2287   enc_class riscv_enc_mov_nk(iRegN dst, immNKlass src) %{
 2288     Register dst_reg = as_Register($dst$$reg);
 2289     address con = (address)$src$$constant;
 2290     if (con == nullptr) {
 2291       ShouldNotReachHere();
 2292     } else {
 2293       relocInfo::relocType rtype = $src->constant_reloc();
 2294       assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
 2295       __ set_narrow_klass(dst_reg, (Klass *)con);
 2296     }
 2297   %}
 2298 
 2299   enc_class riscv_enc_cmpxchgw(iRegINoSp res, memory mem, iRegI oldval, iRegI newval) %{
 2300     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
 2301                /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
 2302                /*result as bool*/ true);
 2303   %}
 2304 
 2305   enc_class riscv_enc_cmpxchgn(iRegINoSp res, memory mem, iRegI oldval, iRegI newval) %{
 2306     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
 2307                /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
 2308                /*result as bool*/ true);
 2309   %}
 2310 
 2311   enc_class riscv_enc_cmpxchg(iRegINoSp res, memory mem, iRegL oldval, iRegL newval) %{
 2312     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 2313                /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
 2314                /*result as bool*/ true);
 2315   %}
 2316 
 2317   enc_class riscv_enc_cmpxchgw_acq(iRegINoSp res, memory mem, iRegI oldval, iRegI newval) %{
 2318     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
 2319                /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
 2320                /*result as bool*/ true);
 2321   %}
 2322 
 2323   enc_class riscv_enc_cmpxchgn_acq(iRegINoSp res, memory mem, iRegI oldval, iRegI newval) %{
 2324     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
 2325                /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
 2326                /*result as bool*/ true);
 2327   %}
 2328 
 2329   enc_class riscv_enc_cmpxchg_acq(iRegINoSp res, memory mem, iRegL oldval, iRegL newval) %{
 2330     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 2331                /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
 2332                /*result as bool*/ true);
 2333   %}
 2334 
 2335   // compare and branch instruction encodings
 2336 
 2337   enc_class riscv_enc_j(label lbl) %{
 2338     Label* L = $lbl$$label;
 2339     __ j(*L);
 2340   %}
 2341 
 2342   enc_class riscv_enc_far_cmpULtGe_imm0_branch(cmpOpULtGe cmp, iRegIorL op1, label lbl) %{
 2343     Label* L = $lbl$$label;
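          // Comparing an unsigned value against zero: 'lt 0' can never be taken and
          // 'ge 0' is always taken, so this far variant degenerates to either
          // nothing or an unconditional jump.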
 2344     switch ($cmp$$cmpcode) {
 2345       case(BoolTest::ge):
 2346         __ j(*L);
 2347         break;
 2348       case(BoolTest::lt):
 2349         break;
 2350       default:
 2351         Unimplemented();
 2352     }
 2353   %}
 2354 
 2355   // call instruction encodings
 2356 
 2357   enc_class riscv_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result) %{
 2358     Register sub_reg = as_Register($sub$$reg);
 2359     Register super_reg = as_Register($super$$reg);
 2360     Register temp_reg = as_Register($temp$$reg);
 2361     Register result_reg = as_Register($result$$reg);
 2362     Register cr_reg = t1;
 2363 
 2364     Label miss;
 2365     Label done;
 2366     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
 2367                                      nullptr, &miss);
 2368     if ($primary) {
 2369       __ mv(result_reg, zr);
 2370     } else {
 2371       __ mv(cr_reg, zr);
 2372       __ j(done);
 2373     }
 2374 
 2375     __ bind(miss);
 2376     if (!$primary) {
 2377       __ mv(cr_reg, 1);
 2378     }
 2379 
 2380     __ bind(done);
 2381   %}
 2382 
 2383   enc_class riscv_enc_java_static_call(method meth) %{
 2384     Assembler::IncompressibleRegion ir(masm);  // Fixed length: see ret_addr_offset
 2385 
 2386     address addr = (address)$meth$$method;
 2387     address call = nullptr;
 2388     assert_cond(addr != nullptr);
 2389     if (!_method) {
 2390       // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
 2391       call = __ reloc_call(Address(addr, relocInfo::runtime_call_type));
 2392       if (call == nullptr) {
 2393         ciEnv::current()->record_failure("CodeCache is full");
 2394         return;
 2395       }
 2396     } else if (_method->intrinsic_id() == vmIntrinsicID::_ensureMaterializedForStackWalk) {
 2397       // The NOP here is purely to ensure that eliding a call to
 2398       // JVM_EnsureMaterializedForStackWalk doesn't change the code size.
 2399       __ nop();
 2400       __ nop();
 2401       __ nop();
 2402       __ block_comment("call JVM_EnsureMaterializedForStackWalk (elided)");
 2403     } else {
 2404       int method_index = resolved_method_index(masm);
 2405       RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
 2406                                                   : static_call_Relocation::spec(method_index);
 2407       call = __ reloc_call(Address(addr, rspec));
 2408       if (call == nullptr) {
 2409         ciEnv::current()->record_failure("CodeCache is full");
 2410         return;
 2411       }
 2412 
 2413       if (CodeBuffer::supports_shared_stubs() && _method->can_be_statically_bound()) {
 2414         // Calls of the same statically bound method can share
 2415         // a stub to the interpreter.
 2416         __ code()->shared_stub_to_interp_for(_method, call - (__ begin()));
 2417       } else {
 2418         // Emit stub for static call
 2419         address stub = CompiledDirectCall::emit_to_interp_stub(masm, call);
 2420         if (stub == nullptr) {
 2421           ciEnv::current()->record_failure("CodeCache is full");
 2422           return;
 2423         }
 2424       }
 2425     }
 2426 
 2427     __ post_call_nop();
 2428   %}
 2429 
 2430   enc_class riscv_enc_java_dynamic_call(method meth) %{
 2431     Assembler::IncompressibleRegion ir(masm);  // Fixed length: see ret_addr_offset
 2432     int method_index = resolved_method_index(masm);
 2433     address call = __ ic_call((address)$meth$$method, method_index);
 2434     if (call == nullptr) {
 2435       ciEnv::current()->record_failure("CodeCache is full");
 2436       return;
 2437     }
 2438 
 2439     __ post_call_nop();
 2440   %}
 2441 
 2442   enc_class riscv_enc_call_epilog() %{
 2443     if (VerifyStackAtCalls) {
 2444       // Check that stack depth is unchanged: find majik cookie on stack
 2445       __ call_Unimplemented();
 2446     }
 2447   %}
 2448 
 2449   enc_class riscv_enc_java_to_runtime(method meth) %{
 2450     Assembler::IncompressibleRegion ir(masm);  // Fixed length: see ret_addr_offset
 2451 
 2452     // Some calls to generated routines (arraycopy code) are scheduled by C2
 2453     // as runtime calls. If so, we can call them using a far call (they will be
 2454     // in the code cache, thus in a reachable segment); otherwise we have to use
 2455     // a movptr+jalr pair which loads the absolute address into a register.
 2456     address entry = (address)$meth$$method;
 2457     if (CodeCache::contains(entry)) {
 2458       __ far_call(Address(entry, relocInfo::runtime_call_type));
 2459       __ post_call_nop();
 2460     } else {
 2461       Label retaddr;
 2462       // Make the anchor frame walkable
 2463       __ la(t0, retaddr);
 2464       __ sd(t0, Address(xthread, JavaThread::last_Java_pc_offset()));
 2465       int32_t offset = 0;
 2466       // No relocation needed
 2467       __ movptr(t1, entry, offset, t0); // lui + lui + slli + add
 2468       __ jalr(t1, offset);
 2469       __ bind(retaddr);
 2470       __ post_call_nop();
 2471     }
 2472   %}
 2473 
 2474   // arithmetic encodings
 2475 
 2476   enc_class riscv_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
 2477     Register dst_reg = as_Register($dst$$reg);
 2478     Register src1_reg = as_Register($src1$$reg);
 2479     Register src2_reg = as_Register($src2$$reg);
 2480     __ corrected_idivl(dst_reg, src1_reg, src2_reg, /* want_remainder */ false, /* is_signed */ true);
 2481   %}
 2482 
 2483   enc_class riscv_enc_divuw(iRegI dst, iRegI src1, iRegI src2) %{
 2484     Register dst_reg = as_Register($dst$$reg);
 2485     Register src1_reg = as_Register($src1$$reg);
 2486     Register src2_reg = as_Register($src2$$reg);
 2487     __ corrected_idivl(dst_reg, src1_reg, src2_reg, /* want_remainder */ false, /* is_signed */ false);
 2488   %}
 2489 
 2490   enc_class riscv_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
 2491     Register dst_reg = as_Register($dst$$reg);
 2492     Register src1_reg = as_Register($src1$$reg);
 2493     Register src2_reg = as_Register($src2$$reg);
 2494     __ corrected_idivq(dst_reg, src1_reg, src2_reg, /* want_remainder */ false, /* is_signed */ true);
 2495   %}
 2496 
 2497   enc_class riscv_enc_divu(iRegI dst, iRegI src1, iRegI src2) %{
 2498     Register dst_reg = as_Register($dst$$reg);
 2499     Register src1_reg = as_Register($src1$$reg);
 2500     Register src2_reg = as_Register($src2$$reg);
 2501     __ corrected_idivq(dst_reg, src1_reg, src2_reg, /* want_remainder */ false, /* is_signed */ false);
 2502   %}
 2503 
 2504   enc_class riscv_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
 2505     Register dst_reg = as_Register($dst$$reg);
 2506     Register src1_reg = as_Register($src1$$reg);
 2507     Register src2_reg = as_Register($src2$$reg);
 2508     __ corrected_idivl(dst_reg, src1_reg, src2_reg, /* want_remainder */ true, /* is_signed */ true);
 2509   %}
 2510 
 2511   enc_class riscv_enc_moduw(iRegI dst, iRegI src1, iRegI src2) %{
 2512     Register dst_reg = as_Register($dst$$reg);
 2513     Register src1_reg = as_Register($src1$$reg);
 2514     Register src2_reg = as_Register($src2$$reg);
 2515     __ corrected_idivl(dst_reg, src1_reg, src2_reg, /* want_remainder */ true, /* is_signed */ false);
 2516   %}
 2517 
 2518   enc_class riscv_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
 2519     Register dst_reg = as_Register($dst$$reg);
 2520     Register src1_reg = as_Register($src1$$reg);
 2521     Register src2_reg = as_Register($src2$$reg);
 2522     __ corrected_idivq(dst_reg, src1_reg, src2_reg, /* want_remainder */ true, /* is_signed */ true);
 2523   %}
 2524 
 2525   enc_class riscv_enc_modu(iRegI dst, iRegI src1, iRegI src2) %{
 2526     Register dst_reg = as_Register($dst$$reg);
 2527     Register src1_reg = as_Register($src1$$reg);
 2528     Register src2_reg = as_Register($src2$$reg);
 2529     __ corrected_idivq(dst_reg, src1_reg, src2_reg, /* want_remainder */ true, /* is_signed */ false);
 2530   %}
 2531 
 2532   enc_class riscv_enc_tail_call(iRegP jump_target) %{
 2533     Register target_reg = as_Register($jump_target$$reg);
 2534     __ jr(target_reg);
 2535   %}
 2536 
 2537   enc_class riscv_enc_tail_jmp(iRegP jump_target) %{
 2538     Register target_reg = as_Register($jump_target$$reg);
 2539     // exception oop should be in x10
 2540     // ret addr has been popped into ra
 2541     // callee expects it in x13
 2542     __ mv(x13, ra);
 2543     __ jr(target_reg);
 2544   %}
 2545 
 2546   enc_class riscv_enc_rethrow() %{
 2547     __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
 2548   %}
 2549 
 2550   enc_class riscv_enc_ret() %{
 2551     __ ret();
 2552   %}
 2553 
 2554 %}
 2555 
 2556 //----------FRAME--------------------------------------------------------------
 2557 // Definition of frame structure and management information.
 2558 //
 2559 //  S T A C K   L A Y O U T    Allocators stack-slot number
 2560 //                             |   (to get allocators register number
 2561 //  G  Owned by    |        |  v    add OptoReg::stack0())
 2562 //  r   CALLER     |        |
 2563 //  o     |        +--------+      pad to even-align allocators stack-slot
 2564 //  w     V        |  pad0  |        numbers; owned by CALLER
 2565 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
 2566 //  h     ^        |   in   |  5
 2567 //        |        |  args  |  4   Holes in incoming args owned by SELF
 2568 //  |     |        |        |  3
 2569 //  |     |        +--------+
 2570 //  V     |        | old out|      Empty on Intel, window on Sparc
 2571 //        |    old |preserve|      Must be even aligned.
 2572 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
 2573 //        |        |   in   |  3   area for Intel ret address
 2574 //     Owned by    |preserve|      Empty on Sparc.
 2575 //       SELF      +--------+
 2576 //        |        |  pad2  |  2   pad to align old SP
 2577 //        |        +--------+  1
 2578 //        |        | locks  |  0
 2579 //        |        +--------+----> OptoReg::stack0(), even aligned
 2580 //        |        |  pad1  | 11   pad to align new SP
 2581 //        |        +--------+
 2582 //        |        |        | 10
 2583 //        |        | spills |  9   spills
 2584 //        V        |        |  8   (pad0 slot for callee)
 2585 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
 2586 //        ^        |  out   |  7
 2587 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
 2588 //     Owned by    +--------+
 2589 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
 2590 //        |    new |preserve|      Must be even-aligned.
 2591 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
 2592 //        |        |        |
 2593 //
 2594 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
 2595 //         known from SELF's arguments and the Java calling convention.
 2596 //         Region 6-7 is determined per call site.
 2597 // Note 2: If the calling convention leaves holes in the incoming argument
 2598 //         area, those holes are owned by SELF.  Holes in the outgoing area
 2599 //         are owned by the CALLEE.  Holes should not be necessary in the
 2600 //         incoming area, as the Java calling convention is completely under
 2601 //         the control of the AD file.  Doubles can be sorted and packed to
 2602 //         avoid holes.  Holes in the outgoing arguments may be necessary for
 2603 //         varargs C calling conventions.
 2604 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
 2605 //         even aligned with pad0 as needed.
 2606 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
 2607 //           (the latter is true on Intel but is it false on RISCV?)
 2608 //         region 6-11 is even aligned; it may be padded out more so that
 2609 //         the region from SP to FP meets the minimum stack alignment.
 2610 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
 2611 //         alignment.  Region 11, pad1, may be dynamically extended so that
 2612 //         SP meets the minimum alignment.
 2613 
 2614 frame %{
 2615   // These three registers define part of the calling convention
 2616   // between compiled code and the interpreter.
 2617 
 2618   // Inline Cache Register or methodOop for I2C.
 2619   inline_cache_reg(R31);
 2620 
 2621   // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
 2622   cisc_spilling_operand_name(indOffset);
 2623 
 2624   // Number of stack slots consumed by locking an object
 2625   // generate Compile::sync_stack_slots
 2626   // VMRegImpl::slots_per_word = wordSize / stack_slot_size = 8 / 4 = 2
 2627   sync_stack_slots(1 * VMRegImpl::slots_per_word);
 2628 
 2629   // Compiled code's Frame Pointer
 2630   frame_pointer(R2);
 2631 
 2632   // Interpreter stores its frame pointer in a register which is
 2633   // stored to the stack by I2CAdaptors.
 2634   // I2CAdaptors convert from interpreted java to compiled java.
 2635   interpreter_frame_pointer(R8);
 2636 
 2637   // Stack alignment requirement
 2638   stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)
 2639 
 2640   // Number of outgoing stack slots killed above the out_preserve_stack_slots
 2641   // for calls to C.  Supports the var-args backing area for register parms.
 2642   varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes / BytesPerInt);
 2643 
 2644   // The after-PROLOG location of the return address.  Location of
 2645   // return address specifies a type (REG or STACK) and a number
 2646   // representing the register number (i.e. - use a register name) or
 2647   // stack slot.
 2648   // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
 2649   // Otherwise, it is above the locks and verification slot and alignment word
 2650   // TODO: this may well be correct, but we need to check why that - 2 is there.
 2651   // The ppc port uses 0, but we definitely need to allow for fixed_slots,
 2652   // which folds in the space used for monitors.
 2653   return_addr(STACK - 2 +
 2654               align_up((Compile::current()->in_preserve_stack_slots() +
 2655                         Compile::current()->fixed_slots()),
 2656                        stack_alignment_in_slots()));
 2657 
 2658   // Location of compiled Java return values.  Same as C for now.
 2659   return_value
 2660   %{
 2661     assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
 2662            "only return normal values");
 2663 
 2664     static const int lo[Op_RegL + 1] = { // enum name
 2665       0,                                 // Op_Node
 2666       0,                                 // Op_Set
 2667       R10_num,                           // Op_RegN
 2668       R10_num,                           // Op_RegI
 2669       R10_num,                           // Op_RegP
 2670       F10_num,                           // Op_RegF
 2671       F10_num,                           // Op_RegD
 2672       R10_num                            // Op_RegL
 2673     };
 2674 
 2675     static const int hi[Op_RegL + 1] = { // enum name
 2676       0,                                 // Op_Node
 2677       0,                                 // Op_Set
 2678       OptoReg::Bad,                      // Op_RegN
 2679       OptoReg::Bad,                      // Op_RegI
 2680       R10_H_num,                         // Op_RegP
 2681       OptoReg::Bad,                      // Op_RegF
 2682       F10_H_num,                         // Op_RegD
 2683       R10_H_num                          // Op_RegL
 2684     };
 2685 
 2686     return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
 2687   %}
 2688 %}
 2689 
 2690 //----------ATTRIBUTES---------------------------------------------------------
 2691 //----------Operand Attributes-------------------------------------------------
 2692 op_attrib op_cost(1);        // Required cost attribute
 2693 
 2694 //----------Instruction Attributes---------------------------------------------
 2695 ins_attrib ins_cost(DEFAULT_COST); // Required cost attribute
 2696 ins_attrib ins_size(32);        // Required size attribute (in bits)
 2697 ins_attrib ins_short_branch(0); // Required flag: is this instruction
 2698                                 // a non-matching short branch variant
 2699                                 // of some long branch?
 2700 ins_attrib ins_alignment(4);    // Required alignment attribute (must
 2701                                 // be a power of 2) specifies the
 2702                                 // alignment that some part of the
 2703                                 // instruction (not necessarily the
 2704                                 // start) requires.  If > 1, a
 2705                                 // compute_padding() function must be
 2706                                 // provided for the instruction
 2707 
 2708 //----------OPERANDS-----------------------------------------------------------
 2709 // Operand definitions must precede instruction definitions for correct parsing
 2710 // in the ADLC because operands constitute user defined types which are used in
 2711 // instruction definitions.
 2712 
 2713 //----------Simple Operands----------------------------------------------------
 2714 
 2715 // Integer operands 32 bit
 2716 // 32 bit immediate
 2717 operand immI()
 2718 %{
 2719   match(ConI);
 2720 
 2721   op_cost(0);
 2722   format %{ %}
 2723   interface(CONST_INTER);
 2724 %}
 2725 
 2726 // 32 bit zero
 2727 operand immI0()
 2728 %{
 2729   predicate(n->get_int() == 0);
 2730   match(ConI);
 2731 
 2732   op_cost(0);
 2733   format %{ %}
 2734   interface(CONST_INTER);
 2735 %}
 2736 
 2737 // 32 bit unit increment
 2738 operand immI_1()
 2739 %{
 2740   predicate(n->get_int() == 1);
 2741   match(ConI);
 2742 
 2743   op_cost(0);
 2744   format %{ %}
 2745   interface(CONST_INTER);
 2746 %}
 2747 
 2748 // 32 bit unit decrement
 2749 operand immI_M1()
 2750 %{
 2751   predicate(n->get_int() == -1);
 2752   match(ConI);
 2753 
 2754   op_cost(0);
 2755   format %{ %}
 2756   interface(CONST_INTER);
 2757 %}
 2758 
 2759 // Unsigned Integer Immediate:  6-bit int, not less than 32 (i.e. in [32, 64))
 2760 operand uimmI6_ge32() %{
 2761   predicate(((unsigned int)(n->get_int()) < 64) && (n->get_int() >= 32));
 2762   match(ConI);
 2763   op_cost(0);
 2764   format %{ %}
 2765   interface(CONST_INTER);
 2766 %}
 2767 
 2768 operand immI_le_4()
 2769 %{
 2770   predicate(n->get_int() <= 4);
 2771   match(ConI);
 2772 
 2773   op_cost(0);
 2774   format %{ %}
 2775   interface(CONST_INTER);
 2776 %}
 2777 
 2778 operand immI_16()
 2779 %{
 2780   predicate(n->get_int() == 16);
 2781   match(ConI);
 2782   op_cost(0);
 2783   format %{ %}
 2784   interface(CONST_INTER);
 2785 %}
 2786 
 2787 operand immI_24()
 2788 %{
 2789   predicate(n->get_int() == 24);
 2790   match(ConI);
 2791   op_cost(0);
 2792   format %{ %}
 2793   interface(CONST_INTER);
 2794 %}
 2795 
 2796 operand immI_31()
 2797 %{
 2798   predicate(n->get_int() == 31);
 2799   match(ConI);
 2800 
 2801   op_cost(0);
 2802   format %{ %}
 2803   interface(CONST_INTER);
 2804 %}
 2805 
 2806 operand immI_63()
 2807 %{
 2808   predicate(n->get_int() == 63);
 2809   match(ConI);
 2810 
 2811   op_cost(0);
 2812   format %{ %}
 2813   interface(CONST_INTER);
 2814 %}
 2815 
 2816 // 32 bit integer valid for add immediate
 2817 operand immIAdd()
 2818 %{
 2819   predicate(Assembler::is_simm12((int64_t)n->get_int()));
 2820   match(ConI);
 2821   op_cost(0);
 2822   format %{ %}
 2823   interface(CONST_INTER);
 2824 %}
 2825 
 2826 // 32 bit integer valid for sub immediate
 2827 operand immISub()
 2828 %{
 2829   predicate(Assembler::is_simm12(-(int64_t)n->get_int()));
 2830   match(ConI);
 2831   op_cost(0);
 2832   format %{ %}
 2833   interface(CONST_INTER);
 2834 %}
 2835 
 2836 // 5 bit signed value.
 2837 operand immI5()
 2838 %{
 2839   predicate(n->get_int() <= 15 && n->get_int() >= -16);
 2840   match(ConI);
 2841 
 2842   op_cost(0);
 2843   format %{ %}
 2844   interface(CONST_INTER);
 2845 %}
 2846 
 2847 // 5 bit signed value (simm5)
 2848 operand immL5()
 2849 %{
 2850   predicate(n->get_long() <= 15 && n->get_long() >= -16);
 2851   match(ConL);
 2852 
 2853   op_cost(0);
 2854   format %{ %}
 2855   interface(CONST_INTER);
 2856 %}
 2857 
 2858 // Integer operands 64 bit
 2859 // 64 bit immediate
 2860 operand immL()
 2861 %{
 2862   match(ConL);
 2863 
 2864   op_cost(0);
 2865   format %{ %}
 2866   interface(CONST_INTER);
 2867 %}
 2868 
 2869 // 64 bit zero
 2870 operand immL0()
 2871 %{
 2872   predicate(n->get_long() == 0);
 2873   match(ConL);
 2874 
 2875   op_cost(0);
 2876   format %{ %}
 2877   interface(CONST_INTER);
 2878 %}
 2879 
 2880 // Pointer operands
 2881 // Pointer Immediate
 2882 operand immP()
 2883 %{
 2884   match(ConP);
 2885 
 2886   op_cost(0);
 2887   format %{ %}
 2888   interface(CONST_INTER);
 2889 %}
 2890 
 2891 // Null Pointer Immediate
 2892 operand immP0()
 2893 %{
 2894   predicate(n->get_ptr() == 0);
 2895   match(ConP);
 2896 
 2897   op_cost(0);
 2898   format %{ %}
 2899   interface(CONST_INTER);
 2900 %}
 2901 
 2902 // Pointer Immediate One
// This is used in object initialization (initial object header).
 2904 operand immP_1()
 2905 %{
 2906   predicate(n->get_ptr() == 1);
 2907   match(ConP);
 2908 
 2909   op_cost(0);
 2910   format %{ %}
 2911   interface(CONST_INTER);
 2912 %}
 2913 
 2914 // Card Table Byte Map Base
 2915 operand immByteMapBase()
 2916 %{
 2917   // Get base of card map
 2918   predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
 2919             (CardTable::CardValue*)n->get_ptr() ==
 2920              ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
 2921   match(ConP);
 2922 
 2923   op_cost(0);
 2924   format %{ %}
 2925   interface(CONST_INTER);
 2926 %}
 2927 
 2928 // Int Immediate: low 16-bit mask
 2929 operand immI_16bits()
 2930 %{
 2931   predicate(n->get_int() == 0xFFFF);
 2932   match(ConI);
 2933   op_cost(0);
 2934   format %{ %}
 2935   interface(CONST_INTER);
 2936 %}
 2937 
 2938 operand immIpowerOf2() %{
 2939   predicate(is_power_of_2((juint)(n->get_int())));
 2940   match(ConI);
 2941   op_cost(0);
 2942   format %{ %}
 2943   interface(CONST_INTER);
 2944 %}
 2945 
 2946 // Long Immediate: low 32-bit mask
 2947 operand immL_32bits()
 2948 %{
 2949   predicate(n->get_long() == 0xFFFFFFFFL);
 2950   match(ConL);
 2951   op_cost(0);
 2952   format %{ %}
 2953   interface(CONST_INTER);
 2954 %}
 2955 
 2956 // 64 bit unit decrement
 2957 operand immL_M1()
 2958 %{
 2959   predicate(n->get_long() == -1);
 2960   match(ConL);
 2961 
 2962   op_cost(0);
 2963   format %{ %}
 2964   interface(CONST_INTER);
 2965 %}
 2966 
 2967 
 2968 // 64 bit integer valid for add immediate
 2969 operand immLAdd()
 2970 %{
 2971   predicate(Assembler::is_simm12(n->get_long()));
 2972   match(ConL);
 2973   op_cost(0);
 2974   format %{ %}
 2975   interface(CONST_INTER);
 2976 %}
 2977 
 2978 // 64 bit integer valid for sub immediate
 2979 operand immLSub()
 2980 %{
 2981   predicate(Assembler::is_simm12(-(n->get_long())));
 2982   match(ConL);
 2983   op_cost(0);
 2984   format %{ %}
 2985   interface(CONST_INTER);
 2986 %}
 2987 
 2988 // Narrow pointer operands
 2989 // Narrow Pointer Immediate
 2990 operand immN()
 2991 %{
 2992   match(ConN);
 2993 
 2994   op_cost(0);
 2995   format %{ %}
 2996   interface(CONST_INTER);
 2997 %}
 2998 
 2999 // Narrow Null Pointer Immediate
 3000 operand immN0()
 3001 %{
 3002   predicate(n->get_narrowcon() == 0);
 3003   match(ConN);
 3004 
 3005   op_cost(0);
 3006   format %{ %}
 3007   interface(CONST_INTER);
 3008 %}
 3009 
 3010 operand immNKlass()
 3011 %{
 3012   match(ConNKlass);
 3013 
 3014   op_cost(0);
 3015   format %{ %}
 3016   interface(CONST_INTER);
 3017 %}
 3018 
 3019 // Float and Double operands
 3020 // Double Immediate
 3021 operand immD()
 3022 %{
 3023   match(ConD);
 3024   op_cost(0);
 3025   format %{ %}
 3026   interface(CONST_INTER);
 3027 %}
 3028 
 3029 // Double Immediate: +0.0d
 3030 operand immD0()
 3031 %{
 3032   predicate(jlong_cast(n->getd()) == 0);
 3033   match(ConD);
 3034 
 3035   op_cost(0);
 3036   format %{ %}
 3037   interface(CONST_INTER);
 3038 %}
 3039 
 3040 // Float Immediate
 3041 operand immF()
 3042 %{
 3043   match(ConF);
 3044   op_cost(0);
 3045   format %{ %}
 3046   interface(CONST_INTER);
 3047 %}
 3048 
 3049 // Float Immediate: +0.0f.
 3050 operand immF0()
 3051 %{
 3052   predicate(jint_cast(n->getf()) == 0);
 3053   match(ConF);
 3054 
 3055   op_cost(0);
 3056   format %{ %}
 3057   interface(CONST_INTER);
 3058 %}
 3059 
// Int offset that fits in a 12-bit signed immediate (load/store displacement)
operand immIOffset()
 3061 %{
 3062   predicate(Assembler::is_simm12(n->get_int()));
 3063   match(ConI);
 3064   op_cost(0);
 3065   format %{ %}
 3066   interface(CONST_INTER);
 3067 %}
 3068 
// Long offset that fits in a 12-bit signed immediate (load/store displacement)
operand immLOffset()
 3070 %{
 3071   predicate(Assembler::is_simm12(n->get_long()));
 3072   match(ConL);
 3073   op_cost(0);
 3074   format %{ %}
 3075   interface(CONST_INTER);
 3076 %}
 3077 
 3078 // Scale values
 3079 operand immIScale()
 3080 %{
 3081   predicate(1 <= n->get_int() && (n->get_int() <= 3));
 3082   match(ConI);
 3083 
 3084   op_cost(0);
 3085   format %{ %}
 3086   interface(CONST_INTER);
 3087 %}
 3088 
 3089 // Integer 32 bit Register Operands
 3090 operand iRegI()
 3091 %{
 3092   constraint(ALLOC_IN_RC(any_reg32));
 3093   match(RegI);
 3094   match(iRegINoSp);
 3095   op_cost(0);
 3096   format %{ %}
 3097   interface(REG_INTER);
 3098 %}
 3099 
 3100 // Integer 32 bit Register not Special
 3101 operand iRegINoSp()
 3102 %{
 3103   constraint(ALLOC_IN_RC(no_special_reg32));
 3104   match(RegI);
 3105   op_cost(0);
 3106   format %{ %}
 3107   interface(REG_INTER);
 3108 %}
 3109 
 3110 // Register R10 only
 3111 operand iRegI_R10()
 3112 %{
 3113   constraint(ALLOC_IN_RC(int_r10_reg));
 3114   match(RegI);
 3115   match(iRegINoSp);
 3116   op_cost(0);
 3117   format %{ %}
 3118   interface(REG_INTER);
 3119 %}
 3120 
 3121 // Register R12 only
 3122 operand iRegI_R12()
 3123 %{
 3124   constraint(ALLOC_IN_RC(int_r12_reg));
 3125   match(RegI);
 3126   match(iRegINoSp);
 3127   op_cost(0);
 3128   format %{ %}
 3129   interface(REG_INTER);
 3130 %}
 3131 
 3132 // Register R13 only
 3133 operand iRegI_R13()
 3134 %{
 3135   constraint(ALLOC_IN_RC(int_r13_reg));
 3136   match(RegI);
 3137   match(iRegINoSp);
 3138   op_cost(0);
 3139   format %{ %}
 3140   interface(REG_INTER);
 3141 %}
 3142 
 3143 // Register R14 only
 3144 operand iRegI_R14()
 3145 %{
 3146   constraint(ALLOC_IN_RC(int_r14_reg));
 3147   match(RegI);
 3148   match(iRegINoSp);
 3149   op_cost(0);
 3150   format %{ %}
 3151   interface(REG_INTER);
 3152 %}
 3153 
 3154 // Integer 64 bit Register Operands
 3155 operand iRegL()
 3156 %{
 3157   constraint(ALLOC_IN_RC(any_reg));
 3158   match(RegL);
 3159   match(iRegLNoSp);
 3160   op_cost(0);
 3161   format %{ %}
 3162   interface(REG_INTER);
 3163 %}
 3164 
 3165 // Integer 64 bit Register not Special
 3166 operand iRegLNoSp()
 3167 %{
 3168   constraint(ALLOC_IN_RC(no_special_reg));
 3169   match(RegL);
 3170   match(iRegL_R10);
 3171   format %{ %}
 3172   interface(REG_INTER);
 3173 %}
 3174 
 3175 // Long 64 bit Register R29 only
 3176 operand iRegL_R29()
 3177 %{
 3178   constraint(ALLOC_IN_RC(r29_reg));
 3179   match(RegL);
 3180   match(iRegLNoSp);
 3181   op_cost(0);
 3182   format %{ %}
 3183   interface(REG_INTER);
 3184 %}
 3185 
 3186 // Long 64 bit Register R30 only
 3187 operand iRegL_R30()
 3188 %{
 3189   constraint(ALLOC_IN_RC(r30_reg));
 3190   match(RegL);
 3191   match(iRegLNoSp);
 3192   op_cost(0);
 3193   format %{ %}
 3194   interface(REG_INTER);
 3195 %}
 3196 
 3197 // Pointer Register Operands
 3198 // Pointer Register
 3199 operand iRegP()
 3200 %{
 3201   constraint(ALLOC_IN_RC(ptr_reg));
 3202   match(RegP);
 3203   match(iRegPNoSp);
 3204   match(iRegP_R10);
 3205   match(iRegP_R15);
 3206   match(javaThread_RegP);
 3207   op_cost(0);
 3208   format %{ %}
 3209   interface(REG_INTER);
 3210 %}
 3211 
 3212 // Pointer 64 bit Register not Special
 3213 operand iRegPNoSp()
 3214 %{
 3215   constraint(ALLOC_IN_RC(no_special_ptr_reg));
 3216   match(RegP);
 3217   op_cost(0);
 3218   format %{ %}
 3219   interface(REG_INTER);
 3220 %}
 3221 
 3222 // This operand is not allowed to use fp even if
 3223 // fp is not used to hold the frame pointer.
 3224 operand iRegPNoSpNoFp()
 3225 %{
 3226   constraint(ALLOC_IN_RC(no_special_no_fp_ptr_reg));
 3227   match(RegP);
 3228   match(iRegPNoSp);
 3229   op_cost(0);
 3230   format %{ %}
 3231   interface(REG_INTER);
 3232 %}
 3233 
 3234 operand iRegP_R10()
 3235 %{
 3236   constraint(ALLOC_IN_RC(r10_reg));
 3237   match(RegP);
 3238   // match(iRegP);
 3239   match(iRegPNoSp);
 3240   op_cost(0);
 3241   format %{ %}
 3242   interface(REG_INTER);
 3243 %}
 3244 
 3245 // Pointer 64 bit Register R11 only
 3246 operand iRegP_R11()
 3247 %{
 3248   constraint(ALLOC_IN_RC(r11_reg));
 3249   match(RegP);
 3250   match(iRegPNoSp);
 3251   op_cost(0);
 3252   format %{ %}
 3253   interface(REG_INTER);
 3254 %}
 3255 
 3256 operand iRegP_R12()
 3257 %{
 3258   constraint(ALLOC_IN_RC(r12_reg));
 3259   match(RegP);
 3260   // match(iRegP);
 3261   match(iRegPNoSp);
 3262   op_cost(0);
 3263   format %{ %}
 3264   interface(REG_INTER);
 3265 %}
 3266 
 3267 // Pointer 64 bit Register R13 only
 3268 operand iRegP_R13()
 3269 %{
 3270   constraint(ALLOC_IN_RC(r13_reg));
 3271   match(RegP);
 3272   match(iRegPNoSp);
 3273   op_cost(0);
 3274   format %{ %}
 3275   interface(REG_INTER);
 3276 %}
 3277 
 3278 operand iRegP_R14()
 3279 %{
 3280   constraint(ALLOC_IN_RC(r14_reg));
 3281   match(RegP);
 3282   // match(iRegP);
 3283   match(iRegPNoSp);
 3284   op_cost(0);
 3285   format %{ %}
 3286   interface(REG_INTER);
 3287 %}
 3288 
 3289 operand iRegP_R15()
 3290 %{
 3291   constraint(ALLOC_IN_RC(r15_reg));
 3292   match(RegP);
 3293   // match(iRegP);
 3294   match(iRegPNoSp);
 3295   op_cost(0);
 3296   format %{ %}
 3297   interface(REG_INTER);
 3298 %}
 3299 
 3300 operand iRegP_R16()
 3301 %{
 3302   constraint(ALLOC_IN_RC(r16_reg));
 3303   match(RegP);
 3304   match(iRegPNoSp);
 3305   op_cost(0);
 3306   format %{ %}
 3307   interface(REG_INTER);
 3308 %}
 3309 
 3310 // Pointer 64 bit Register R28 only
 3311 operand iRegP_R28()
 3312 %{
 3313   constraint(ALLOC_IN_RC(r28_reg));
 3314   match(RegP);
 3315   match(iRegPNoSp);
 3316   op_cost(0);
 3317   format %{ %}
 3318   interface(REG_INTER);
 3319 %}
 3320 
 3321 // Pointer 64 bit Register R30 only
 3322 operand iRegP_R30()
 3323 %{
 3324   constraint(ALLOC_IN_RC(r30_reg));
 3325   match(RegP);
 3326   match(iRegPNoSp);
 3327   op_cost(0);
 3328   format %{ %}
 3329   interface(REG_INTER);
 3330 %}
 3331 
 3332 // Pointer 64 bit Register R31 only
 3333 operand iRegP_R31()
 3334 %{
 3335   constraint(ALLOC_IN_RC(r31_reg));
 3336   match(RegP);
 3337   match(iRegPNoSp);
 3338   op_cost(0);
 3339   format %{ %}
 3340   interface(REG_INTER);
 3341 %}
 3342 
// Narrow Pointer Register Operands
 3344 // Narrow Pointer Register
 3345 operand iRegN()
 3346 %{
 3347   constraint(ALLOC_IN_RC(any_reg32));
 3348   match(RegN);
 3349   match(iRegNNoSp);
 3350   op_cost(0);
 3351   format %{ %}
 3352   interface(REG_INTER);
 3353 %}
 3354 
// Narrow Pointer Register not Special
 3356 operand iRegNNoSp()
 3357 %{
 3358   constraint(ALLOC_IN_RC(no_special_reg32));
 3359   match(RegN);
 3360   op_cost(0);
 3361   format %{ %}
 3362   interface(REG_INTER);
 3363 %}
 3364 
 3365 // Long 64 bit Register R10 only
 3366 operand iRegL_R10()
 3367 %{
 3368   constraint(ALLOC_IN_RC(r10_reg));
 3369   match(RegL);
 3370   match(iRegLNoSp);
 3371   op_cost(0);
 3372   format %{ %}
 3373   interface(REG_INTER);
 3374 %}
 3375 
// Float register operands
 3378 operand fRegF()
 3379 %{
 3380   constraint(ALLOC_IN_RC(float_reg));
 3381   match(RegF);
 3382 
 3383   op_cost(0);
 3384   format %{ %}
 3385   interface(REG_INTER);
 3386 %}
 3387 
// Double register operands
 3390 operand fRegD()
 3391 %{
 3392   constraint(ALLOC_IN_RC(double_reg));
 3393   match(RegD);
 3394 
 3395   op_cost(0);
 3396   format %{ %}
 3397   interface(REG_INTER);
 3398 %}
 3399 
 3400 // Generic vector class. This will be used for
 3401 // all vector operands.
 3402 operand vReg()
 3403 %{
 3404   constraint(ALLOC_IN_RC(vectora_reg));
 3405   match(VecA);
 3406   op_cost(0);
 3407   format %{ %}
 3408   interface(REG_INTER);
 3409 %}
 3410 
 3411 operand vReg_V1()
 3412 %{
 3413   constraint(ALLOC_IN_RC(v1_reg));
 3414   match(VecA);
 3415   match(vReg);
 3416   op_cost(0);
 3417   format %{ %}
 3418   interface(REG_INTER);
 3419 %}
 3420 
 3421 operand vReg_V2()
 3422 %{
 3423   constraint(ALLOC_IN_RC(v2_reg));
 3424   match(VecA);
 3425   match(vReg);
 3426   op_cost(0);
 3427   format %{ %}
 3428   interface(REG_INTER);
 3429 %}
 3430 
 3431 operand vReg_V3()
 3432 %{
 3433   constraint(ALLOC_IN_RC(v3_reg));
 3434   match(VecA);
 3435   match(vReg);
 3436   op_cost(0);
 3437   format %{ %}
 3438   interface(REG_INTER);
 3439 %}
 3440 
 3441 operand vReg_V4()
 3442 %{
 3443   constraint(ALLOC_IN_RC(v4_reg));
 3444   match(VecA);
 3445   match(vReg);
 3446   op_cost(0);
 3447   format %{ %}
 3448   interface(REG_INTER);
 3449 %}
 3450 
 3451 operand vReg_V5()
 3452 %{
 3453   constraint(ALLOC_IN_RC(v5_reg));
 3454   match(VecA);
 3455   match(vReg);
 3456   op_cost(0);
 3457   format %{ %}
 3458   interface(REG_INTER);
 3459 %}
 3460 
 3461 operand vReg_V6()
 3462 %{
 3463   constraint(ALLOC_IN_RC(v6_reg));
 3464   match(VecA);
 3465   match(vReg);
 3466   op_cost(0);
 3467   format %{ %}
 3468   interface(REG_INTER);
 3469 %}
 3470 
 3471 operand vReg_V7()
 3472 %{
 3473   constraint(ALLOC_IN_RC(v7_reg));
 3474   match(VecA);
 3475   match(vReg);
 3476   op_cost(0);
 3477   format %{ %}
 3478   interface(REG_INTER);
 3479 %}
 3480 
 3481 operand vReg_V8()
 3482 %{
 3483   constraint(ALLOC_IN_RC(v8_reg));
 3484   match(VecA);
 3485   match(vReg);
 3486   op_cost(0);
 3487   format %{ %}
 3488   interface(REG_INTER);
 3489 %}
 3490 
 3491 operand vReg_V9()
 3492 %{
 3493   constraint(ALLOC_IN_RC(v9_reg));
 3494   match(VecA);
 3495   match(vReg);
 3496   op_cost(0);
 3497   format %{ %}
 3498   interface(REG_INTER);
 3499 %}
 3500 
 3501 operand vReg_V10()
 3502 %{
 3503   constraint(ALLOC_IN_RC(v10_reg));
 3504   match(VecA);
 3505   match(vReg);
 3506   op_cost(0);
 3507   format %{ %}
 3508   interface(REG_INTER);
 3509 %}
 3510 
 3511 operand vReg_V11()
 3512 %{
 3513   constraint(ALLOC_IN_RC(v11_reg));
 3514   match(VecA);
 3515   match(vReg);
 3516   op_cost(0);
 3517   format %{ %}
 3518   interface(REG_INTER);
 3519 %}
 3520 
 3521 operand vRegMask()
 3522 %{
 3523   constraint(ALLOC_IN_RC(vmask_reg));
 3524   match(RegVectMask);
 3525   match(vRegMask_V0);
 3526   op_cost(0);
 3527   format %{ %}
 3528   interface(REG_INTER);
 3529 %}
 3530 
 3531 // The mask value used to control execution of a masked
 3532 // vector instruction is always supplied by vector register v0.
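// For illustration, in RVV assembly the masked form of an instruction names v0
// explicitly, e.g. vadd.vv vd, vs2, vs1, v0.t.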
 3533 operand vRegMask_V0()
 3534 %{
 3535   constraint(ALLOC_IN_RC(vmask_reg_v0));
 3536   match(RegVectMask);
 3537   match(vRegMask);
 3538   op_cost(0);
 3539   format %{ %}
 3540   interface(REG_INTER);
 3541 %}
 3542 
 3543 // Java Thread Register
 3544 operand javaThread_RegP(iRegP reg)
 3545 %{
 3546   constraint(ALLOC_IN_RC(java_thread_reg)); // java_thread_reg
 3547   match(reg);
 3548   op_cost(0);
 3549   format %{ %}
 3550   interface(REG_INTER);
 3551 %}
 3552 
 3553 //----------Memory Operands----------------------------------------------------
// RISC-V has only base_plus_offset and literal addressing modes, so there is no
// need for an index or scale. The index is set to 0xffffffff and the scale to
// 0x0, both of which mean "unused".
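//
// For illustration (not additional matcher input): an indOffI operand prints as
// [$reg, $off] and is typically consumed in an ins_encode block as
//   Address(as_Register($mem$$base), $mem$$disp)
// which the assembler turns into a plain base+offset access such as lw t0, 16(x10).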
 3556 operand indirect(iRegP reg)
 3557 %{
 3558   constraint(ALLOC_IN_RC(ptr_reg));
 3559   match(reg);
 3560   op_cost(0);
 3561   format %{ "[$reg]" %}
 3562   interface(MEMORY_INTER) %{
 3563     base($reg);
 3564     index(0xffffffff);
 3565     scale(0x0);
 3566     disp(0x0);
 3567   %}
 3568 %}
 3569 
 3570 operand indOffI(iRegP reg, immIOffset off)
 3571 %{
 3572   constraint(ALLOC_IN_RC(ptr_reg));
 3573   match(AddP reg off);
 3574   op_cost(0);
 3575   format %{ "[$reg, $off]" %}
 3576   interface(MEMORY_INTER) %{
 3577     base($reg);
 3578     index(0xffffffff);
 3579     scale(0x0);
 3580     disp($off);
 3581   %}
 3582 %}
 3583 
 3584 operand indOffL(iRegP reg, immLOffset off)
 3585 %{
 3586   constraint(ALLOC_IN_RC(ptr_reg));
 3587   match(AddP reg off);
 3588   op_cost(0);
 3589   format %{ "[$reg, $off]" %}
 3590   interface(MEMORY_INTER) %{
 3591     base($reg);
 3592     index(0xffffffff);
 3593     scale(0x0);
 3594     disp($off);
 3595   %}
 3596 %}
 3597 
 3598 operand indirectN(iRegN reg)
 3599 %{
 3600   predicate(CompressedOops::shift() == 0);
 3601   constraint(ALLOC_IN_RC(ptr_reg));
 3602   match(DecodeN reg);
 3603   op_cost(0);
 3604   format %{ "[$reg]\t# narrow" %}
 3605   interface(MEMORY_INTER) %{
 3606     base($reg);
 3607     index(0xffffffff);
 3608     scale(0x0);
 3609     disp(0x0);
 3610   %}
 3611 %}
 3612 
 3613 operand indOffIN(iRegN reg, immIOffset off)
 3614 %{
 3615   predicate(CompressedOops::shift() == 0);
 3616   constraint(ALLOC_IN_RC(ptr_reg));
 3617   match(AddP (DecodeN reg) off);
 3618   op_cost(0);
 3619   format %{ "[$reg, $off]\t# narrow" %}
 3620   interface(MEMORY_INTER) %{
 3621     base($reg);
 3622     index(0xffffffff);
 3623     scale(0x0);
 3624     disp($off);
 3625   %}
 3626 %}
 3627 
 3628 operand indOffLN(iRegN reg, immLOffset off)
 3629 %{
 3630   predicate(CompressedOops::shift() == 0);
 3631   constraint(ALLOC_IN_RC(ptr_reg));
 3632   match(AddP (DecodeN reg) off);
 3633   op_cost(0);
 3634   format %{ "[$reg, $off]\t# narrow" %}
 3635   interface(MEMORY_INTER) %{
 3636     base($reg);
 3637     index(0xffffffff);
 3638     scale(0x0);
 3639     disp($off);
 3640   %}
 3641 %}
 3642 
 3643 //----------Special Memory Operands--------------------------------------------
 3644 // Stack Slot Operand - This operand is used for loading and storing temporary
 3645 //                      values on the stack where a match requires a value to
 3646 //                      flow through memory.
 3647 operand stackSlotI(sRegI reg)
 3648 %{
 3649   constraint(ALLOC_IN_RC(stack_slots));
 3650   // No match rule because this operand is only generated in matching
 3651   // match(RegI);
 3652   format %{ "[$reg]" %}
 3653   interface(MEMORY_INTER) %{
    base(0x02);  // sp
 3655     index(0xffffffff);  // No Index
 3656     scale(0x0);  // No Scale
 3657     disp($reg);  // Stack Offset
 3658   %}
 3659 %}
 3660 
 3661 operand stackSlotF(sRegF reg)
 3662 %{
 3663   constraint(ALLOC_IN_RC(stack_slots));
 3664   // No match rule because this operand is only generated in matching
 3665   // match(RegF);
 3666   format %{ "[$reg]" %}
 3667   interface(MEMORY_INTER) %{
    base(0x02);  // sp
 3669     index(0xffffffff);  // No Index
 3670     scale(0x0);  // No Scale
 3671     disp($reg);  // Stack Offset
 3672   %}
 3673 %}
 3674 
 3675 operand stackSlotD(sRegD reg)
 3676 %{
 3677   constraint(ALLOC_IN_RC(stack_slots));
 3678   // No match rule because this operand is only generated in matching
 3679   // match(RegD);
 3680   format %{ "[$reg]" %}
 3681   interface(MEMORY_INTER) %{
    base(0x02);  // sp
 3683     index(0xffffffff);  // No Index
 3684     scale(0x0);  // No Scale
 3685     disp($reg);  // Stack Offset
 3686   %}
 3687 %}
 3688 
 3689 operand stackSlotL(sRegL reg)
 3690 %{
 3691   constraint(ALLOC_IN_RC(stack_slots));
 3692   // No match rule because this operand is only generated in matching
 3693   // match(RegL);
 3694   format %{ "[$reg]" %}
 3695   interface(MEMORY_INTER) %{
    base(0x02);  // sp
 3697     index(0xffffffff);  // No Index
 3698     scale(0x0);  // No Scale
 3699     disp($reg);  // Stack Offset
 3700   %}
 3701 %}
 3702 
 3703 // Special operand allowing long args to int ops to be truncated for free
 3704 
operand iRegL2I(iRegL reg)
%{
  op_cost(0);
  match(ConvL2I reg);
  format %{ "l2i($reg)" %}
  interface(REG_INTER);
%}
 3715 
 3716 
 3717 // Comparison Operands
 3718 // NOTE: Label is a predefined operand which should not be redefined in
 3719 //       the AD file. It is generically handled within the ADLC.
 3720 
 3721 //----------Conditional Branch Operands----------------------------------------
 3722 // Comparison Op  - This is the operation of the comparison, and is limited to
 3723 //                  the following set of codes:
 3724 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
 3725 //
 3726 // Other attributes of the comparison, such as unsignedness, are specified
 3727 // by the comparison instruction that sets a condition code flags register.
 3728 // That result is represented by a flags operand whose subtype is appropriate
 3729 // to the unsignedness (etc.) of the comparison.
 3730 //
 3731 // Later, the instruction which matches both the Comparison Op (a Bool) and
 3732 // the flags (produced by the Cmp) specifies the coding of the comparison op
 3733 // by matching a specific subtype of Bool operand below, such as cmpOpU.
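//
// For illustration only (a schematic sketch, not an actual rule): a conditional
// branch rule pairs one of these operands with the compare that feeds it,
// roughly
//   match(If cmp (CmpI op1 op2));
// and its encoding selects the machine branch (beq/bne/blt/bge/bltu/bgeu) from
// the operand's $cmp$$cmpcode value.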
 3734 
 3735 
 3736 // used for signed integral comparisons and fp comparisons
 3737 operand cmpOp()
 3738 %{
 3739   match(Bool);
 3740 
 3741   format %{ "" %}
 3742 
  // the values in the interface derive from struct BoolTest::mask
 3744   interface(COND_INTER) %{
 3745     equal(0x0, "eq");
 3746     greater(0x1, "gt");
 3747     overflow(0x2, "overflow");
 3748     less(0x3, "lt");
 3749     not_equal(0x4, "ne");
 3750     less_equal(0x5, "le");
 3751     no_overflow(0x6, "no_overflow");
 3752     greater_equal(0x7, "ge");
 3753   %}
 3754 %}
 3755 
 3756 // used for unsigned integral comparisons
 3757 operand cmpOpU()
 3758 %{
 3759   match(Bool);
 3760 
 3761   format %{ "" %}
  // the values in the interface derive from struct BoolTest::mask
 3763   interface(COND_INTER) %{
 3764     equal(0x0, "eq");
 3765     greater(0x1, "gtu");
 3766     overflow(0x2, "overflow");
 3767     less(0x3, "ltu");
 3768     not_equal(0x4, "ne");
 3769     less_equal(0x5, "leu");
 3770     no_overflow(0x6, "no_overflow");
 3771     greater_equal(0x7, "geu");
 3772   %}
 3773 %}
 3774 
 3775 // used for certain integral comparisons which can be
 3776 // converted to bxx instructions
 3777 operand cmpOpEqNe()
 3778 %{
 3779   match(Bool);
 3780   op_cost(0);
 3781   predicate(n->as_Bool()->_test._test == BoolTest::ne ||
 3782             n->as_Bool()->_test._test == BoolTest::eq);
 3783 
 3784   format %{ "" %}
 3785   interface(COND_INTER) %{
 3786     equal(0x0, "eq");
 3787     greater(0x1, "gt");
 3788     overflow(0x2, "overflow");
 3789     less(0x3, "lt");
 3790     not_equal(0x4, "ne");
 3791     less_equal(0x5, "le");
 3792     no_overflow(0x6, "no_overflow");
 3793     greater_equal(0x7, "ge");
 3794   %}
 3795 %}
 3796 
 3797 operand cmpOpULtGe()
 3798 %{
 3799   match(Bool);
 3800   op_cost(0);
 3801   predicate(n->as_Bool()->_test._test == BoolTest::lt ||
 3802             n->as_Bool()->_test._test == BoolTest::ge);
 3803 
 3804   format %{ "" %}
 3805   interface(COND_INTER) %{
 3806     equal(0x0, "eq");
 3807     greater(0x1, "gtu");
 3808     overflow(0x2, "overflow");
 3809     less(0x3, "ltu");
 3810     not_equal(0x4, "ne");
 3811     less_equal(0x5, "leu");
 3812     no_overflow(0x6, "no_overflow");
 3813     greater_equal(0x7, "geu");
 3814   %}
 3815 %}
 3816 
 3817 operand cmpOpUEqNeLeGt()
 3818 %{
 3819   match(Bool);
 3820   op_cost(0);
 3821   predicate(n->as_Bool()->_test._test == BoolTest::ne ||
 3822             n->as_Bool()->_test._test == BoolTest::eq ||
 3823             n->as_Bool()->_test._test == BoolTest::le ||
 3824             n->as_Bool()->_test._test == BoolTest::gt);
 3825 
 3826   format %{ "" %}
 3827   interface(COND_INTER) %{
 3828     equal(0x0, "eq");
 3829     greater(0x1, "gtu");
 3830     overflow(0x2, "overflow");
 3831     less(0x3, "ltu");
 3832     not_equal(0x4, "ne");
 3833     less_equal(0x5, "leu");
 3834     no_overflow(0x6, "no_overflow");
 3835     greater_equal(0x7, "geu");
 3836   %}
 3837 %}
 3838 
 3839 
 3840 // Flags register, used as output of compare logic
 3841 operand rFlagsReg()
 3842 %{
 3843   constraint(ALLOC_IN_RC(reg_flags));
 3844   match(RegFlags);
 3845 
 3846   op_cost(0);
 3847   format %{ "RFLAGS" %}
 3848   interface(REG_INTER);
 3849 %}
 3850 
 3851 // Special Registers
 3852 
 3853 // Method Register
 3854 operand inline_cache_RegP(iRegP reg)
 3855 %{
 3856   constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
 3857   match(reg);
 3858   match(iRegPNoSp);
 3859   op_cost(0);
 3860   format %{ %}
 3861   interface(REG_INTER);
 3862 %}
 3863 
 3864 //----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
 3866 // instruction definitions by not requiring the AD writer to specify
 3867 // separate instructions for every form of operand when the
 3868 // instruction accepts multiple operand types with the same basic
 3869 // encoding and format. The classic case of this is memory operands.
 3870 
// memory is used to define the read/write location for load/store
// instruction defs. We can turn a memory op into an Address.
 3873 
 3874 opclass memory(indirect, indOffI, indOffL, indirectN, indOffIN, indOffLN);
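
// For illustration: because the load/store rules below take an operand of class
// memory, a single rule such as loadI covers the plain [$reg] form, the
// [$reg, $off] forms and the narrow-oop variants without needing six separate
// instruct definitions.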
 3875 
// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. It allows the src to be either an iRegI or a (ConvL2I
// iRegL). In the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// N.B. this does not elide all L2I conversions. If the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes, so an l2i gets planted
// (actually an addiw $dst, $src, 0) and the downstream instructions
// consume the result of the L2I as an iRegI input. That's a shame since
// the addiw is actually redundant, but it's not too costly.
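//
// For illustration only (hypothetical Java source): for
//   int f(long x, int y) { return (int) x + y; }
// the ideal graph contains (AddI (ConvL2I x) y); because the AddI rule accepts
// iRegIorL2I sources, the ConvL2I folds into the 32-bit addw, which simply
// reads the low 32 bits of the long register.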
 3888 
 3889 opclass iRegIorL2I(iRegI, iRegL2I);
 3890 opclass iRegIorL(iRegI, iRegL);
 3891 opclass iRegNorP(iRegN, iRegP);
 3892 opclass iRegILNP(iRegI, iRegL, iRegN, iRegP);
 3893 opclass iRegILNPNoSp(iRegINoSp, iRegLNoSp, iRegNNoSp, iRegPNoSp);
 3894 opclass immIorL(immI, immL);
 3895 
 3896 //----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architecture's pipeline.
 3898 
 3899 // For specific pipelines, e.g. generic RISC-V, define the stages of that pipeline
 3900 //pipe_desc(ID, EX, MEM, WR);
 3901 #define ID   S0
 3902 #define EX   S1
 3903 #define MEM  S2
 3904 #define WR   S3
 3905 
 3906 // Integer ALU reg operation
 3907 pipeline %{
 3908 
 3909 attributes %{
 3910   // RISC-V instructions are of fixed length
  fixed_size_instructions;           // Fixed size instructions
  max_instructions_per_bundle = 2;   // Generic RISC-V: 1, SiFive Series 7: 2
 3913   // RISC-V instructions come in 32-bit word units
 3914   instruction_unit_size = 4;         // An instruction is 4 bytes long
 3915   instruction_fetch_unit_size = 64;  // The processor fetches one line
 3916   instruction_fetch_units = 1;       // of 64 bytes
 3917 
 3918   // List of nop instructions
 3919   nops( MachNop );
 3920 %}
 3921 
// We don't use an actual pipeline model, so we don't care about resources
// or their description. We do use pipeline classes to introduce fixed
// latencies.
 3925 
 3926 //----------RESOURCES----------------------------------------------------------
 3927 // Resources are the functional units available to the machine
 3928 
 3929 // Generic RISC-V pipeline
 3930 // 1 decoder
 3931 // 1 instruction decoded per cycle
// 1 load/store op per cycle, 1 branch, 1 FPU
 3933 // 1 mul, 1 div
 3934 
 3935 resources ( DECODE,
 3936             ALU,
 3937             MUL,
 3938             DIV,
 3939             BRANCH,
 3940             LDST,
 3941             FPU);
 3942 
 3943 //----------PIPELINE DESCRIPTION-----------------------------------------------
 3944 // Pipeline Description specifies the stages in the machine's pipeline
 3945 
 3946 // Define the pipeline as a generic 6 stage pipeline
 3947 pipe_desc(S0, S1, S2, S3, S4, S5);
 3948 
 3949 //----------PIPELINE CLASSES---------------------------------------------------
 3950 // Pipeline Classes describe the stages in which input and output are
 3951 // referenced by the hardware pipeline.
 3952 
 3953 pipe_class fp_dop_reg_reg_s(fRegF dst, fRegF src1, fRegF src2)
 3954 %{
 3955   single_instruction;
 3956   src1   : S1(read);
 3957   src2   : S2(read);
 3958   dst    : S5(write);
 3959   DECODE : ID;
 3960   FPU    : S5;
 3961 %}
 3962 
 3963 pipe_class fp_dop_reg_reg_d(fRegD dst, fRegD src1, fRegD src2)
 3964 %{
  single_instruction;
  src1   : S1(read);
 3966   src2   : S2(read);
 3967   dst    : S5(write);
 3968   DECODE : ID;
 3969   FPU    : S5;
 3970 %}
 3971 
 3972 pipe_class fp_uop_s(fRegF dst, fRegF src)
 3973 %{
 3974   single_instruction;
 3975   src    : S1(read);
 3976   dst    : S5(write);
 3977   DECODE : ID;
 3978   FPU    : S5;
 3979 %}
 3980 
 3981 pipe_class fp_uop_d(fRegD dst, fRegD src)
 3982 %{
 3983   single_instruction;
 3984   src    : S1(read);
 3985   dst    : S5(write);
 3986   DECODE : ID;
 3987   FPU    : S5;
 3988 %}
 3989 
 3990 pipe_class fp_d2f(fRegF dst, fRegD src)
 3991 %{
 3992   single_instruction;
 3993   src    : S1(read);
 3994   dst    : S5(write);
 3995   DECODE : ID;
 3996   FPU    : S5;
 3997 %}
 3998 
 3999 pipe_class fp_f2d(fRegD dst, fRegF src)
 4000 %{
 4001   single_instruction;
 4002   src    : S1(read);
 4003   dst    : S5(write);
 4004   DECODE : ID;
 4005   FPU    : S5;
 4006 %}
 4007 
 4008 pipe_class fp_f2i(iRegINoSp dst, fRegF src)
 4009 %{
 4010   single_instruction;
 4011   src    : S1(read);
 4012   dst    : S5(write);
 4013   DECODE : ID;
 4014   FPU    : S5;
 4015 %}
 4016 
 4017 pipe_class fp_f2l(iRegLNoSp dst, fRegF src)
 4018 %{
 4019   single_instruction;
 4020   src    : S1(read);
 4021   dst    : S5(write);
 4022   DECODE : ID;
 4023   FPU    : S5;
 4024 %}
 4025 
 4026 pipe_class fp_i2f(fRegF dst, iRegIorL2I src)
 4027 %{
 4028   single_instruction;
 4029   src    : S1(read);
 4030   dst    : S5(write);
 4031   DECODE : ID;
 4032   FPU    : S5;
 4033 %}
 4034 
 4035 pipe_class fp_l2f(fRegF dst, iRegL src)
 4036 %{
 4037   single_instruction;
 4038   src    : S1(read);
 4039   dst    : S5(write);
 4040   DECODE : ID;
 4041   FPU    : S5;
 4042 %}
 4043 
 4044 pipe_class fp_d2i(iRegINoSp dst, fRegD src)
 4045 %{
 4046   single_instruction;
 4047   src    : S1(read);
 4048   dst    : S5(write);
 4049   DECODE : ID;
 4050   FPU    : S5;
 4051 %}
 4052 
 4053 pipe_class fp_d2l(iRegLNoSp dst, fRegD src)
 4054 %{
 4055   single_instruction;
 4056   src    : S1(read);
 4057   dst    : S5(write);
 4058   DECODE : ID;
 4059   FPU    : S5;
 4060 %}
 4061 
 4062 pipe_class fp_i2d(fRegD dst, iRegIorL2I src)
 4063 %{
 4064   single_instruction;
 4065   src    : S1(read);
 4066   dst    : S5(write);
 4067   DECODE : ID;
 4068   FPU    : S5;
 4069 %}
 4070 
 4071 pipe_class fp_l2d(fRegD dst, iRegIorL2I src)
 4072 %{
 4073   single_instruction;
 4074   src    : S1(read);
 4075   dst    : S5(write);
 4076   DECODE : ID;
 4077   FPU    : S5;
 4078 %}
 4079 
 4080 pipe_class fp_div_s(fRegF dst, fRegF src1, fRegF src2)
 4081 %{
 4082   single_instruction;
 4083   src1   : S1(read);
 4084   src2   : S2(read);
 4085   dst    : S5(write);
 4086   DECODE : ID;
 4087   FPU    : S5;
 4088 %}
 4089 
 4090 pipe_class fp_div_d(fRegD dst, fRegD src1, fRegD src2)
 4091 %{
 4092   single_instruction;
 4093   src1   : S1(read);
 4094   src2   : S2(read);
 4095   dst    : S5(write);
 4096   DECODE : ID;
 4097   FPU    : S5;
 4098 %}
 4099 
 4100 pipe_class fp_sqrt_s(fRegF dst, fRegF src1, fRegF src2)
 4101 %{
 4102   single_instruction;
 4103   src1   : S1(read);
 4104   src2   : S2(read);
 4105   dst    : S5(write);
 4106   DECODE : ID;
 4107   FPU    : S5;
 4108 %}
 4109 
 4110 pipe_class fp_sqrt_d(fRegD dst, fRegD src1, fRegD src2)
 4111 %{
 4112   single_instruction;
 4113   src1   : S1(read);
 4114   src2   : S2(read);
 4115   dst    : S5(write);
 4116   DECODE : ID;
 4117   FPU    : S5;
 4118 %}
 4119 
 4120 pipe_class fp_load_constant_s(fRegF dst)
 4121 %{
 4122   single_instruction;
 4123   dst    : S5(write);
 4124   DECODE : ID;
 4125   FPU    : S5;
 4126 %}
 4127 
 4128 pipe_class fp_load_constant_d(fRegD dst)
 4129 %{
 4130   single_instruction;
 4131   dst    : S5(write);
 4132   DECODE : ID;
 4133   FPU    : S5;
 4134 %}
 4135 
 4136 pipe_class fp_load_mem_s(fRegF dst, memory mem)
 4137 %{
 4138   single_instruction;
 4139   mem    : S1(read);
 4140   dst    : S5(write);
 4141   DECODE : ID;
 4142   LDST   : MEM;
 4143 %}
 4144 
 4145 pipe_class fp_load_mem_d(fRegD dst, memory mem)
 4146 %{
 4147   single_instruction;
 4148   mem    : S1(read);
 4149   dst    : S5(write);
 4150   DECODE : ID;
 4151   LDST   : MEM;
 4152 %}
 4153 
 4154 pipe_class fp_store_reg_s(fRegF src, memory mem)
 4155 %{
 4156   single_instruction;
 4157   src    : S1(read);
 4158   mem    : S5(write);
 4159   DECODE : ID;
 4160   LDST   : MEM;
 4161 %}
 4162 
 4163 pipe_class fp_store_reg_d(fRegD src, memory mem)
 4164 %{
 4165   single_instruction;
 4166   src    : S1(read);
 4167   mem    : S5(write);
 4168   DECODE : ID;
 4169   LDST   : MEM;
 4170 %}
 4171 
 4172 //------- Integer ALU operations --------------------------
 4173 
 4174 // Integer ALU reg-reg operation
// Operands needed in ID, result generated in EX
 4176 // E.g.  ADD   Rd, Rs1, Rs2
 4177 pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
 4178 %{
 4179   single_instruction;
 4180   dst    : EX(write);
 4181   src1   : ID(read);
 4182   src2   : ID(read);
 4183   DECODE : ID;
 4184   ALU    : EX;
 4185 %}
 4186 
 4187 // Integer ALU reg operation with constant shift
 4188 // E.g. SLLI    Rd, Rs1, #shift
 4189 pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
 4190 %{
 4191   single_instruction;
 4192   dst    : EX(write);
 4193   src1   : ID(read);
 4194   DECODE : ID;
 4195   ALU    : EX;
 4196 %}
 4197 
 4198 // Integer ALU reg-reg operation with variable shift
 4199 // both operands must be available in ID
 4200 // E.g. SLL   Rd, Rs1, Rs2
 4201 pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
 4202 %{
 4203   single_instruction;
 4204   dst    : EX(write);
 4205   src1   : ID(read);
 4206   src2   : ID(read);
 4207   DECODE : ID;
 4208   ALU    : EX;
 4209 %}
 4210 
 4211 // Integer ALU reg operation
 4212 // E.g. NEG   Rd, Rs2
 4213 pipe_class ialu_reg(iRegI dst, iRegI src)
 4214 %{
 4215   single_instruction;
 4216   dst    : EX(write);
 4217   src    : ID(read);
 4218   DECODE : ID;
 4219   ALU    : EX;
 4220 %}
 4221 
 4222 // Integer ALU reg immediate operation
 4223 // E.g. ADDI   Rd, Rs1, #imm
 4224 pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
 4225 %{
 4226   single_instruction;
 4227   dst    : EX(write);
 4228   src1   : ID(read);
 4229   DECODE : ID;
 4230   ALU    : EX;
 4231 %}
 4232 
 4233 // Integer ALU immediate operation (no source operands)
 4234 // E.g. LI    Rd, #imm
 4235 pipe_class ialu_imm(iRegI dst)
 4236 %{
 4237   single_instruction;
 4238   dst    : EX(write);
 4239   DECODE : ID;
 4240   ALU    : EX;
 4241 %}
 4242 
 4243 //------- Multiply pipeline operations --------------------
 4244 
 4245 // Multiply reg-reg
 4246 // E.g. MULW   Rd, Rs1, Rs2
 4247 pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
 4248 %{
 4249   single_instruction;
 4250   dst    : WR(write);
 4251   src1   : ID(read);
 4252   src2   : ID(read);
 4253   DECODE : ID;
 4254   MUL    : WR;
 4255 %}
 4256 
 4257 // E.g. MUL   RD, Rs1, Rs2
 4258 pipe_class lmul_reg_reg(iRegL dst, iRegL src1, iRegL src2)
 4259 %{
 4260   single_instruction;
 4261   fixed_latency(3); // Maximum latency for 64 bit mul
 4262   dst    : WR(write);
 4263   src1   : ID(read);
 4264   src2   : ID(read);
 4265   DECODE : ID;
 4266   MUL    : WR;
 4267 %}
 4268 
 4269 //------- Divide pipeline operations --------------------
 4270 
 4271 // E.g. DIVW   Rd, Rs1, Rs2
 4272 pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
 4273 %{
 4274   single_instruction;
 4275   fixed_latency(8); // Maximum latency for 32 bit divide
 4276   dst    : WR(write);
 4277   src1   : ID(read);
 4278   src2   : ID(read);
 4279   DECODE : ID;
 4280   DIV    : WR;
 4281 %}
 4282 
 4283 // E.g. DIV   RD, Rs1, Rs2
 4284 pipe_class ldiv_reg_reg(iRegL dst, iRegL src1, iRegL src2)
 4285 %{
 4286   single_instruction;
 4287   fixed_latency(16); // Maximum latency for 64 bit divide
 4288   dst    : WR(write);
 4289   src1   : ID(read);
 4290   src2   : ID(read);
 4291   DECODE : ID;
 4292   DIV    : WR;
 4293 %}
 4294 
 4295 //------- Load pipeline operations ------------------------
 4296 
 4297 // Load - prefetch
// E.g.  PREFETCH_W  mem
 4299 pipe_class iload_prefetch(memory mem)
 4300 %{
 4301   single_instruction;
 4302   mem    : ID(read);
 4303   DECODE : ID;
 4304   LDST   : MEM;
 4305 %}
 4306 
 4307 // Load - reg, mem
 4308 // E.g. LA    Rd, mem
 4309 pipe_class iload_reg_mem(iRegI dst, memory mem)
 4310 %{
 4311   single_instruction;
 4312   dst    : WR(write);
 4313   mem    : ID(read);
 4314   DECODE : ID;
 4315   LDST   : MEM;
 4316 %}
 4317 
 4318 // Load - reg, reg
 4319 // E.g. LD    Rd, Rs
 4320 pipe_class iload_reg_reg(iRegI dst, iRegI src)
 4321 %{
 4322   single_instruction;
 4323   dst    : WR(write);
 4324   src    : ID(read);
 4325   DECODE : ID;
 4326   LDST   : MEM;
 4327 %}
 4328 
 4329 //------- Store pipeline operations -----------------------
 4330 
 4331 // Store - zr, mem
 4332 // E.g. SD    zr, mem
 4333 pipe_class istore_mem(memory mem)
 4334 %{
 4335   single_instruction;
 4336   mem    : ID(read);
 4337   DECODE : ID;
 4338   LDST   : MEM;
 4339 %}
 4340 
 4341 // Store - reg, mem
 4342 // E.g. SD    Rs, mem
 4343 pipe_class istore_reg_mem(iRegI src, memory mem)
 4344 %{
 4345   single_instruction;
 4346   mem    : ID(read);
 4347   src    : EX(read);
 4348   DECODE : ID;
 4349   LDST   : MEM;
 4350 %}
 4351 
 4352 // Store - reg, reg
 4353 // E.g. SD    Rs2, Rs1
 4354 pipe_class istore_reg_reg(iRegI dst, iRegI src)
 4355 %{
 4356   single_instruction;
 4357   dst    : ID(read);
 4358   src    : EX(read);
 4359   DECODE : ID;
 4360   LDST   : MEM;
 4361 %}
 4362 
 4363 //------- Control transfer pipeline operations ------------
 4364 
 4365 // Branch
 4366 pipe_class pipe_branch()
 4367 %{
 4368   single_instruction;
 4369   DECODE : ID;
 4370   BRANCH : EX;
 4371 %}
 4372 
 4373 // Branch
 4374 pipe_class pipe_branch_reg(iRegI src)
 4375 %{
 4376   single_instruction;
 4377   src    : ID(read);
 4378   DECODE : ID;
 4379   BRANCH : EX;
 4380 %}
 4381 
 4382 // Compare & Branch
 4383 // E.g. BEQ   Rs1, Rs2, L
 4384 pipe_class pipe_cmp_branch(iRegI src1, iRegI src2)
 4385 %{
 4386   single_instruction;
 4387   src1   : ID(read);
 4388   src2   : ID(read);
 4389   DECODE : ID;
 4390   BRANCH : EX;
 4391 %}
 4392 
 4393 // E.g. BEQZ Rs, L
 4394 pipe_class pipe_cmpz_branch(iRegI src)
 4395 %{
 4396   single_instruction;
 4397   src    : ID(read);
 4398   DECODE : ID;
 4399   BRANCH : EX;
 4400 %}
 4401 
 4402 //------- Synchronisation operations ----------------------
 4403 // Any operation requiring serialization
 4404 // E.g. FENCE/Atomic Ops/Load Acquire/Store Release
 4405 pipe_class pipe_serial()
 4406 %{
 4407   single_instruction;
 4408   force_serialization;
 4409   fixed_latency(16);
 4410   DECODE : ID;
 4411   LDST   : MEM;
 4412 %}
 4413 
 4414 pipe_class pipe_slow()
 4415 %{
 4416   instruction_count(10);
 4417   multiple_bundles;
 4418   force_serialization;
 4419   fixed_latency(16);
 4420   DECODE : ID;
 4421   LDST   : MEM;
 4422 %}
 4423 
 4424 // Empty pipeline class
 4425 pipe_class pipe_class_empty()
 4426 %{
 4427   single_instruction;
 4428   fixed_latency(0);
 4429 %}
 4430 
 4431 // Default pipeline class.
 4432 pipe_class pipe_class_default()
 4433 %{
 4434   single_instruction;
 4435   fixed_latency(2);
 4436 %}
 4437 
 4438 // Pipeline class for compares.
 4439 pipe_class pipe_class_compare()
 4440 %{
 4441   single_instruction;
 4442   fixed_latency(16);
 4443 %}
 4444 
 4445 // Pipeline class for memory operations.
 4446 pipe_class pipe_class_memory()
 4447 %{
 4448   single_instruction;
 4449   fixed_latency(16);
 4450 %}
 4451 
 4452 // Pipeline class for call.
 4453 pipe_class pipe_class_call()
 4454 %{
 4455   single_instruction;
 4456   fixed_latency(100);
 4457 %}
 4458 
 4459 // Define the class for the Nop node.
 4460 define %{
 4461    MachNop = pipe_class_empty;
 4462 %}
 4463 %}
 4464 //----------INSTRUCTIONS-------------------------------------------------------
 4465 //
 4466 // match      -- States which machine-independent subtree may be replaced
 4467 //               by this instruction.
 4468 // ins_cost   -- The estimated cost of this instruction is used by instruction
 4469 //               selection to identify a minimum cost tree of machine
 4470 //               instructions that matches a tree of machine-independent
 4471 //               instructions.
 4472 // format     -- A string providing the disassembly for this instruction.
 4473 //               The value of an instruction's operand may be inserted
 4474 //               by referring to it with a '$' prefix.
 4475 // opcode     -- Three instruction opcodes may be provided.  These are referred
 4476 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
 4478 //               indicate the type of machine instruction, while secondary
 4479 //               and tertiary are often used for prefix options or addressing
 4480 //               modes.
 4481 // ins_encode -- A list of encode classes with parameters. The encode class
 4482 //               name must have been defined in an 'enc_class' specification
 4483 //               in the encode section of the architecture description.
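//
// For illustration only, a schematic rule (not an actual definition in this
// file) showing how the pieces fit together; the operand and pipe class names
// are the ones defined earlier in this file:
//
//   instruct exampleAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2)
//   %{
//     match(Set dst (AddI src1 src2));   // replaces an ideal 32-bit add
//     ins_cost(ALU_COST);                // cost consulted by instruction selection
//     format %{ "addw  $dst, $src1, $src2\t#@exampleAddI" %}
//     ins_encode %{
//       __ addw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
//     %}
//     ins_pipe(ialu_reg_reg);            // generic ALU reg-reg pipeline class
//   %}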
 4484 
 4485 // ============================================================================
 4486 // Memory (Load/Store) Instructions
 4487 
 4488 // Load Instructions
 4489 
 4490 // Load Byte (8 bit signed)
 4491 instruct loadB(iRegINoSp dst, memory mem)
 4492 %{
 4493   match(Set dst (LoadB mem));
 4494 
 4495   ins_cost(LOAD_COST);
 4496   format %{ "lb  $dst, $mem\t# byte, #@loadB" %}
 4497 
 4498   ins_encode %{
 4499     __ lb(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4500   %}
 4501 
 4502   ins_pipe(iload_reg_mem);
 4503 %}
 4504 
 4505 // Load Byte (8 bit signed) into long
 4506 instruct loadB2L(iRegLNoSp dst, memory mem)
 4507 %{
 4508   match(Set dst (ConvI2L (LoadB mem)));
 4509 
 4510   ins_cost(LOAD_COST);
 4511   format %{ "lb  $dst, $mem\t# byte, #@loadB2L" %}
 4512 
 4513   ins_encode %{
 4514     __ lb(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4515   %}
 4516 
 4517   ins_pipe(iload_reg_mem);
 4518 %}
 4519 
 4520 // Load Byte (8 bit unsigned)
 4521 instruct loadUB(iRegINoSp dst, memory mem)
 4522 %{
 4523   match(Set dst (LoadUB mem));
 4524 
 4525   ins_cost(LOAD_COST);
 4526   format %{ "lbu  $dst, $mem\t# byte, #@loadUB" %}
 4527 
 4528   ins_encode %{
 4529     __ lbu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4530   %}
 4531 
 4532   ins_pipe(iload_reg_mem);
 4533 %}
 4534 
 4535 // Load Byte (8 bit unsigned) into long
 4536 instruct loadUB2L(iRegLNoSp dst, memory mem)
 4537 %{
 4538   match(Set dst (ConvI2L (LoadUB mem)));
 4539 
 4540   ins_cost(LOAD_COST);
 4541   format %{ "lbu  $dst, $mem\t# byte, #@loadUB2L" %}
 4542 
 4543   ins_encode %{
 4544     __ lbu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4545   %}
 4546 
 4547   ins_pipe(iload_reg_mem);
 4548 %}
 4549 
 4550 // Load Short (16 bit signed)
 4551 instruct loadS(iRegINoSp dst, memory mem)
 4552 %{
 4553   match(Set dst (LoadS mem));
 4554 
 4555   ins_cost(LOAD_COST);
 4556   format %{ "lh  $dst, $mem\t# short, #@loadS" %}
 4557 
 4558   ins_encode %{
 4559     __ lh(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4560   %}
 4561 
 4562   ins_pipe(iload_reg_mem);
 4563 %}
 4564 
 4565 // Load Short (16 bit signed) into long
 4566 instruct loadS2L(iRegLNoSp dst, memory mem)
 4567 %{
 4568   match(Set dst (ConvI2L (LoadS mem)));
 4569 
 4570   ins_cost(LOAD_COST);
 4571   format %{ "lh  $dst, $mem\t# short, #@loadS2L" %}
 4572 
 4573   ins_encode %{
 4574     __ lh(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4575   %}
 4576 
 4577   ins_pipe(iload_reg_mem);
 4578 %}
 4579 
 4580 // Load Char (16 bit unsigned)
 4581 instruct loadUS(iRegINoSp dst, memory mem)
 4582 %{
 4583   match(Set dst (LoadUS mem));
 4584 
 4585   ins_cost(LOAD_COST);
 4586   format %{ "lhu  $dst, $mem\t# short, #@loadUS" %}
 4587 
 4588   ins_encode %{
 4589     __ lhu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4590   %}
 4591 
 4592   ins_pipe(iload_reg_mem);
 4593 %}
 4594 
 4595 // Load Short/Char (16 bit unsigned) into long
 4596 instruct loadUS2L(iRegLNoSp dst, memory mem)
 4597 %{
 4598   match(Set dst (ConvI2L (LoadUS mem)));
 4599 
 4600   ins_cost(LOAD_COST);
 4601   format %{ "lhu  $dst, $mem\t# short, #@loadUS2L" %}
 4602 
 4603   ins_encode %{
 4604     __ lhu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4605   %}
 4606 
 4607   ins_pipe(iload_reg_mem);
 4608 %}
 4609 
 4610 // Load Integer (32 bit signed)
 4611 instruct loadI(iRegINoSp dst, memory mem)
 4612 %{
 4613   match(Set dst (LoadI mem));
 4614 
 4615   ins_cost(LOAD_COST);
 4616   format %{ "lw  $dst, $mem\t# int, #@loadI" %}
 4617 
 4618   ins_encode %{
 4619     __ lw(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4620   %}
 4621 
 4622   ins_pipe(iload_reg_mem);
 4623 %}
 4624 
 4625 // Load Integer (32 bit signed) into long
 4626 instruct loadI2L(iRegLNoSp dst, memory mem)
 4627 %{
 4628   match(Set dst (ConvI2L (LoadI mem)));
 4629 
 4630   ins_cost(LOAD_COST);
 4631   format %{ "lw  $dst, $mem\t# int, #@loadI2L" %}
 4632 
 4633   ins_encode %{
 4634     __ lw(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4635   %}
 4636 
 4637   ins_pipe(iload_reg_mem);
 4638 %}
 4639 
 4640 // Load Integer (32 bit unsigned) into long
 4641 instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
 4642 %{
 4643   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
 4644 
 4645   ins_cost(LOAD_COST);
 4646   format %{ "lwu  $dst, $mem\t# int, #@loadUI2L" %}
 4647 
 4648   ins_encode %{
 4649     __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4650   %}
 4651 
 4652   ins_pipe(iload_reg_mem);
 4653 %}
 4654 
 4655 // Load Long (64 bit signed)
 4656 instruct loadL(iRegLNoSp dst, memory mem)
 4657 %{
 4658   match(Set dst (LoadL mem));
 4659 
 4660   ins_cost(LOAD_COST);
 4661   format %{ "ld  $dst, $mem\t# int, #@loadL" %}
 4662 
 4663   ins_encode %{
 4664     __ ld(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4665   %}
 4666 
 4667   ins_pipe(iload_reg_mem);
 4668 %}
 4669 
 4670 // Load Range
 4671 instruct loadRange(iRegINoSp dst, memory mem)
 4672 %{
 4673   match(Set dst (LoadRange mem));
 4674 
 4675   ins_cost(LOAD_COST);
 4676   format %{ "lwu  $dst, $mem\t# range, #@loadRange" %}
 4677 
 4678   ins_encode %{
 4679     __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4680   %}
 4681 
 4682   ins_pipe(iload_reg_mem);
 4683 %}
 4684 
 4685 // Load Pointer
 4686 instruct loadP(iRegPNoSp dst, memory mem)
 4687 %{
 4688   match(Set dst (LoadP mem));
 4689   predicate(n->as_Load()->barrier_data() == 0);
 4690 
 4691   ins_cost(LOAD_COST);
 4692   format %{ "ld  $dst, $mem\t# ptr, #@loadP" %}
 4693 
 4694   ins_encode %{
 4695     __ ld(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4696   %}
 4697 
 4698   ins_pipe(iload_reg_mem);
 4699 %}
 4700 
 4701 // Load Compressed Pointer
 4702 instruct loadN(iRegNNoSp dst, memory mem)
 4703 %{
 4704   predicate(n->as_Load()->barrier_data() == 0);
 4705   match(Set dst (LoadN mem));
 4706 
 4707   ins_cost(LOAD_COST);
 4708   format %{ "lwu  $dst, $mem\t# compressed ptr, #@loadN" %}
 4709 
 4710   ins_encode %{
 4711     __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4712   %}
 4713 
 4714   ins_pipe(iload_reg_mem);
 4715 %}
 4716 
 4717 // Load Klass Pointer
 4718 instruct loadKlass(iRegPNoSp dst, memory mem)
 4719 %{
 4720   match(Set dst (LoadKlass mem));
 4721 
 4722   ins_cost(LOAD_COST);
 4723   format %{ "ld  $dst, $mem\t# class, #@loadKlass" %}
 4724 
 4725   ins_encode %{
 4726     __ ld(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4727   %}
 4728 
 4729   ins_pipe(iload_reg_mem);
 4730 %}
 4731 
 4732 // Load Narrow Klass Pointer
 4733 instruct loadNKlass(iRegNNoSp dst, memory mem)
 4734 %{
 4735   predicate(!UseCompactObjectHeaders);
 4736   match(Set dst (LoadNKlass mem));
 4737 
 4738   ins_cost(LOAD_COST);
 4739   format %{ "lwu  $dst, $mem\t# compressed class ptr, #@loadNKlass" %}
 4740 
 4741   ins_encode %{
 4742     __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4743   %}
 4744 
 4745   ins_pipe(iload_reg_mem);
 4746 %}
 4747 
 4748 instruct loadNKlassCompactHeaders(iRegNNoSp dst, memory mem)
 4749 %{
 4750   predicate(UseCompactObjectHeaders);
 4751   match(Set dst (LoadNKlass mem));
 4752 
 4753   ins_cost(LOAD_COST);
 4754   format %{
 4755     "lwu  $dst, $mem\t# compressed klass ptr, shifted\n\t"
 4756     "srli $dst, $dst, markWord::klass_shift_at_offset"
 4757   %}
 4758 
 4759   ins_encode %{
 4760     Unimplemented();
 4761     // __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4762     // __ srli(as_Register($dst$$reg), as_Register($dst$$reg), (unsigned) markWord::klass_shift_at_offset);
 4763   %}
 4764 
 4765   ins_pipe(iload_reg_mem);
 4766 %}
 4767 
 4768 // Load Float
 4769 instruct loadF(fRegF dst, memory mem)
 4770 %{
 4771   match(Set dst (LoadF mem));
 4772 
 4773   ins_cost(LOAD_COST);
 4774   format %{ "flw  $dst, $mem\t# float, #@loadF" %}
 4775 
 4776   ins_encode %{
 4777     __ flw(as_FloatRegister($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4778   %}
 4779 
 4780   ins_pipe(fp_load_mem_s);
 4781 %}
 4782 
 4783 // Load Double
 4784 instruct loadD(fRegD dst, memory mem)
 4785 %{
 4786   match(Set dst (LoadD mem));
 4787 
 4788   ins_cost(LOAD_COST);
 4789   format %{ "fld  $dst, $mem\t# double, #@loadD" %}
 4790 
 4791   ins_encode %{
 4792     __ fld(as_FloatRegister($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4793   %}
 4794 
 4795   ins_pipe(fp_load_mem_d);
 4796 %}
 4797 
 4798 // Load Int Constant
 4799 instruct loadConI(iRegINoSp dst, immI src)
 4800 %{
 4801   match(Set dst src);
 4802 
 4803   ins_cost(ALU_COST);
 4804   format %{ "mv $dst, $src\t# int, #@loadConI" %}
 4805 
 4806   ins_encode(riscv_enc_mov_imm(dst, src));
 4807 
 4808   ins_pipe(ialu_imm);
 4809 %}
 4810 
 4811 // Load Long Constant
 4812 instruct loadConL(iRegLNoSp dst, immL src)
 4813 %{
 4814   match(Set dst src);
 4815 
 4816   ins_cost(ALU_COST);
 4817   format %{ "mv $dst, $src\t# long, #@loadConL" %}
 4818 
 4819   ins_encode(riscv_enc_mov_imm(dst, src));
 4820 
 4821   ins_pipe(ialu_imm);
 4822 %}
 4823 
 4824 // Load Pointer Constant
 4825 instruct loadConP(iRegPNoSp dst, immP con)
 4826 %{
 4827   match(Set dst con);
 4828 
 4829   ins_cost(ALU_COST);
 4830   format %{ "mv  $dst, $con\t# ptr, #@loadConP" %}
 4831 
 4832   ins_encode(riscv_enc_mov_p(dst, con));
 4833 
 4834   ins_pipe(ialu_imm);
 4835 %}
 4836 
 4837 // Load Null Pointer Constant
 4838 instruct loadConP0(iRegPNoSp dst, immP0 con)
 4839 %{
 4840   match(Set dst con);
 4841 
 4842   ins_cost(ALU_COST);
 4843   format %{ "mv  $dst, $con\t# null pointer, #@loadConP0" %}
 4844 
 4845   ins_encode(riscv_enc_mov_zero(dst));
 4846 
 4847   ins_pipe(ialu_imm);
 4848 %}
 4849 
 4850 // Load Pointer Constant One
 4851 instruct loadConP1(iRegPNoSp dst, immP_1 con)
 4852 %{
 4853   match(Set dst con);
 4854 
 4855   ins_cost(ALU_COST);
 4856   format %{ "mv  $dst, $con\t# load ptr constant one, #@loadConP1" %}
 4857 
 4858   ins_encode(riscv_enc_mov_p1(dst));
 4859 
 4860   ins_pipe(ialu_imm);
 4861 %}
 4862 
 4863 // Load Byte Map Base Constant
 4864 instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
 4865 %{
 4866   match(Set dst con);
 4867   ins_cost(ALU_COST);
 4868   format %{ "mv  $dst, $con\t# Byte Map Base, #@loadByteMapBase" %}
 4869 
 4870   ins_encode(riscv_enc_mov_byte_map_base(dst));
 4871 
 4872   ins_pipe(ialu_imm);
 4873 %}
 4874 
 4875 // Load Narrow Pointer Constant
 4876 instruct loadConN(iRegNNoSp dst, immN con)
 4877 %{
 4878   match(Set dst con);
 4879 
 4880   ins_cost(ALU_COST * 4);
 4881   format %{ "mv  $dst, $con\t# compressed ptr, #@loadConN" %}
 4882 
 4883   ins_encode(riscv_enc_mov_n(dst, con));
 4884 
 4885   ins_pipe(ialu_imm);
 4886 %}
 4887 
 4888 // Load Narrow Null Pointer Constant
 4889 instruct loadConN0(iRegNNoSp dst, immN0 con)
 4890 %{
 4891   match(Set dst con);
 4892 
 4893   ins_cost(ALU_COST);
 4894   format %{ "mv  $dst, $con\t# compressed null pointer, #@loadConN0" %}
 4895 
 4896   ins_encode(riscv_enc_mov_zero(dst));
 4897 
 4898   ins_pipe(ialu_imm);
 4899 %}
 4900 
 4901 // Load Narrow Klass Constant
 4902 instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
 4903 %{
 4904   match(Set dst con);
 4905 
 4906   ins_cost(ALU_COST * 6);
 4907   format %{ "mv  $dst, $con\t# compressed klass ptr, #@loadConNKlass" %}
 4908 
 4909   ins_encode(riscv_enc_mov_nk(dst, con));
 4910 
 4911   ins_pipe(ialu_imm);
 4912 %}
 4913 
 4914 // Load Float Constant
 4915 instruct loadConF(fRegF dst, immF con) %{
 4916   match(Set dst con);
 4917 
 4918   ins_cost(LOAD_COST);
 4919   format %{
 4920     "flw $dst, [$constantaddress]\t# load from constant table: float=$con, #@loadConF"
 4921   %}
 4922 
 4923   ins_encode %{
 4924     __ flw(as_FloatRegister($dst$$reg), $constantaddress($con));
 4925   %}
 4926 
 4927   ins_pipe(fp_load_constant_s);
 4928 %}
 4929 
 4930 instruct loadConF0(fRegF dst, immF0 con) %{
 4931   match(Set dst con);
 4932 
 4933   ins_cost(XFER_COST);
 4934 
 4935   format %{ "fmv.w.x $dst, zr\t# float, #@loadConF0" %}
 4936 
 4937   ins_encode %{
 4938     __ fmv_w_x(as_FloatRegister($dst$$reg), zr);
 4939   %}
 4940 
 4941   ins_pipe(fp_load_constant_s);
 4942 %}
 4943 
 4944 // Load Double Constant
 4945 instruct loadConD(fRegD dst, immD con) %{
 4946   match(Set dst con);
 4947 
 4948   ins_cost(LOAD_COST);
 4949   format %{
 4950     "fld $dst, [$constantaddress]\t# load from constant table: double=$con, #@loadConD"
 4951   %}
 4952 
 4953   ins_encode %{
 4954     __ fld(as_FloatRegister($dst$$reg), $constantaddress($con));
 4955   %}
 4956 
 4957   ins_pipe(fp_load_constant_d);
 4958 %}
 4959 
 4960 instruct loadConD0(fRegD dst, immD0 con) %{
 4961   match(Set dst con);
 4962 
 4963   ins_cost(XFER_COST);
 4964 
 4965   format %{ "fmv.d.x $dst, zr\t# double, #@loadConD0" %}
 4966 
 4967   ins_encode %{
 4968     __ fmv_d_x(as_FloatRegister($dst$$reg), zr);
 4969   %}
 4970 
 4971   ins_pipe(fp_load_constant_d);
 4972 %}
 4973 
 4974 // Store Byte
 4975 instruct storeB(iRegIorL2I src, memory mem)
 4976 %{
 4977   match(Set mem (StoreB mem src));
 4978 
 4979   ins_cost(STORE_COST);
 4980   format %{ "sb  $src, $mem\t# byte, #@storeB" %}
 4981 
 4982   ins_encode %{
 4983     __ sb(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4984   %}
 4985 
 4986   ins_pipe(istore_reg_mem);
 4987 %}
 4988 
 4989 instruct storeimmB0(immI0 zero, memory mem)
 4990 %{
 4991   match(Set mem (StoreB mem zero));
 4992 
 4993   ins_cost(STORE_COST);
 4994   format %{ "sb zr, $mem\t# byte, #@storeimmB0" %}
 4995 
 4996   ins_encode %{
 4997     __ sb(zr, Address(as_Register($mem$$base), $mem$$disp));
 4998   %}
 4999 
 5000   ins_pipe(istore_mem);
 5001 %}
 5002 
 5003 // Store Char/Short
 5004 instruct storeC(iRegIorL2I src, memory mem)
 5005 %{
 5006   match(Set mem (StoreC mem src));
 5007 
 5008   ins_cost(STORE_COST);
 5009   format %{ "sh  $src, $mem\t# short, #@storeC" %}
 5010 
 5011   ins_encode %{
 5012     __ sh(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
 5013   %}
 5014 
 5015   ins_pipe(istore_reg_mem);
 5016 %}
 5017 
 5018 instruct storeimmC0(immI0 zero, memory mem)
 5019 %{
 5020   match(Set mem (StoreC mem zero));
 5021 
 5022   ins_cost(STORE_COST);
 5023   format %{ "sh  zr, $mem\t# short, #@storeimmC0" %}
 5024 
 5025   ins_encode %{
 5026     __ sh(zr, Address(as_Register($mem$$base), $mem$$disp));
 5027   %}
 5028 
 5029   ins_pipe(istore_mem);
 5030 %}
 5031 
 5032 // Store Integer
 5033 instruct storeI(iRegIorL2I src, memory mem)
 5034 %{
  match(Set mem (StoreI mem src));
 5036 
 5037   ins_cost(STORE_COST);
 5038   format %{ "sw  $src, $mem\t# int, #@storeI" %}
 5039 
 5040   ins_encode %{
 5041     __ sw(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
 5042   %}
 5043 
 5044   ins_pipe(istore_reg_mem);
 5045 %}
 5046 
 5047 instruct storeimmI0(immI0 zero, memory mem)
 5048 %{
  match(Set mem (StoreI mem zero));
 5050 
 5051   ins_cost(STORE_COST);
 5052   format %{ "sw  zr, $mem\t# int, #@storeimmI0" %}
 5053 
 5054   ins_encode %{
 5055     __ sw(zr, Address(as_Register($mem$$base), $mem$$disp));
 5056   %}
 5057 
 5058   ins_pipe(istore_mem);
 5059 %}
 5060 
 5061 // Store Long (64 bit signed)
 5062 instruct storeL(iRegL src, memory mem)
 5063 %{
 5064   match(Set mem (StoreL mem src));
 5065 
 5066   ins_cost(STORE_COST);
 5067   format %{ "sd  $src, $mem\t# long, #@storeL" %}
 5068 
 5069   ins_encode %{
 5070     __ sd(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
 5071   %}
 5072 
 5073   ins_pipe(istore_reg_mem);
 5074 %}
 5075 
// Store Long Zero (64 bit signed)
 5077 instruct storeimmL0(immL0 zero, memory mem)
 5078 %{
 5079   match(Set mem (StoreL mem zero));
 5080 
 5081   ins_cost(STORE_COST);
 5082   format %{ "sd  zr, $mem\t# long, #@storeimmL0" %}
 5083 
 5084   ins_encode %{
 5085     __ sd(zr, Address(as_Register($mem$$base), $mem$$disp));
 5086   %}
 5087 
 5088   ins_pipe(istore_mem);
 5089 %}
 5090 
 5091 // Store Pointer
 5092 instruct storeP(iRegP src, memory mem)
 5093 %{
 5094   match(Set mem (StoreP mem src));
 5095   predicate(n->as_Store()->barrier_data() == 0);
 5096 
 5097   ins_cost(STORE_COST);
 5098   format %{ "sd  $src, $mem\t# ptr, #@storeP" %}
 5099 
 5100   ins_encode %{
 5101     __ sd(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
 5102   %}
 5103 
 5104   ins_pipe(istore_reg_mem);
 5105 %}
 5106 
// Store Null Pointer
 5108 instruct storeimmP0(immP0 zero, memory mem)
 5109 %{
 5110   match(Set mem (StoreP mem zero));
 5111   predicate(n->as_Store()->barrier_data() == 0);
 5112 
 5113   ins_cost(STORE_COST);
 5114   format %{ "sd zr, $mem\t# ptr, #@storeimmP0" %}
 5115 
 5116   ins_encode %{
 5117     __ sd(zr, Address(as_Register($mem$$base), $mem$$disp));
 5118   %}
 5119 
 5120   ins_pipe(istore_mem);
 5121 %}
 5122 
 5123 // Store Compressed Pointer
 5124 instruct storeN(iRegN src, memory mem)
 5125 %{
 5126   predicate(n->as_Store()->barrier_data() == 0);
 5127   match(Set mem (StoreN mem src));
 5128 
 5129   ins_cost(STORE_COST);
 5130   format %{ "sw  $src, $mem\t# compressed ptr, #@storeN" %}
 5131 
 5132   ins_encode %{
 5133     __ sw(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
 5134   %}
 5135 
 5136   ins_pipe(istore_reg_mem);
 5137 %}
 5138 
 5139 instruct storeImmN0(immN0 zero, memory mem)
 5140 %{
 5141   predicate(n->as_Store()->barrier_data() == 0);
 5142   match(Set mem (StoreN mem zero));
 5143 
 5144   ins_cost(STORE_COST);
 5145   format %{ "sw  zr, $mem\t# compressed ptr, #@storeImmN0" %}
 5146 
 5147   ins_encode %{
 5148     __ sw(zr, Address(as_Register($mem$$base), $mem$$disp));
 5149   %}
 5150 
  ins_pipe(istore_mem);
 5152 %}
 5153 
 5154 // Store Float
 5155 instruct storeF(fRegF src, memory mem)
 5156 %{
 5157   match(Set mem (StoreF mem src));
 5158 
 5159   ins_cost(STORE_COST);
 5160   format %{ "fsw  $src, $mem\t# float, #@storeF" %}
 5161 
 5162   ins_encode %{
 5163     __ fsw(as_FloatRegister($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
 5164   %}
 5165 
 5166   ins_pipe(fp_store_reg_s);
 5167 %}
 5168 
 5169 // Store Double
 5170 instruct storeD(fRegD src, memory mem)
 5171 %{
 5172   match(Set mem (StoreD mem src));
 5173 
 5174   ins_cost(STORE_COST);
 5175   format %{ "fsd  $src, $mem\t# double, #@storeD" %}
 5176 
 5177   ins_encode %{
 5178     __ fsd(as_FloatRegister($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
 5179   %}
 5180 
 5181   ins_pipe(fp_store_reg_d);
 5182 %}
 5183 
 5184 // Store Compressed Klass Pointer
 5185 instruct storeNKlass(iRegN src, memory mem)
 5186 %{
 5187   match(Set mem (StoreNKlass mem src));
 5188 
 5189   ins_cost(STORE_COST);
 5190   format %{ "sw  $src, $mem\t# compressed klass ptr, #@storeNKlass" %}
 5191 
 5192   ins_encode %{
 5193     __ sw(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
 5194   %}
 5195 
 5196   ins_pipe(istore_reg_mem);
 5197 %}
 5198 
 5199 // ============================================================================
 5200 // Prefetch instructions
 5201 // Must be safe to execute with invalid address (cannot fault).
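//
// For an out-of-range displacement the prefetchalloc encoding below first
// materializes the effective address in t0, roughly (a sketch of the
// expansion, not an additional matching rule):
//   mv         t0, disp
//   add        t0, base, t0
//   prefetch.w 0(t0)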
 5202 
 5203 instruct prefetchalloc( memory mem ) %{
 5204   predicate(UseZicbop);
 5205   match(PrefetchAllocation mem);
 5206 
 5207   ins_cost(ALU_COST * 1);
 5208   format %{ "prefetch_w $mem\t# Prefetch for write" %}
 5209 
 5210   ins_encode %{
 5211     if (Assembler::is_simm12($mem$$disp)) {
 5212       if (($mem$$disp & 0x1f) == 0) {
 5213         __ prefetch_w(as_Register($mem$$base), $mem$$disp);
 5214       } else {
 5215         __ addi(t0, as_Register($mem$$base), $mem$$disp);
 5216         __ prefetch_w(t0, 0);
 5217       }
 5218     } else {
 5219       __ mv(t0, $mem$$disp);
 5220       __ add(t0, as_Register($mem$$base), t0);
 5221       __ prefetch_w(t0, 0);
 5222     }
 5223   %}
 5224 
 5225   ins_pipe(iload_prefetch);
 5226 %}
 5227 
 5228 // ============================================================================
 5229 // Atomic operation instructions
 5230 //
 5231 
 5232 // standard CompareAndSwapX when we are using barriers
 5233 // these have higher priority than the rules selected by a predicate
 5234 instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5235                          iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5236 %{
 5237   match(Set res (CompareAndSwapB mem (Binary oldval newval)));
 5238 
 5239   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 10 + BRANCH_COST * 4);
 5240 
 5241   effect(TEMP_DEF res, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
 5242 
 5243   format %{
 5244     "cmpxchg $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval\n\t"
 5245     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapB"
 5246   %}
 5247 
 5248   ins_encode %{
 5249     __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
 5250                             Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
 5251                             true /* result as bool */, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5252   %}
 5253 
 5254   ins_pipe(pipe_slow);
 5255 %}
 5256 
 5257 instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5258                          iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5259 %{
 5260   match(Set res (CompareAndSwapS mem (Binary oldval newval)));
 5261 
 5262   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 11 + BRANCH_COST * 4);
 5263 
 5264   effect(TEMP_DEF res, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
 5265 
 5266   format %{
 5267     "cmpxchg $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval\n\t"
 5268     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapS"
 5269   %}
 5270 
 5271   ins_encode %{
 5272     __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
 5273                             Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
 5274                             true /* result as bool */, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5275   %}
 5276 
 5277   ins_pipe(pipe_slow);
 5278 %}
 5279 
 5280 instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
 5281 %{
 5282   match(Set res (CompareAndSwapI mem (Binary oldval newval)));
 5283 
 5284   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 6 + BRANCH_COST * 4);
 5285 
 5286   format %{
 5287     "cmpxchg $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval\n\t"
 5288     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapI"
 5289   %}
 5290 
 5291   ins_encode(riscv_enc_cmpxchgw(res, mem, oldval, newval));
 5292 
 5293   ins_pipe(pipe_slow);
 5294 %}
 5295 
 5296 instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval)
 5297 %{
 5298   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
 5299 
 5300   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 6 + BRANCH_COST * 4);
 5301 
 5302   format %{
 5303     "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval\n\t"
 5304     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapL"
 5305   %}
 5306 
 5307   ins_encode(riscv_enc_cmpxchg(res, mem, oldval, newval));
 5308 
 5309   ins_pipe(pipe_slow);
 5310 %}
 5311 
 5312 instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval)
 5313 %{
 5314   predicate(n->as_LoadStore()->barrier_data() == 0);
 5315 
 5316   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
 5317 
 5318   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 6 + BRANCH_COST * 4);
 5319 
 5320   format %{
 5321     "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval\n\t"
 5322     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapP"
 5323   %}
 5324 
 5325   ins_encode(riscv_enc_cmpxchg(res, mem, oldval, newval));
 5326 
 5327   ins_pipe(pipe_slow);
 5328 %}
 5329 
 5330 instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval)
 5331 %{
 5332   predicate(n->as_LoadStore()->barrier_data() == 0);
 5333   match(Set res (CompareAndSwapN mem (Binary oldval newval)));
 5334 
 5335   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 8 + BRANCH_COST * 4);
 5336 
 5337   format %{
 5338     "cmpxchg $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval\n\t"
 5339     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapN"
 5340   %}
 5341 
 5342   ins_encode(riscv_enc_cmpxchgn(res, mem, oldval, newval));
 5343 
 5344   ins_pipe(pipe_slow);
 5345 %}
 5346 
 5347 // alternative CompareAndSwapX when we are eliding barriers
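// In these Acq forms the load-reserved carries the aq bit (roughly lr.w.aq /
// lr.d.aq instead of plain lr.w / lr.d), while the store-conditional keeps the
// rl bit in both flavours; this is a sketch of the difference, see the cmpxchg
// helpers in the RISC-V MacroAssembler for the exact sequences.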
 5348 instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5349                             iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5350 %{
 5351   predicate(needs_acquiring_load_reserved(n));
 5352 
 5353   match(Set res (CompareAndSwapB mem (Binary oldval newval)));
 5354 
 5355   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 10 + BRANCH_COST * 4);
 5356 
 5357   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5358 
 5359   format %{
 5360     "cmpxchg_acq $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval\n\t"
 5361     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapBAcq"
 5362   %}
 5363 
 5364   ins_encode %{
 5365     __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
 5366                             Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
 5367                             true /* result as bool */, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5368   %}
 5369 
 5370   ins_pipe(pipe_slow);
 5371 %}
 5372 
 5373 instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5374                             iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5375 %{
 5376   predicate(needs_acquiring_load_reserved(n));
 5377 
 5378   match(Set res (CompareAndSwapS mem (Binary oldval newval)));
 5379 
 5380   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 11 + BRANCH_COST * 4);
 5381 
 5382   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5383 
 5384   format %{
 5385     "cmpxchg_acq $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval\n\t"
 5386     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapSAcq"
 5387   %}
 5388 
 5389   ins_encode %{
 5390     __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
 5391                             Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
 5392                             true /* result as bool */, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5393   %}
 5394 
 5395   ins_pipe(pipe_slow);
 5396 %}
 5397 
 5398 instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
 5399 %{
 5400   predicate(needs_acquiring_load_reserved(n));
 5401 
 5402   match(Set res (CompareAndSwapI mem (Binary oldval newval)));
 5403 
 5404   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 6 + BRANCH_COST * 4);
 5405 
 5406   format %{
 5407     "cmpxchg_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval\n\t"
 5408     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapIAcq"
 5409   %}
 5410 
 5411   ins_encode(riscv_enc_cmpxchgw_acq(res, mem, oldval, newval));
 5412 
 5413   ins_pipe(pipe_slow);
 5414 %}
 5415 
 5416 instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval)
 5417 %{
 5418   predicate(needs_acquiring_load_reserved(n));
 5419 
 5420   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
 5421 
 5422   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 6 + BRANCH_COST * 4);
 5423 
 5424   format %{
 5425     "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval\n\t"
 5426     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapLAcq"
 5427   %}
 5428 
 5429   ins_encode(riscv_enc_cmpxchg_acq(res, mem, oldval, newval));
 5430 
 5431   ins_pipe(pipe_slow);
 5432 %}
 5433 
 5434 instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval)
 5435 %{
 5436   predicate(needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == 0));
 5437 
 5438   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
 5439 
 5440   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 6 + BRANCH_COST * 4);
 5441 
 5442   format %{
 5443     "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval\n\t"
 5444     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapPAcq"
 5445   %}
 5446 
 5447   ins_encode(riscv_enc_cmpxchg_acq(res, mem, oldval, newval));
 5448 
 5449   ins_pipe(pipe_slow);
 5450 %}
 5451 
 5452 instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval)
 5453 %{
 5454   predicate(needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == 0);
 5455 
 5456   match(Set res (CompareAndSwapN mem (Binary oldval newval)));
 5457 
 5458   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 8 + BRANCH_COST * 4);
 5459 
 5460   format %{
 5461     "cmpxchg_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval\n\t"
 5462     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapNAcq"
 5463   %}
 5464 
 5465   ins_encode(riscv_enc_cmpxchgn_acq(res, mem, oldval, newval));
 5466 
 5467   ins_pipe(pipe_slow);
 5468 %}
 5469 
 5470 // Sundry CAS operations.  Note that release is always true,
 5471 // regardless of the memory ordering of the CAS.  This is because we
 5472 // need the volatile case to be sequentially consistent but there is
 5473 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
 5474 // can't check the type of memory ordering here, so we always emit a
 5475 // sc_d(w) with rl bit set.
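//
// As a rough sketch (assuming the generic MacroAssembler::cmpxchg expansion
// used by the encodings below), the word-sized strong form is an LR/SC retry
// loop along the lines of:
//   loop:
//     lr.w     res, (addr)         # lr.w.aq in the Acq variants
//     bne      res, oldval, done
//     sc.w.rl  t, newval, (addr)   # rl always set, as noted above
//     bnez     t, loop
//   done: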
 5476 instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5477                              iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5478 %{
 5479   match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
 5480 
 5481   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST * 5);
 5482 
 5483   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5484 
 5485   format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeB"
 5487   %}
 5488 
 5489   ins_encode %{
 5490     __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
 5491                             /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
 5492                             /*result_as_bool*/ false, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5493   %}
 5494 
 5495   ins_pipe(pipe_slow);
 5496 %}
 5497 
 5498 instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5499                              iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5500 %{
 5501   match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
 5502 
 5503   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST * 6);
 5504 
 5505   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5506 
 5507   format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeS"
 5509   %}
 5510 
 5511   ins_encode %{
 5512     __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
 5513                             /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
 5514                             /*result_as_bool*/ false, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5515   %}
 5516 
 5517   ins_pipe(pipe_slow);
 5518 %}
 5519 
 5520 instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
 5521 %{
 5522   match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
 5523 
 5524   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST);
 5525 
 5526   effect(TEMP_DEF res);
 5527 
 5528   format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeI"
 5530   %}
 5531 
 5532   ins_encode %{
 5533     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
 5534                /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
 5535   %}
 5536 
 5537   ins_pipe(pipe_slow);
 5538 %}
 5539 
 5540 instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval)
 5541 %{
 5542   match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
 5543 
 5544   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST);
 5545 
 5546   effect(TEMP_DEF res);
 5547 
 5548   format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeL"
 5550   %}
 5551 
 5552   ins_encode %{
 5553     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 5554                /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
 5555   %}
 5556 
 5557   ins_pipe(pipe_slow);
 5558 %}
 5559 
 5560 instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval)
 5561 %{
 5562   predicate(n->as_LoadStore()->barrier_data() == 0);
 5563   match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
 5564 
 5565   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST * 3);
 5566 
 5567   effect(TEMP_DEF res);
 5568 
 5569   format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeN"
 5571   %}
 5572 
 5573   ins_encode %{
 5574     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
 5575                /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
 5576   %}
 5577 
 5578   ins_pipe(pipe_slow);
 5579 %}
 5580 
 5581 instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval)
 5582 %{
 5583   predicate(n->as_LoadStore()->barrier_data() == 0);
 5584   match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
 5585 
 5586   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST);
 5587 
 5588   effect(TEMP_DEF res);
 5589 
 5590   format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeP"
 5592   %}
 5593 
 5594   ins_encode %{
 5595     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 5596                /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
 5597   %}
 5598 
 5599   ins_pipe(pipe_slow);
 5600 %}
 5601 
 5602 instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5603                                 iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5604 %{
 5605   predicate(needs_acquiring_load_reserved(n));
 5606 
 5607   match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
 5608 
 5609   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST * 5);
 5610 
 5611   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5612 
 5613   format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeBAcq"
 5615   %}
 5616 
 5617   ins_encode %{
 5618     __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
 5619                             /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
 5620                             /*result_as_bool*/ false, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5621   %}
 5622 
 5623   ins_pipe(pipe_slow);
 5624 %}
 5625 
 5626 instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5627                                 iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5628 %{
 5629   predicate(needs_acquiring_load_reserved(n));
 5630 
 5631   match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
 5632 
 5633   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST * 6);
 5634 
 5635   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5636 
 5637   format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeSAcq"
 5639   %}
 5640 
 5641   ins_encode %{
 5642     __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
 5643                             /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
 5644                             /*result_as_bool*/ false, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5645   %}
 5646 
 5647   ins_pipe(pipe_slow);
 5648 %}
 5649 
 5650 instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
 5651 %{
 5652   predicate(needs_acquiring_load_reserved(n));
 5653 
 5654   match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
 5655 
 5656   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST);
 5657 
 5658   effect(TEMP_DEF res);
 5659 
 5660   format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeIAcq"
 5662   %}
 5663 
 5664   ins_encode %{
 5665     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
 5666                /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 5667   %}
 5668 
 5669   ins_pipe(pipe_slow);
 5670 %}
 5671 
 5672 instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval)
 5673 %{
 5674   predicate(needs_acquiring_load_reserved(n));
 5675 
 5676   match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
 5677 
 5678   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST);
 5679 
 5680   effect(TEMP_DEF res);
 5681 
 5682   format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeLAcq"
 5684   %}
 5685 
 5686   ins_encode %{
 5687     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 5688                /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 5689   %}
 5690 
 5691   ins_pipe(pipe_slow);
 5692 %}
 5693 
 5694 instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval)
 5695 %{
 5696   predicate(needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == 0);
 5697 
 5698   match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
 5699 
 5700   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST);
 5701 
 5702   effect(TEMP_DEF res);
 5703 
 5704   format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeNAcq"
 5706   %}
 5707 
 5708   ins_encode %{
 5709     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
 5710                /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 5711   %}
 5712 
 5713   ins_pipe(pipe_slow);
 5714 %}
 5715 
 5716 instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval)
 5717 %{
 5718   predicate(needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == 0));
 5719 
 5720   match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
 5721 
 5722   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST);
 5723 
 5724   effect(TEMP_DEF res);
 5725 
 5726   format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangePAcq"
 5728   %}
 5729 
 5730   ins_encode %{
 5731     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 5732                /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 5733   %}
 5734 
 5735   ins_pipe(pipe_slow);
 5736 %}
 5737 
 5738 instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5739                              iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5740 %{
 5741   match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
 5742 
 5743   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 6);
 5744 
 5745   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5746 
 5747   format %{
 5748     "weak_cmpxchg $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 5749     "# $res == 1 when success, #@weakCompareAndSwapB"
 5750   %}
 5751 
 5752   ins_encode %{
 5753     __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
 5754                                  /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
 5755                                  $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5756   %}
 5757 
 5758   ins_pipe(pipe_slow);
 5759 %}
 5760 
 5761 instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5762                              iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5763 %{
 5764   match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
 5765 
 5766   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 7);
 5767 
 5768   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5769 
 5770   format %{
 5771     "weak_cmpxchg $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 5772     "# $res == 1 when success, #@weakCompareAndSwapS"
 5773   %}
 5774 
 5775   ins_encode %{
 5776     __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
 5777                                  /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
 5778                                  $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5779   %}
 5780 
 5781   ins_pipe(pipe_slow);
 5782 %}
 5783 
 5784 instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
 5785 %{
 5786   match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
 5787 
 5788   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 2);
 5789 
 5790   format %{
 5791     "weak_cmpxchg $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 5792     "# $res == 1 when success, #@weakCompareAndSwapI"
 5793   %}
 5794 
 5795   ins_encode %{
 5796     __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
 5797                     /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
 5798   %}
 5799 
 5800   ins_pipe(pipe_slow);
 5801 %}
 5802 
 5803 instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval)
 5804 %{
 5805   match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
 5806 
 5807   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 2);
 5808 
 5809   format %{
 5810     "weak_cmpxchg $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 5811     "# $res == 1 when success, #@weakCompareAndSwapL"
 5812   %}
 5813 
 5814   ins_encode %{
 5815     __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 5816                     /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
 5817   %}
 5818 
 5819   ins_pipe(pipe_slow);
 5820 %}
 5821 
 5822 instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval)
 5823 %{
 5824   predicate(n->as_LoadStore()->barrier_data() == 0);
 5825   match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
 5826 
 5827   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 4);
 5828 
 5829   format %{
 5830     "weak_cmpxchg $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 5831     "# $res == 1 when success, #@weakCompareAndSwapN"
 5832   %}
 5833 
 5834   ins_encode %{
 5835     __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
 5836                     /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
 5837   %}
 5838 
 5839   ins_pipe(pipe_slow);
 5840 %}
 5841 
 5842 instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval)
 5843 %{
 5844   predicate(n->as_LoadStore()->barrier_data() == 0);
 5845   match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
 5846 
 5847   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 2);
 5848 
 5849   format %{
 5850     "weak_cmpxchg $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 5851     "# $res == 1 when success, #@weakCompareAndSwapP"
 5852   %}
 5853 
 5854   ins_encode %{
 5855     __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 5856                     /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
 5857   %}
 5858 
 5859   ins_pipe(pipe_slow);
 5860 %}
 5861 
 5862 instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5863                                 iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5864 %{
 5865   predicate(needs_acquiring_load_reserved(n));
 5866 
 5867   match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
 5868 
 5869   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 6);
 5870 
 5871   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5872 
 5873   format %{
 5874     "weak_cmpxchg_acq $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 5875     "# $res == 1 when success, #@weakCompareAndSwapBAcq"
 5876   %}
 5877 
 5878   ins_encode %{
 5879     __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
 5880                                  /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
 5881                                  $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5882   %}
 5883 
 5884   ins_pipe(pipe_slow);
 5885 %}
 5886 
 5887 instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5888                                 iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5889 %{
 5890   predicate(needs_acquiring_load_reserved(n));
 5891 
 5892   match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
 5893 
 5894   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 7);
 5895 
 5896   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5897 
 5898   format %{
 5899     "weak_cmpxchg_acq $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 5900     "# $res == 1 when success, #@weakCompareAndSwapSAcq"
 5901   %}
 5902 
 5903   ins_encode %{
 5904     __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
 5905                                  /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
 5906                                  $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5907   %}
 5908 
 5909   ins_pipe(pipe_slow);
 5910 %}
 5911 
 5912 instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
 5913 %{
 5914   predicate(needs_acquiring_load_reserved(n));
 5915 
 5916   match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
 5917 
 5918   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 2);
 5919 
 5920   format %{
 5921     "weak_cmpxchg_acq $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 5922     "# $res == 1 when success, #@weakCompareAndSwapIAcq"
 5923   %}
 5924 
 5925   ins_encode %{
 5926     __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
 5927                     /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 5928   %}
 5929 
 5930   ins_pipe(pipe_slow);
 5931 %}
 5932 
 5933 instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval)
 5934 %{
 5935   predicate(needs_acquiring_load_reserved(n));
 5936 
 5937   match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
 5938 
 5939   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 2);
 5940 
 5941   format %{
 5942     "weak_cmpxchg_acq $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 5943     "# $res == 1 when success, #@weakCompareAndSwapLAcq"
 5944   %}
 5945 
 5946   ins_encode %{
 5947     __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 5948                     /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 5949   %}
 5950 
 5951   ins_pipe(pipe_slow);
 5952 %}
 5953 
 5954 instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval)
 5955 %{
 5956   predicate(needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == 0);
 5957 
 5958   match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
 5959 
 5960   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 4);
 5961 
 5962   format %{
 5963     "weak_cmpxchg_acq $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 5964     "# $res == 1 when success, #@weakCompareAndSwapNAcq"
 5965   %}
 5966 
 5967   ins_encode %{
 5968     __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
 5969                     /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 5970   %}
 5971 
 5972   ins_pipe(pipe_slow);
 5973 %}
 5974 
 5975 instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval)
 5976 %{
 5977   predicate(needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == 0));
 5978 
 5979   match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
 5980 
 5981   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 2);
 5982 
 5983   format %{
 5984     "weak_cmpxchg_acq $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapPAcq"
 5986   %}
 5987 
 5988   ins_encode %{
 5989     __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 5990                     /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 5991   %}
 5992 
 5993   ins_pipe(pipe_slow);
 5994 %}
 5995 
 5996 instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev)
 5997 %{
 5998   match(Set prev (GetAndSetI mem newv));
 5999 
 6000   ins_cost(ALU_COST);
 6001 
 6002   format %{ "atomic_xchgw  $prev, $newv, [$mem]\t#@get_and_setI" %}
 6003 
 6004   ins_encode %{
 6005     __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
 6006   %}
 6007 
 6008   ins_pipe(pipe_serial);
 6009 %}
 6010 
 6011 instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev)
 6012 %{
 6013   match(Set prev (GetAndSetL mem newv));
 6014 
 6015   ins_cost(ALU_COST);
 6016 
 6017   format %{ "atomic_xchg  $prev, $newv, [$mem]\t#@get_and_setL" %}
 6018 
 6019   ins_encode %{
 6020     __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
 6021   %}
 6022 
 6023   ins_pipe(pipe_serial);
 6024 %}
 6025 
 6026 instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev)
 6027 %{
 6028   predicate(n->as_LoadStore()->barrier_data() == 0);
 6029 
 6030   match(Set prev (GetAndSetN mem newv));
 6031 
 6032   ins_cost(ALU_COST);
 6033 
 6034   format %{ "atomic_xchgwu $prev, $newv, [$mem]\t#@get_and_setN" %}
 6035 
 6036   ins_encode %{
 6037     __ atomic_xchgwu($prev$$Register, $newv$$Register, as_Register($mem$$base));
 6038   %}
 6039 
 6040   ins_pipe(pipe_serial);
 6041 %}
 6042 
 6043 instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev)
 6044 %{
 6045   predicate(n->as_LoadStore()->barrier_data() == 0);
 6046   match(Set prev (GetAndSetP mem newv));
 6047 
 6048   ins_cost(ALU_COST);
 6049 
 6050   format %{ "atomic_xchg  $prev, $newv, [$mem]\t#@get_and_setP" %}
 6051 
 6052   ins_encode %{
 6053     __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
 6054   %}
 6055 
 6056   ins_pipe(pipe_serial);
 6057 %}
 6058 
 6059 instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev)
 6060 %{
 6061   predicate(needs_acquiring_load_reserved(n));
 6062 
 6063   match(Set prev (GetAndSetI mem newv));
 6064 
 6065   ins_cost(ALU_COST);
 6066 
 6067   format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]\t#@get_and_setIAcq" %}
 6068 
 6069   ins_encode %{
 6070     __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
 6071   %}
 6072 
 6073   ins_pipe(pipe_serial);
 6074 %}
 6075 
 6076 instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev)
 6077 %{
 6078   predicate(needs_acquiring_load_reserved(n));
 6079 
 6080   match(Set prev (GetAndSetL mem newv));
 6081 
 6082   ins_cost(ALU_COST);
 6083 
 6084   format %{ "atomic_xchg_acq  $prev, $newv, [$mem]\t#@get_and_setLAcq" %}
 6085 
 6086   ins_encode %{
 6087     __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
 6088   %}
 6089 
 6090   ins_pipe(pipe_serial);
 6091 %}
 6092 
 6093 instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev)
 6094 %{
 6095   predicate(needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == 0);
 6096 
 6097   match(Set prev (GetAndSetN mem newv));
 6098 
 6099   ins_cost(ALU_COST);
 6100 
 6101   format %{ "atomic_xchgwu_acq $prev, $newv, [$mem]\t#@get_and_setNAcq" %}
 6102 
 6103   ins_encode %{
 6104     __ atomic_xchgalwu($prev$$Register, $newv$$Register, as_Register($mem$$base));
 6105   %}
 6106 
 6107   ins_pipe(pipe_serial);
 6108 %}
 6109 
 6110 instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev)
 6111 %{
 6112   predicate(needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == 0));
 6113 
 6114   match(Set prev (GetAndSetP mem newv));
 6115 
 6116   ins_cost(ALU_COST);
 6117 
 6118   format %{ "atomic_xchg_acq  $prev, $newv, [$mem]\t#@get_and_setPAcq" %}
 6119 
 6120   ins_encode %{
 6121     __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
 6122   %}
 6123 
 6124   ins_pipe(pipe_serial);
 6125 %}
 6126 
 6127 instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr)
 6128 %{
 6129   match(Set newval (GetAndAddL mem incr));
 6130 
 6131   ins_cost(ALU_COST);
 6132 
 6133   format %{ "get_and_addL $newval, [$mem], $incr\t#@get_and_addL" %}
 6134 
 6135   ins_encode %{
 6136     __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
 6137   %}
 6138 
 6139   ins_pipe(pipe_serial);
 6140 %}
 6141 
 6142 instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr)
 6143 %{
 6144   predicate(n->as_LoadStore()->result_not_used());
 6145 
 6146   match(Set dummy (GetAndAddL mem incr));
 6147 
 6148   ins_cost(ALU_COST);
 6149 
 6150   format %{ "get_and_addL [$mem], $incr\t#@get_and_addL_no_res" %}
 6151 
 6152   ins_encode %{
 6153     __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
 6154   %}
 6155 
 6156   ins_pipe(pipe_serial);
 6157 %}
 6158 
 6159 instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAdd incr)
 6160 %{
 6161   match(Set newval (GetAndAddL mem incr));
 6162 
 6163   ins_cost(ALU_COST);
 6164 
 6165   format %{ "get_and_addL $newval, [$mem], $incr\t#@get_and_addLi" %}
 6166 
 6167   ins_encode %{
 6168     __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
 6169   %}
 6170 
 6171   ins_pipe(pipe_serial);
 6172 %}
 6173 
 6174 instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAdd incr)
 6175 %{
 6176   predicate(n->as_LoadStore()->result_not_used());
 6177 
 6178   match(Set dummy (GetAndAddL mem incr));
 6179 
 6180   ins_cost(ALU_COST);
 6181 
 6182   format %{ "get_and_addL [$mem], $incr\t#@get_and_addLi_no_res" %}
 6183 
 6184   ins_encode %{
 6185     __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
 6186   %}
 6187 
 6188   ins_pipe(pipe_serial);
 6189 %}
 6190 
 6191 instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr)
 6192 %{
 6193   match(Set newval (GetAndAddI mem incr));
 6194 
 6195   ins_cost(ALU_COST);
 6196 
 6197   format %{ "get_and_addI $newval, [$mem], $incr\t#@get_and_addI" %}
 6198 
 6199   ins_encode %{
 6200     __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
 6201   %}
 6202 
 6203   ins_pipe(pipe_serial);
 6204 %}
 6205 
 6206 instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr)
 6207 %{
 6208   predicate(n->as_LoadStore()->result_not_used());
 6209 
 6210   match(Set dummy (GetAndAddI mem incr));
 6211 
 6212   ins_cost(ALU_COST);
 6213 
 6214   format %{ "get_and_addI [$mem], $incr\t#@get_and_addI_no_res" %}
 6215 
 6216   ins_encode %{
 6217     __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
 6218   %}
 6219 
 6220   ins_pipe(pipe_serial);
 6221 %}
 6222 
 6223 instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAdd incr)
 6224 %{
 6225   match(Set newval (GetAndAddI mem incr));
 6226 
 6227   ins_cost(ALU_COST);
 6228 
 6229   format %{ "get_and_addI $newval, [$mem], $incr\t#@get_and_addIi" %}
 6230 
 6231   ins_encode %{
 6232     __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
 6233   %}
 6234 
 6235   ins_pipe(pipe_serial);
 6236 %}
 6237 
 6238 instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAdd incr)
 6239 %{
 6240   predicate(n->as_LoadStore()->result_not_used());
 6241 
 6242   match(Set dummy (GetAndAddI mem incr));
 6243 
 6244   ins_cost(ALU_COST);
 6245 
 6246   format %{ "get_and_addI [$mem], $incr\t#@get_and_addIi_no_res" %}
 6247 
 6248   ins_encode %{
 6249     __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
 6250   %}
 6251 
 6252   ins_pipe(pipe_serial);
 6253 %}
 6254 
 6255 instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr)
 6256 %{
 6257   predicate(needs_acquiring_load_reserved(n));
 6258 
 6259   match(Set newval (GetAndAddL mem incr));
 6260 
 6261   ins_cost(ALU_COST);
 6262 
 6263   format %{ "get_and_addL_acq $newval, [$mem], $incr\t#@get_and_addLAcq" %}
 6264 
 6265   ins_encode %{
 6266     __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
 6267   %}
 6268 
 6269   ins_pipe(pipe_serial);
 6270 %}
 6271 
instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr)
%{
 6273   predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_reserved(n));
 6274 
 6275   match(Set dummy (GetAndAddL mem incr));
 6276 
 6277   ins_cost(ALU_COST);
 6278 
 6279   format %{ "get_and_addL_acq [$mem], $incr\t#@get_and_addL_no_resAcq" %}
 6280 
 6281   ins_encode %{
 6282     __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
 6283   %}
 6284 
 6285   ins_pipe(pipe_serial);
 6286 %}
 6287 
 6288 instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAdd incr)
 6289 %{
 6290   predicate(needs_acquiring_load_reserved(n));
 6291 
 6292   match(Set newval (GetAndAddL mem incr));
 6293 
 6294   ins_cost(ALU_COST);
 6295 
 6296   format %{ "get_and_addL_acq $newval, [$mem], $incr\t#@get_and_addLiAcq" %}
 6297 
 6298   ins_encode %{
 6299     __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
 6300   %}
 6301 
 6302   ins_pipe(pipe_serial);
 6303 %}
 6304 
 6305 instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAdd incr)
 6306 %{
 6307   predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_reserved(n));
 6308 
 6309   match(Set dummy (GetAndAddL mem incr));
 6310 
 6311   ins_cost(ALU_COST);
 6312 
 6313   format %{ "get_and_addL_acq [$mem], $incr\t#@get_and_addLi_no_resAcq" %}
 6314 
 6315   ins_encode %{
 6316     __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
 6317   %}
 6318 
 6319   ins_pipe(pipe_serial);
 6320 %}
 6321 
 6322 instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr)
 6323 %{
 6324   predicate(needs_acquiring_load_reserved(n));
 6325 
 6326   match(Set newval (GetAndAddI mem incr));
 6327 
 6328   ins_cost(ALU_COST);
 6329 
 6330   format %{ "get_and_addI_acq $newval, [$mem], $incr\t#@get_and_addIAcq" %}
 6331 
 6332   ins_encode %{
 6333     __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
 6334   %}
 6335 
 6336   ins_pipe(pipe_serial);
 6337 %}
 6338 
 6339 instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr)
 6340 %{
 6341   predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_reserved(n));
 6342 
 6343   match(Set dummy (GetAndAddI mem incr));
 6344 
 6345   ins_cost(ALU_COST);
 6346 
 6347   format %{ "get_and_addI_acq [$mem], $incr\t#@get_and_addI_no_resAcq" %}
 6348 
 6349   ins_encode %{
 6350     __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
 6351   %}
 6352 
 6353   ins_pipe(pipe_serial);
 6354 %}
 6355 
 6356 instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAdd incr)
 6357 %{
 6358   predicate(needs_acquiring_load_reserved(n));
 6359 
 6360   match(Set newval (GetAndAddI mem incr));
 6361 
 6362   ins_cost(ALU_COST);
 6363 
 6364   format %{ "get_and_addI_acq $newval, [$mem], $incr\t#@get_and_addIiAcq" %}
 6365 
 6366   ins_encode %{
 6367     __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
 6368   %}
 6369 
 6370   ins_pipe(pipe_serial);
 6371 %}
 6372 
 6373 instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAdd incr)
 6374 %{
 6375   predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_reserved(n));
 6376 
 6377   match(Set dummy (GetAndAddI mem incr));
 6378 
 6379   ins_cost(ALU_COST);
 6380 
 6381   format %{ "get_and_addI_acq [$mem], $incr\t#@get_and_addIi_no_resAcq" %}
 6382 
 6383   ins_encode %{
 6384     __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
 6385   %}
 6386 
 6387   ins_pipe(pipe_serial);
 6388 %}
 6389 
 6390 // ============================================================================
 6391 // Arithmetic Instructions
 6392 //
 6393 
 6394 // Integer Addition
 6395 
 6396 // TODO
// These currently employ operations which do not set CR and hence are
// not flagged as killing CR, but we would like to isolate the cases
// where we want to set flags from those where we don't. We still need
// to work out how to do that.
 6401 instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 6402   match(Set dst (AddI src1 src2));
 6403 
 6404   ins_cost(ALU_COST);
 6405   format %{ "addw  $dst, $src1, $src2\t#@addI_reg_reg" %}
 6406 
 6407   ins_encode %{
 6408     __ addw(as_Register($dst$$reg),
 6409             as_Register($src1$$reg),
 6410             as_Register($src2$$reg));
 6411   %}
 6412 
 6413   ins_pipe(ialu_reg_reg);
 6414 %}
 6415 
 6416 instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAdd src2) %{
 6417   match(Set dst (AddI src1 src2));
 6418 
 6419   ins_cost(ALU_COST);
 6420   format %{ "addiw  $dst, $src1, $src2\t#@addI_reg_imm" %}
 6421 
 6422   ins_encode %{
    int32_t con = (int32_t)$src2$$constant;
    __ addiw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             con);
 6427   %}
 6428 
 6429   ins_pipe(ialu_reg_imm);
 6430 %}
 6431 
 6432 instruct addI_reg_imm_l2i(iRegINoSp dst, iRegL src1, immIAdd src2) %{
 6433   match(Set dst (AddI (ConvL2I src1) src2));
 6434 
 6435   ins_cost(ALU_COST);
 6436   format %{ "addiw  $dst, $src1, $src2\t#@addI_reg_imm_l2i" %}
 6437 
 6438   ins_encode %{
 6439     __ addiw(as_Register($dst$$reg),
 6440              as_Register($src1$$reg),
 6441              $src2$$constant);
 6442   %}
 6443 
 6444   ins_pipe(ialu_reg_imm);
 6445 %}
 6446 
 6447 // Pointer Addition
 6448 instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
 6449   match(Set dst (AddP src1 src2));
 6450 
 6451   ins_cost(ALU_COST);
 6452   format %{ "add $dst, $src1, $src2\t# ptr, #@addP_reg_reg" %}
 6453 
 6454   ins_encode %{
 6455     __ add(as_Register($dst$$reg),
 6456            as_Register($src1$$reg),
 6457            as_Register($src2$$reg));
 6458   %}
 6459 
 6460   ins_pipe(ialu_reg_reg);
 6461 %}
 6462 
 6463 // If we shift more than 32 bits, we need not convert I2L.
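// For example, for (long)i << 40 a single
//   slli dst, src, 40
// suffices (a sketch): the bits contributed by sign extension would all be
// shifted out past bit 63, so the result is unaffected by skipping ConvI2L.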
 6464 instruct lShiftL_regI_immGE32(iRegLNoSp dst, iRegI src, uimmI6_ge32 scale) %{
 6465   match(Set dst (LShiftL (ConvI2L src) scale));
 6466   ins_cost(ALU_COST);
 6467   format %{ "slli  $dst, $src, $scale & 63\t#@lShiftL_regI_immGE32" %}
 6468 
 6469   ins_encode %{
 6470     __ slli(as_Register($dst$$reg), as_Register($src$$reg), $scale$$constant & 63);
 6471   %}
 6472 
 6473   ins_pipe(ialu_reg_shift);
 6474 %}
 6475 
 6476 // Pointer Immediate Addition
 6477 // n.b. this needs to be more expensive than using an indirect memory
 6478 // operand
 6479 instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAdd src2) %{
 6480   match(Set dst (AddP src1 src2));
 6481   ins_cost(ALU_COST);
 6482   format %{ "addi  $dst, $src1, $src2\t# ptr, #@addP_reg_imm" %}
 6483 
 6484   ins_encode %{
    // src2 is an immediate, so this expands to an addi
 6486     __ add(as_Register($dst$$reg),
 6487            as_Register($src1$$reg),
 6488            $src2$$constant);
 6489   %}
 6490 
 6491   ins_pipe(ialu_reg_imm);
 6492 %}
 6493 
 6494 // Long Addition
 6495 instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
 6496   match(Set dst (AddL src1 src2));
 6497   ins_cost(ALU_COST);
 6498   format %{ "add  $dst, $src1, $src2\t#@addL_reg_reg" %}
 6499 
 6500   ins_encode %{
 6501     __ add(as_Register($dst$$reg),
 6502            as_Register($src1$$reg),
 6503            as_Register($src2$$reg));
 6504   %}
 6505 
 6506   ins_pipe(ialu_reg_reg);
 6507 %}
 6508 
// Long Immediate Addition
// No constant pool entries required.
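// As a sketch of the selection: an AddL with a small constant such as x + 42
// can be emitted directly as
//   addi dst, src1, 42
// while a long constant that immLAdd does not accept is first materialized
// (e.g. via loadConL) and then added with the register-register rule above.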
 6510 instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAdd src2) %{
 6511   match(Set dst (AddL src1 src2));
 6512   ins_cost(ALU_COST);
 6513   format %{ "addi  $dst, $src1, $src2\t#@addL_reg_imm" %}
 6514 
 6515   ins_encode %{
    // src2 is an immediate, so this expands to an addi
 6517     __ add(as_Register($dst$$reg),
 6518            as_Register($src1$$reg),
 6519            $src2$$constant);
 6520   %}
 6521 
 6522   ins_pipe(ialu_reg_imm);
 6523 %}
 6524 
 6525 // Integer Subtraction
 6526 instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 6527   match(Set dst (SubI src1 src2));
 6528 
 6529   ins_cost(ALU_COST);
 6530   format %{ "subw  $dst, $src1, $src2\t#@subI_reg_reg" %}
 6531 
 6532   ins_encode %{
 6533     __ subw(as_Register($dst$$reg),
 6534             as_Register($src1$$reg),
 6535             as_Register($src2$$reg));
 6536   %}
 6537 
 6538   ins_pipe(ialu_reg_reg);
 6539 %}
 6540 
 6541 // Immediate Subtraction
 6542 instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immISub src2) %{
 6543   match(Set dst (SubI src1 src2));
 6544 
 6545   ins_cost(ALU_COST);
 6546   format %{ "addiw  $dst, $src1, -$src2\t#@subI_reg_imm" %}
 6547 
 6548   ins_encode %{
    // src2 is an immediate; subw() negates it and emits an addiw
 6550     __ subw(as_Register($dst$$reg),
 6551             as_Register($src1$$reg),
 6552             $src2$$constant);
 6553   %}
 6554 
 6555   ins_pipe(ialu_reg_imm);
 6556 %}
 6557 
 6558 // Long Subtraction
 6559 instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
 6560   match(Set dst (SubL src1 src2));
 6561   ins_cost(ALU_COST);
 6562   format %{ "sub  $dst, $src1, $src2\t#@subL_reg_reg" %}
 6563 
 6564   ins_encode %{
 6565     __ sub(as_Register($dst$$reg),
 6566            as_Register($src1$$reg),
 6567            as_Register($src2$$reg));
 6568   %}
 6569 
 6570   ins_pipe(ialu_reg_reg);
 6571 %}
 6572 
// Long Immediate Subtraction
// No constant pool entries required.
 6574 instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLSub src2) %{
 6575   match(Set dst (SubL src1 src2));
 6576   ins_cost(ALU_COST);
 6577   format %{ "addi  $dst, $src1, -$src2\t#@subL_reg_imm" %}
 6578 
 6579   ins_encode %{
    // src2 is an immediate; sub() negates it and emits an addi
 6581     __ sub(as_Register($dst$$reg),
 6582            as_Register($src1$$reg),
 6583            $src2$$constant);
 6584   %}
 6585 
 6586   ins_pipe(ialu_reg_imm);
 6587 %}
 6588 
 6589 // Integer Negation (special case for sub)
 6590 
 6591 instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
 6592   match(Set dst (SubI zero src));
 6593   ins_cost(ALU_COST);
 6594   format %{ "subw  $dst, x0, $src\t# int, #@negI_reg" %}
 6595 
 6596   ins_encode %{
    // negw is an alias for subw dst, x0, src
 6598     __ negw(as_Register($dst$$reg),
 6599             as_Register($src$$reg));
 6600   %}
 6601 
 6602   ins_pipe(ialu_reg);
 6603 %}
 6604 
 6605 // Long Negation
 6606 
 6607 instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero) %{
 6608   match(Set dst (SubL zero src));
 6609   ins_cost(ALU_COST);
 6610   format %{ "sub  $dst, x0, $src\t# long, #@negL_reg" %}
 6611 
 6612   ins_encode %{
    // neg is an alias for sub dst, x0, src
 6614     __ neg(as_Register($dst$$reg),
 6615            as_Register($src$$reg));
 6616   %}
 6617 
 6618   ins_pipe(ialu_reg);
 6619 %}
 6620 
 6621 // Integer Multiply
 6622 
 6623 instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 6624   match(Set dst (MulI src1 src2));
 6625   ins_cost(IMUL_COST);
 6626   format %{ "mulw  $dst, $src1, $src2\t#@mulI" %}
 6627 
  // 32-bit (word) multiply; MulI does not require the full 64-bit product
  ins_encode %{
    // mulw sign-extends its 32-bit result into the upper 32 bits of dst
 6631     __ mulw(as_Register($dst$$reg),
 6632             as_Register($src1$$reg),
 6633             as_Register($src2$$reg));
 6634   %}
 6635 
 6636   ins_pipe(imul_reg_reg);
 6637 %}
 6638 
 6639 // Long Multiply
 6640 
 6641 instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
 6642   match(Set dst (MulL src1 src2));
 6643   ins_cost(IMUL_COST);
 6644   format %{ "mul  $dst, $src1, $src2\t#@mulL" %}
 6645 
 6646   ins_encode %{
 6647     __ mul(as_Register($dst$$reg),
 6648            as_Register($src1$$reg),
 6649            as_Register($src2$$reg));
 6650   %}
 6651 
 6652   ins_pipe(lmul_reg_reg);
 6653 %}
 6654 
 6655 instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2)
 6656 %{
 6657   match(Set dst (MulHiL src1 src2));
 6658   ins_cost(IMUL_COST);
 6659   format %{ "mulh  $dst, $src1, $src2\t# mulhi, #@mulHiL_rReg" %}
 6660 
 6661   ins_encode %{
 6662     __ mulh(as_Register($dst$$reg),
 6663             as_Register($src1$$reg),
 6664             as_Register($src2$$reg));
 6665   %}
 6666 
 6667   ins_pipe(lmul_reg_reg);
 6668 %}
 6669 
 6670 instruct umulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2)
 6671 %{
 6672   match(Set dst (UMulHiL src1 src2));
 6673   ins_cost(IMUL_COST);
 6674   format %{ "mulhu  $dst, $src1, $src2\t# umulhi, #@umulHiL_rReg" %}
 6675 
 6676   ins_encode %{
 6677     __ mulhu(as_Register($dst$$reg),
 6678              as_Register($src1$$reg),
 6679              as_Register($src2$$reg));
 6680   %}
 6681 
 6682   ins_pipe(lmul_reg_reg);
 6683 %}
 6684 
 6685 // Integer Divide
 6686 
 6687 instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 6688   match(Set dst (DivI src1 src2));
 6689   ins_cost(IDIVSI_COST);
  format %{ "divw  $dst, $src1, $src2\t#@divI" %}
 6691 
 6692   ins_encode(riscv_enc_divw(dst, src1, src2));
 6693   ins_pipe(idiv_reg_reg);
 6694 %}
 6695 
 6696 instruct UdivI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 6697   match(Set dst (UDivI src1 src2));
 6698   ins_cost(IDIVSI_COST);
  format %{ "divuw  $dst, $src1, $src2\t#@UdivI" %}
 6700 
 6701   ins_encode(riscv_enc_divuw(dst, src1, src2));
 6702   ins_pipe(idiv_reg_reg);
 6703 %}
 6704 
 6705 instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
 6706   match(Set dst (URShiftI (RShiftI src1 div1) div2));
 6707   ins_cost(ALU_COST);
 6708   format %{ "srliw $dst, $src1, $div1\t# int signExtract, #@signExtract" %}
 6709 
 6710   ins_encode %{
 6711     __ srliw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
 6712   %}
 6713   ins_pipe(ialu_reg_shift);
 6714 %}
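// n.b. (x >> 31) >>> 31 is 1 for negative x and 0 otherwise, which is why the
// pattern above reduces to a single srliw by 31.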
 6715 
 6716 // Long Divide
 6717 
 6718 instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
 6719   match(Set dst (DivL src1 src2));
 6720   ins_cost(IDIVDI_COST);
 6721   format %{ "div  $dst, $src1, $src2\t#@divL" %}
 6722 
 6723   ins_encode(riscv_enc_div(dst, src1, src2));
 6724   ins_pipe(ldiv_reg_reg);
 6725 %}
 6726 
 6727 instruct UdivL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
 6728   match(Set dst (UDivL src1 src2));
 6729   ins_cost(IDIVDI_COST);
 6730 
  format %{ "divu  $dst, $src1, $src2\t#@UdivL" %}
 6732 
 6733   ins_encode(riscv_enc_divu(dst, src1, src2));
 6734   ins_pipe(ldiv_reg_reg);
 6735 %}
 6736 
 6737 instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
 6738   match(Set dst (URShiftL (RShiftL src1 div1) div2));
 6739   ins_cost(ALU_COST);
 6740   format %{ "srli $dst, $src1, $div1\t# long signExtract, #@signExtractL" %}
 6741 
 6742   ins_encode %{
 6743     __ srli(as_Register($dst$$reg), as_Register($src1$$reg), 63);
 6744   %}
 6745   ins_pipe(ialu_reg_shift);
 6746 %}
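// n.b. likewise (x >> 63) >>> 63 extracts the sign bit of a long, so a single
// srli by 63 suffices.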
 6747 
 6748 // Integer Remainder
 6749 
 6750 instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 6751   match(Set dst (ModI src1 src2));
 6752   ins_cost(IDIVSI_COST);
 6753   format %{ "remw  $dst, $src1, $src2\t#@modI" %}
 6754 
 6755   ins_encode(riscv_enc_modw(dst, src1, src2));
 6756   ins_pipe(ialu_reg_reg);
 6757 %}
 6758 
 6759 instruct UmodI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 6760   match(Set dst (UModI src1 src2));
 6761   ins_cost(IDIVSI_COST);
 6762   format %{ "remuw  $dst, $src1, $src2\t#@UmodI" %}
 6763 
 6764   ins_encode(riscv_enc_moduw(dst, src1, src2));
 6765   ins_pipe(ialu_reg_reg);
 6766 %}
 6767 
 6768 // Long Remainder
 6769 
 6770 instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
 6771   match(Set dst (ModL src1 src2));
 6772   ins_cost(IDIVDI_COST);
 6773   format %{ "rem  $dst, $src1, $src2\t#@modL" %}
 6774 
 6775   ins_encode(riscv_enc_mod(dst, src1, src2));
 6776   ins_pipe(ialu_reg_reg);
 6777 %}
 6778 
 6779 instruct UmodL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
 6780   match(Set dst (UModL src1 src2));
 6781   ins_cost(IDIVDI_COST);
 6782   format %{ "remu  $dst, $src1, $src2\t#@UmodL" %}
 6783 
 6784   ins_encode(riscv_enc_modu(dst, src1, src2));
 6785   ins_pipe(ialu_reg_reg);
 6786 %}
 6787 
 6788 // Integer Shifts
 6789 
 6790 // Shift Left Register
 6791 // In RV64I, only the low 5 bits of src2 are considered for the shift amount
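// (e.g. a shift amount of 33 in src2 shifts by 1, matching Java's masking of
// int shift counts with 0x1f)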
 6792 instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 6793   match(Set dst (LShiftI src1 src2));
 6794   ins_cost(ALU_COST);
 6795   format %{ "sllw  $dst, $src1, $src2\t#@lShiftI_reg_reg" %}
 6796 
 6797   ins_encode %{
 6798     __ sllw(as_Register($dst$$reg),
 6799             as_Register($src1$$reg),
 6800             as_Register($src2$$reg));
 6801   %}
 6802 
 6803   ins_pipe(ialu_reg_reg_vshift);
 6804 %}
 6805 
 6806 // Shift Left Immediate
 6807 instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
 6808   match(Set dst (LShiftI src1 src2));
 6809   ins_cost(ALU_COST);
 6810   format %{ "slliw  $dst, $src1, ($src2 & 0x1f)\t#@lShiftI_reg_imm" %}
 6811 
 6812   ins_encode %{
    // the shift amount for slliw is encoded in the
    // lower 5 bits of the I-immediate field
 6815     __ slliw(as_Register($dst$$reg),
 6816              as_Register($src1$$reg),
 6817              (unsigned) $src2$$constant & 0x1f);
 6818   %}
 6819 
 6820   ins_pipe(ialu_reg_shift);
 6821 %}
 6822 
 6823 // Shift Right Logical Register
 6824 // In RV64I, only the low 5 bits of src2 are considered for the shift amount
 6825 instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 6826   match(Set dst (URShiftI src1 src2));
 6827   ins_cost(ALU_COST);
 6828   format %{ "srlw  $dst, $src1, $src2\t#@urShiftI_reg_reg" %}
 6829 
 6830   ins_encode %{
 6831     __ srlw(as_Register($dst$$reg),
 6832             as_Register($src1$$reg),
 6833             as_Register($src2$$reg));
 6834   %}
 6835 
 6836   ins_pipe(ialu_reg_reg_vshift);
 6837 %}
 6838 
 6839 // Shift Right Logical Immediate
 6840 instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
 6841   match(Set dst (URShiftI src1 src2));
 6842   ins_cost(ALU_COST);
 6843   format %{ "srliw  $dst, $src1, ($src2 & 0x1f)\t#@urShiftI_reg_imm" %}
 6844 
 6845   ins_encode %{
    // the shift amount for srliw is encoded in the
    // lower 5 bits of the I-immediate field
 6848     __ srliw(as_Register($dst$$reg),
 6849              as_Register($src1$$reg),
 6850              (unsigned) $src2$$constant & 0x1f);
 6851   %}
 6852 
 6853   ins_pipe(ialu_reg_shift);
 6854 %}
 6855 
 6856 // Shift Right Arithmetic Register
 6857 // In RV64I, only the low 5 bits of src2 are considered for the shift amount
 6858 instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 6859   match(Set dst (RShiftI src1 src2));
 6860   ins_cost(ALU_COST);
 6861   format %{ "sraw  $dst, $src1, $src2\t#@rShiftI_reg_reg" %}
 6862 
 6863   ins_encode %{
    // sraw sign-extends its 32-bit result into the upper 32 bits of dst
 6865     __ sraw(as_Register($dst$$reg),
 6866             as_Register($src1$$reg),
 6867             as_Register($src2$$reg));
 6868   %}
 6869 
 6870   ins_pipe(ialu_reg_reg_vshift);
 6871 %}
 6872 
 6873 // Shift Right Arithmetic Immediate
 6874 instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
 6875   match(Set dst (RShiftI src1 src2));
 6876   ins_cost(ALU_COST);
 6877   format %{ "sraiw  $dst, $src1, ($src2 & 0x1f)\t#@rShiftI_reg_imm" %}
 6878 
 6879   ins_encode %{
    // sraiw sign-extends its 32-bit result into the upper 32 bits of dst
 6881     __ sraiw(as_Register($dst$$reg),
 6882              as_Register($src1$$reg),
 6883              (unsigned) $src2$$constant & 0x1f);
 6884   %}
 6885 
 6886   ins_pipe(ialu_reg_shift);
 6887 %}
 6888 
 6889 // Long Shifts
 6890 
 6891 // Shift Left Register
 6892 // In RV64I, only the low 6 bits of src2 are considered for the shift amount
 6893 instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
 6894   match(Set dst (LShiftL src1 src2));
 6895 
 6896   ins_cost(ALU_COST);
 6897   format %{ "sll  $dst, $src1, $src2\t#@lShiftL_reg_reg" %}
 6898 
 6899   ins_encode %{
 6900     __ sll(as_Register($dst$$reg),
 6901            as_Register($src1$$reg),
 6902            as_Register($src2$$reg));
 6903   %}
 6904 
 6905   ins_pipe(ialu_reg_reg_vshift);
 6906 %}
 6907 
 6908 // Shift Left Immediate
 6909 instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
 6910   match(Set dst (LShiftL src1 src2));
 6911 
 6912   ins_cost(ALU_COST);
 6913   format %{ "slli  $dst, $src1, ($src2 & 0x3f)\t#@lShiftL_reg_imm" %}
 6914 
 6915   ins_encode %{
 6916     // the shift amount is encoded in the lower
 6917     // 6 bits of the I-immediate field for RV64I
 6918     __ slli(as_Register($dst$$reg),
 6919             as_Register($src1$$reg),
 6920             (unsigned) $src2$$constant & 0x3f);
 6921   %}
 6922 
 6923   ins_pipe(ialu_reg_shift);
 6924 %}
 6925 
 6926 // Shift Right Logical Register
 6927 // In RV64I, only the low 6 bits of src2 are considered for the shift amount
 6928 instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
 6929   match(Set dst (URShiftL src1 src2));
 6930 
 6931   ins_cost(ALU_COST);
 6932   format %{ "srl  $dst, $src1, $src2\t#@urShiftL_reg_reg" %}
 6933 
 6934   ins_encode %{
 6935     __ srl(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
 6938   %}
 6939 
 6940   ins_pipe(ialu_reg_reg_vshift);
 6941 %}
 6942 
 6943 // Shift Right Logical Immediate
 6944 instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
 6945   match(Set dst (URShiftL src1 src2));
 6946 
 6947   ins_cost(ALU_COST);
 6948   format %{ "srli  $dst, $src1, ($src2 & 0x3f)\t#@urShiftL_reg_imm" %}
 6949 
 6950   ins_encode %{
 6951     // the shift amount is encoded in the lower
 6952     // 6 bits of the I-immediate field for RV64I
 6953     __ srli(as_Register($dst$$reg),
 6954             as_Register($src1$$reg),
 6955             (unsigned) $src2$$constant & 0x3f);
 6956   %}
 6957 
 6958   ins_pipe(ialu_reg_shift);
 6959 %}
 6960 
 6961 // A special-case pattern for card table stores.
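// (the card-marking post-barrier computes (CastP2X store_addr) >>> card_shift,
// which is the shape matched here)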
 6962 instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
 6963   match(Set dst (URShiftL (CastP2X src1) src2));
 6964 
 6965   ins_cost(ALU_COST);
 6966   format %{ "srli  $dst, p2x($src1), ($src2 & 0x3f)\t#@urShiftP_reg_imm" %}
 6967 
 6968   ins_encode %{
 6969     // the shift amount is encoded in the lower
 6970     // 6 bits of the I-immediate field for RV64I
 6971     __ srli(as_Register($dst$$reg),
 6972             as_Register($src1$$reg),
 6973             (unsigned) $src2$$constant & 0x3f);
 6974   %}
 6975 
 6976   ins_pipe(ialu_reg_shift);
 6977 %}
 6978 
 6979 // Shift Right Arithmetic Register
 6980 // In RV64I, only the low 6 bits of src2 are considered for the shift amount
 6981 instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
 6982   match(Set dst (RShiftL src1 src2));
 6983 
 6984   ins_cost(ALU_COST);
 6985   format %{ "sra  $dst, $src1, $src2\t#@rShiftL_reg_reg" %}
 6986 
 6987   ins_encode %{
 6988     __ sra(as_Register($dst$$reg),
 6989            as_Register($src1$$reg),
 6990            as_Register($src2$$reg));
 6991   %}
 6992 
 6993   ins_pipe(ialu_reg_reg_vshift);
 6994 %}
 6995 
 6996 // Shift Right Arithmetic Immediate
 6997 instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
 6998   match(Set dst (RShiftL src1 src2));
 6999 
 7000   ins_cost(ALU_COST);
 7001   format %{ "srai  $dst, $src1, ($src2 & 0x3f)\t#@rShiftL_reg_imm" %}
 7002 
 7003   ins_encode %{
 7004     // the shift amount is encoded in the lower
 7005     // 6 bits of the I-immediate field for RV64I
 7006     __ srai(as_Register($dst$$reg),
 7007             as_Register($src1$$reg),
 7008             (unsigned) $src2$$constant & 0x3f);
 7009   %}
 7010 
 7011   ins_pipe(ialu_reg_shift);
 7012 %}
 7013 
 7014 instruct regI_not_reg(iRegINoSp dst, iRegI src1, immI_M1 m1) %{
 7015   match(Set dst (XorI src1 m1));
 7016   ins_cost(ALU_COST);
 7017   format %{ "xori  $dst, $src1, -1\t#@regI_not_reg" %}
 7018 
 7019   ins_encode %{
 7020     __ xori(as_Register($dst$$reg), as_Register($src1$$reg), -1);
 7021   %}
 7022 
 7023   ins_pipe(ialu_reg_imm);
 7024 %}
 7025 
 7026 instruct regL_not_reg(iRegLNoSp dst, iRegL src1, immL_M1 m1) %{
 7027   match(Set dst (XorL src1 m1));
 7028   ins_cost(ALU_COST);
 7029   format %{ "xori  $dst, $src1, -1\t#@regL_not_reg" %}
 7030 
 7031   ins_encode %{
 7032     __ xori(as_Register($dst$$reg), as_Register($src1$$reg), -1);
 7033   %}
 7034 
 7035   ins_pipe(ialu_reg_imm);
 7036 %}
 7037 
 7038 
 7039 // ============================================================================
 7040 // Floating Point Arithmetic Instructions
 7041 
 7042 instruct addF_reg_reg(fRegF dst, fRegF src1, fRegF src2) %{
 7043   match(Set dst (AddF src1 src2));
 7044 
 7045   ins_cost(DEFAULT_COST * 5);
 7046   format %{ "fadd.s  $dst, $src1, $src2\t#@addF_reg_reg" %}
 7047 
 7048   ins_encode %{
 7049     __ fadd_s(as_FloatRegister($dst$$reg),
 7050               as_FloatRegister($src1$$reg),
 7051               as_FloatRegister($src2$$reg));
 7052   %}
 7053 
 7054   ins_pipe(fp_dop_reg_reg_s);
 7055 %}
 7056 
 7057 instruct addD_reg_reg(fRegD dst, fRegD src1, fRegD src2) %{
 7058   match(Set dst (AddD src1 src2));
 7059 
 7060   ins_cost(DEFAULT_COST * 5);
 7061   format %{ "fadd.d  $dst, $src1, $src2\t#@addD_reg_reg" %}
 7062 
 7063   ins_encode %{
 7064     __ fadd_d(as_FloatRegister($dst$$reg),
 7065               as_FloatRegister($src1$$reg),
 7066               as_FloatRegister($src2$$reg));
 7067   %}
 7068 
 7069   ins_pipe(fp_dop_reg_reg_d);
 7070 %}
 7071 
 7072 instruct subF_reg_reg(fRegF dst, fRegF src1, fRegF src2) %{
 7073   match(Set dst (SubF src1 src2));
 7074 
 7075   ins_cost(DEFAULT_COST * 5);
 7076   format %{ "fsub.s  $dst, $src1, $src2\t#@subF_reg_reg" %}
 7077 
 7078   ins_encode %{
 7079     __ fsub_s(as_FloatRegister($dst$$reg),
 7080               as_FloatRegister($src1$$reg),
 7081               as_FloatRegister($src2$$reg));
 7082   %}
 7083 
 7084   ins_pipe(fp_dop_reg_reg_s);
 7085 %}
 7086 
 7087 instruct subD_reg_reg(fRegD dst, fRegD src1, fRegD src2) %{
 7088   match(Set dst (SubD src1 src2));
 7089 
 7090   ins_cost(DEFAULT_COST * 5);
 7091   format %{ "fsub.d  $dst, $src1, $src2\t#@subD_reg_reg" %}
 7092 
 7093   ins_encode %{
 7094     __ fsub_d(as_FloatRegister($dst$$reg),
 7095               as_FloatRegister($src1$$reg),
 7096               as_FloatRegister($src2$$reg));
 7097   %}
 7098 
 7099   ins_pipe(fp_dop_reg_reg_d);
 7100 %}
 7101 
 7102 instruct mulF_reg_reg(fRegF dst, fRegF src1, fRegF src2) %{
 7103   match(Set dst (MulF src1 src2));
 7104 
 7105   ins_cost(FMUL_SINGLE_COST);
 7106   format %{ "fmul.s  $dst, $src1, $src2\t#@mulF_reg_reg" %}
 7107 
 7108   ins_encode %{
 7109     __ fmul_s(as_FloatRegister($dst$$reg),
 7110               as_FloatRegister($src1$$reg),
 7111               as_FloatRegister($src2$$reg));
 7112   %}
 7113 
 7114   ins_pipe(fp_dop_reg_reg_s);
 7115 %}
 7116 
 7117 instruct mulD_reg_reg(fRegD dst, fRegD src1, fRegD src2) %{
 7118   match(Set dst (MulD src1 src2));
 7119 
 7120   ins_cost(FMUL_DOUBLE_COST);
 7121   format %{ "fmul.d  $dst, $src1, $src2\t#@mulD_reg_reg" %}
 7122 
 7123   ins_encode %{
 7124     __ fmul_d(as_FloatRegister($dst$$reg),
 7125               as_FloatRegister($src1$$reg),
 7126               as_FloatRegister($src2$$reg));
 7127   %}
 7128 
 7129   ins_pipe(fp_dop_reg_reg_d);
 7130 %}
 7131 
 7132 // src1 * src2 + src3
 7133 instruct maddF_reg_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3) %{
 7134   match(Set dst (FmaF src3 (Binary src1 src2)));
 7135 
 7136   ins_cost(FMUL_SINGLE_COST);
 7137   format %{ "fmadd.s  $dst, $src1, $src2, $src3\t#@maddF_reg_reg" %}
 7138 
 7139   ins_encode %{
    assert(UseFMA, "Needs FMA instruction support.");
 7141     __ fmadd_s(as_FloatRegister($dst$$reg),
 7142                as_FloatRegister($src1$$reg),
 7143                as_FloatRegister($src2$$reg),
 7144                as_FloatRegister($src3$$reg));
 7145   %}
 7146 
 7147   ins_pipe(pipe_class_default);
 7148 %}
 7149 
 7150 // src1 * src2 + src3
 7151 instruct maddD_reg_reg(fRegD dst, fRegD src1, fRegD src2, fRegD src3) %{
 7152   match(Set dst (FmaD src3 (Binary src1 src2)));
 7153 
 7154   ins_cost(FMUL_DOUBLE_COST);
 7155   format %{ "fmadd.d  $dst, $src1, $src2, $src3\t#@maddD_reg_reg" %}
 7156 
 7157   ins_encode %{
    assert(UseFMA, "Needs FMA instruction support.");
 7159     __ fmadd_d(as_FloatRegister($dst$$reg),
 7160                as_FloatRegister($src1$$reg),
 7161                as_FloatRegister($src2$$reg),
 7162                as_FloatRegister($src3$$reg));
 7163   %}
 7164 
 7165   ins_pipe(pipe_class_default);
 7166 %}
 7167 
 7168 // src1 * src2 - src3
 7169 instruct msubF_reg_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3) %{
 7170   match(Set dst (FmaF (NegF src3) (Binary src1 src2)));
 7171 
 7172   ins_cost(FMUL_SINGLE_COST);
 7173   format %{ "fmsub.s  $dst, $src1, $src2, $src3\t#@msubF_reg_reg" %}
 7174 
 7175   ins_encode %{
    assert(UseFMA, "Needs FMA instruction support.");
 7177     __ fmsub_s(as_FloatRegister($dst$$reg),
 7178                as_FloatRegister($src1$$reg),
 7179                as_FloatRegister($src2$$reg),
 7180                as_FloatRegister($src3$$reg));
 7181   %}
 7182 
 7183   ins_pipe(pipe_class_default);
 7184 %}
 7185 
 7186 // src1 * src2 - src3
 7187 instruct msubD_reg_reg(fRegD dst, fRegD src1, fRegD src2, fRegD src3) %{
 7188   match(Set dst (FmaD (NegD src3) (Binary src1 src2)));
 7189 
 7190   ins_cost(FMUL_DOUBLE_COST);
 7191   format %{ "fmsub.d  $dst, $src1, $src2, $src3\t#@msubD_reg_reg" %}
 7192 
 7193   ins_encode %{
    assert(UseFMA, "Needs FMA instruction support.");
 7195     __ fmsub_d(as_FloatRegister($dst$$reg),
 7196                as_FloatRegister($src1$$reg),
 7197                as_FloatRegister($src2$$reg),
 7198                as_FloatRegister($src3$$reg));
 7199   %}
 7200 
 7201   ins_pipe(pipe_class_default);
 7202 %}
 7203 
 7204 // src1 * (-src2) + src3
 7205 // "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3"
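// fnmsub.s computes -(src1 * src2) + src3, which is the same value as
// src1 * (-src2) + src3.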
 7206 instruct nmsubF_reg_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3) %{
 7207   match(Set dst (FmaF src3 (Binary src1 (NegF src2))));
 7208 
 7209   ins_cost(FMUL_SINGLE_COST);
 7210   format %{ "fnmsub.s  $dst, $src1, $src2, $src3\t#@nmsubF_reg_reg" %}
 7211 
 7212   ins_encode %{
    assert(UseFMA, "Needs FMA instruction support.");
 7214     __ fnmsub_s(as_FloatRegister($dst$$reg),
 7215                 as_FloatRegister($src1$$reg),
 7216                 as_FloatRegister($src2$$reg),
 7217                 as_FloatRegister($src3$$reg));
 7218   %}
 7219 
 7220   ins_pipe(pipe_class_default);
 7221 %}
 7222 
 7223 // src1 * (-src2) + src3
 7224 // "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3"
 7225 instruct nmsubD_reg_reg(fRegD dst, fRegD src1, fRegD src2, fRegD src3) %{
 7226   match(Set dst (FmaD src3 (Binary src1 (NegD src2))));
 7227 
 7228   ins_cost(FMUL_DOUBLE_COST);
 7229   format %{ "fnmsub.d  $dst, $src1, $src2, $src3\t#@nmsubD_reg_reg" %}
 7230 
 7231   ins_encode %{
    assert(UseFMA, "Needs FMA instruction support.");
 7233     __ fnmsub_d(as_FloatRegister($dst$$reg),
 7234                 as_FloatRegister($src1$$reg),
 7235                 as_FloatRegister($src2$$reg),
 7236                 as_FloatRegister($src3$$reg));
 7237   %}
 7238 
 7239   ins_pipe(pipe_class_default);
 7240 %}
 7241 
 7242 // src1 * (-src2) - src3
 7243 // "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
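// fnmadd.s computes -(src1 * src2) - src3, which is the same value as
// src1 * (-src2) - src3.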
 7244 instruct nmaddF_reg_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3) %{
 7245   match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));
 7246 
 7247   ins_cost(FMUL_SINGLE_COST);
 7248   format %{ "fnmadd.s  $dst, $src1, $src2, $src3\t#@nmaddF_reg_reg" %}
 7249 
 7250   ins_encode %{
    assert(UseFMA, "Needs FMA instruction support.");
 7252     __ fnmadd_s(as_FloatRegister($dst$$reg),
 7253                 as_FloatRegister($src1$$reg),
 7254                 as_FloatRegister($src2$$reg),
 7255                 as_FloatRegister($src3$$reg));
 7256   %}
 7257 
 7258   ins_pipe(pipe_class_default);
 7259 %}
 7260 
 7261 // src1 * (-src2) - src3
 7262 // "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
 7263 instruct nmaddD_reg_reg(fRegD dst, fRegD src1, fRegD src2, fRegD src3) %{
 7264   match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));
 7265 
 7266   ins_cost(FMUL_DOUBLE_COST);
 7267   format %{ "fnmadd.d  $dst, $src1, $src2, $src3\t#@nmaddD_reg_reg" %}
 7268 
 7269   ins_encode %{
    assert(UseFMA, "Needs FMA instruction support.");
 7271     __ fnmadd_d(as_FloatRegister($dst$$reg),
 7272                 as_FloatRegister($src1$$reg),
 7273                 as_FloatRegister($src2$$reg),
 7274                 as_FloatRegister($src3$$reg));
 7275   %}
 7276 
 7277   ins_pipe(pipe_class_default);
 7278 %}
 7279 
 7280 // Math.max(FF)F
 7281 instruct maxF_reg_reg(fRegF dst, fRegF src1, fRegF src2, rFlagsReg cr) %{
 7282   match(Set dst (MaxF src1 src2));
 7283   effect(TEMP_DEF dst, KILL cr);
 7284 
 7285   format %{ "maxF $dst, $src1, $src2" %}
 7286 
 7287   ins_encode %{
 7288     __ minmax_fp(as_FloatRegister($dst$$reg),
 7289                  as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg),
 7290                  false /* is_double */, false /* is_min */);
 7291   %}
 7292 
 7293   ins_pipe(pipe_class_default);
 7294 %}
 7295 
 7296 // Math.min(FF)F
 7297 instruct minF_reg_reg(fRegF dst, fRegF src1, fRegF src2, rFlagsReg cr) %{
 7298   match(Set dst (MinF src1 src2));
 7299   effect(TEMP_DEF dst, KILL cr);
 7300 
 7301   format %{ "minF $dst, $src1, $src2" %}
 7302 
 7303   ins_encode %{
 7304     __ minmax_fp(as_FloatRegister($dst$$reg),
 7305                  as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg),
 7306                  false /* is_double */, true /* is_min */);
 7307   %}
 7308 
 7309   ins_pipe(pipe_class_default);
 7310 %}
 7311 
 7312 // Math.max(DD)D
 7313 instruct maxD_reg_reg(fRegD dst, fRegD src1, fRegD src2, rFlagsReg cr) %{
 7314   match(Set dst (MaxD src1 src2));
 7315   effect(TEMP_DEF dst, KILL cr);
 7316 
 7317   format %{ "maxD $dst, $src1, $src2" %}
 7318 
 7319   ins_encode %{
 7320     __ minmax_fp(as_FloatRegister($dst$$reg),
 7321                  as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg),
 7322                  true /* is_double */, false /* is_min */);
 7323   %}
 7324 
 7325   ins_pipe(pipe_class_default);
 7326 %}
 7327 
 7328 // Math.min(DD)D
 7329 instruct minD_reg_reg(fRegD dst, fRegD src1, fRegD src2, rFlagsReg cr) %{
 7330   match(Set dst (MinD src1 src2));
 7331   effect(TEMP_DEF dst, KILL cr);
 7332 
 7333   format %{ "minD $dst, $src1, $src2" %}
 7334 
 7335   ins_encode %{
 7336     __ minmax_fp(as_FloatRegister($dst$$reg),
 7337                  as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg),
 7338                  true /* is_double */, true /* is_min */);
 7339   %}
 7340 
 7341   ins_pipe(pipe_class_default);
 7342 %}
 7343 
 7344 // Float.isInfinite
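// These classification patterns (isInfinite/isFinite, float and double) rely on
// fclass, which sets exactly one class bit for its input; masking with the
// relevant class bits and testing the result against zero yields the 0/1 answer.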
 7345 instruct isInfiniteF_reg_reg(iRegINoSp dst, fRegF src)
 7346 %{
 7347   match(Set dst (IsInfiniteF src));
 7348 
 7349   format %{ "isInfinite $dst, $src" %}
 7350   ins_encode %{
 7351     __ fclass_s(as_Register($dst$$reg), as_FloatRegister($src$$reg));
 7352     __ andi(as_Register($dst$$reg), as_Register($dst$$reg), Assembler::fclass_mask::inf);
 7353     __ slt(as_Register($dst$$reg), zr, as_Register($dst$$reg));
 7354   %}
 7355 
 7356   ins_pipe(pipe_class_default);
 7357 %}
 7358 
 7359 // Double.isInfinite
 7360 instruct isInfiniteD_reg_reg(iRegINoSp dst, fRegD src)
 7361 %{
 7362   match(Set dst (IsInfiniteD src));
 7363 
 7364   format %{ "isInfinite $dst, $src" %}
 7365   ins_encode %{
 7366     __ fclass_d(as_Register($dst$$reg), as_FloatRegister($src$$reg));
 7367     __ andi(as_Register($dst$$reg), as_Register($dst$$reg), Assembler::fclass_mask::inf);
 7368     __ slt(as_Register($dst$$reg), zr, as_Register($dst$$reg));
 7369   %}
 7370 
 7371   ins_pipe(pipe_class_default);
 7372 %}
 7373 
 7374 // Float.isFinite
 7375 instruct isFiniteF_reg_reg(iRegINoSp dst, fRegF src)
 7376 %{
 7377   match(Set dst (IsFiniteF src));
 7378 
 7379   format %{ "isFinite $dst, $src" %}
 7380   ins_encode %{
 7381     __ fclass_s(as_Register($dst$$reg), as_FloatRegister($src$$reg));
 7382     __ andi(as_Register($dst$$reg), as_Register($dst$$reg), Assembler::fclass_mask::finite);
 7383     __ slt(as_Register($dst$$reg), zr, as_Register($dst$$reg));
 7384   %}
 7385 
 7386   ins_pipe(pipe_class_default);
 7387 %}
 7388 
 7389 // Double.isFinite
 7390 instruct isFiniteD_reg_reg(iRegINoSp dst, fRegD src)
 7391 %{
 7392   match(Set dst (IsFiniteD src));
 7393 
 7394   format %{ "isFinite $dst, $src" %}
 7395   ins_encode %{
 7396     __ fclass_d(as_Register($dst$$reg), as_FloatRegister($src$$reg));
 7397     __ andi(as_Register($dst$$reg), as_Register($dst$$reg), Assembler::fclass_mask::finite);
 7398     __ slt(as_Register($dst$$reg), zr, as_Register($dst$$reg));
 7399   %}
 7400 
 7401   ins_pipe(pipe_class_default);
 7402 %}
 7403 
 7404 instruct divF_reg_reg(fRegF dst, fRegF src1, fRegF src2) %{
  match(Set dst (DivF src1 src2));
 7406 
 7407   ins_cost(FDIV_COST);
 7408   format %{ "fdiv.s  $dst, $src1, $src2\t#@divF_reg_reg" %}
 7409 
 7410   ins_encode %{
 7411     __ fdiv_s(as_FloatRegister($dst$$reg),
 7412               as_FloatRegister($src1$$reg),
 7413               as_FloatRegister($src2$$reg));
 7414   %}
 7415 
 7416   ins_pipe(fp_div_s);
 7417 %}
 7418 
 7419 instruct divD_reg_reg(fRegD dst, fRegD src1, fRegD src2) %{
  match(Set dst (DivD src1 src2));
 7421 
 7422   ins_cost(FDIV_COST);
 7423   format %{ "fdiv.d  $dst, $src1, $src2\t#@divD_reg_reg" %}
 7424 
 7425   ins_encode %{
 7426     __ fdiv_d(as_FloatRegister($dst$$reg),
 7427               as_FloatRegister($src1$$reg),
 7428               as_FloatRegister($src2$$reg));
 7429   %}
 7430 
 7431   ins_pipe(fp_div_d);
 7432 %}
 7433 
 7434 instruct negF_reg_reg(fRegF dst, fRegF src) %{
 7435   match(Set dst (NegF src));
 7436 
 7437   ins_cost(XFER_COST);
 7438   format %{ "fsgnjn.s  $dst, $src, $src\t#@negF_reg_reg" %}
 7439 
 7440   ins_encode %{
 7441     __ fneg_s(as_FloatRegister($dst$$reg),
 7442               as_FloatRegister($src$$reg));
 7443   %}
 7444 
 7445   ins_pipe(fp_uop_s);
 7446 %}
 7447 
 7448 instruct negD_reg_reg(fRegD dst, fRegD src) %{
 7449   match(Set dst (NegD src));
 7450 
 7451   ins_cost(XFER_COST);
 7452   format %{ "fsgnjn.d  $dst, $src, $src\t#@negD_reg_reg" %}
 7453 
 7454   ins_encode %{
 7455     __ fneg_d(as_FloatRegister($dst$$reg),
 7456               as_FloatRegister($src$$reg));
 7457   %}
 7458 
 7459   ins_pipe(fp_uop_d);
 7460 %}
 7461 
 7462 instruct absI_reg(iRegINoSp dst, iRegIorL2I src) %{
 7463   match(Set dst (AbsI src));
 7464 
 7465   ins_cost(ALU_COST * 3);
 7466   format %{
 7467     "sraiw  t0, $src, 0x1f\n\t"
 7468     "addw  $dst, $src, t0\n\t"
 7469     "xorr  $dst, $dst, t0\t#@absI_reg"
 7470   %}
 7471 
 7472   ins_encode %{
 7473     __ sraiw(t0, as_Register($src$$reg), 0x1f);
 7474     __ addw(as_Register($dst$$reg), as_Register($src$$reg), t0);
 7475     __ xorr(as_Register($dst$$reg), as_Register($dst$$reg), t0);
 7476   %}
 7477 
 7478   ins_pipe(pipe_class_default);
 7479 %}
 7480 
 7481 instruct absL_reg(iRegLNoSp dst, iRegL src) %{
 7482   match(Set dst (AbsL src));
 7483 
 7484   ins_cost(ALU_COST * 3);
 7485   format %{
 7486     "srai  t0, $src, 0x3f\n\t"
 7487     "add  $dst, $src, t0\n\t"
 7488     "xorr  $dst, $dst, t0\t#@absL_reg"
 7489   %}
 7490 
 7491   ins_encode %{
 7492     __ srai(t0, as_Register($src$$reg), 0x3f);
 7493     __ add(as_Register($dst$$reg), as_Register($src$$reg), t0);
 7494     __ xorr(as_Register($dst$$reg), as_Register($dst$$reg), t0);
 7495   %}
 7496 
 7497   ins_pipe(pipe_class_default);
 7498 %}
 7499 
 7500 instruct absF_reg(fRegF dst, fRegF src) %{
 7501   match(Set dst (AbsF src));
 7502 
 7503   ins_cost(XFER_COST);
 7504   format %{ "fsgnjx.s  $dst, $src, $src\t#@absF_reg" %}
 7505   ins_encode %{
 7506     __ fabs_s(as_FloatRegister($dst$$reg),
 7507               as_FloatRegister($src$$reg));
 7508   %}
 7509 
 7510   ins_pipe(fp_uop_s);
 7511 %}
 7512 
 7513 instruct absD_reg(fRegD dst, fRegD src) %{
 7514   match(Set dst (AbsD src));
 7515 
 7516   ins_cost(XFER_COST);
 7517   format %{ "fsgnjx.d  $dst, $src, $src\t#@absD_reg" %}
 7518   ins_encode %{
 7519     __ fabs_d(as_FloatRegister($dst$$reg),
 7520               as_FloatRegister($src$$reg));
 7521   %}
 7522 
 7523   ins_pipe(fp_uop_d);
 7524 %}
 7525 
 7526 instruct sqrtF_reg(fRegF dst, fRegF src) %{
 7527   match(Set dst (SqrtF src));
 7528 
 7529   ins_cost(FSQRT_COST);
 7530   format %{ "fsqrt.s  $dst, $src\t#@sqrtF_reg" %}
 7531   ins_encode %{
 7532     __ fsqrt_s(as_FloatRegister($dst$$reg),
 7533                as_FloatRegister($src$$reg));
 7534   %}
 7535 
 7536   ins_pipe(fp_sqrt_s);
 7537 %}
 7538 
 7539 instruct sqrtD_reg(fRegD dst, fRegD src) %{
 7540   match(Set dst (SqrtD src));
 7541 
 7542   ins_cost(FSQRT_COST);
 7543   format %{ "fsqrt.d  $dst, $src\t#@sqrtD_reg" %}
 7544   ins_encode %{
 7545     __ fsqrt_d(as_FloatRegister($dst$$reg),
 7546                as_FloatRegister($src$$reg));
 7547   %}
 7548 
 7549   ins_pipe(fp_sqrt_d);
 7550 %}
 7551 
 7552 // Round Instruction
 7553 instruct roundD_reg(fRegD dst, fRegD src, immI rmode, iRegLNoSp tmp1, iRegLNoSp tmp2, iRegLNoSp tmp3) %{
 7554   match(Set dst (RoundDoubleMode src rmode));
 7555   ins_cost(2 * XFER_COST + BRANCH_COST);
 7556   effect(TEMP_DEF dst, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 7557 
 7558   format %{ "RoundDoubleMode $src, $rmode" %}
 7559   ins_encode %{
 7560     __ round_double_mode(as_FloatRegister($dst$$reg),
 7561                as_FloatRegister($src$$reg), $rmode$$constant, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 7562   %}
 7563   ins_pipe(pipe_class_default);
 7564 %}
 7565 
 7566 // Copysign and signum intrinsics
 7567 
 7568 instruct copySignD_reg(fRegD dst, fRegD src1, fRegD src2, immD zero) %{
 7569   match(Set dst (CopySignD src1 (Binary src2 zero)));
 7570   format %{ "CopySignD  $dst $src1 $src2" %}
 7571   ins_encode %{
 7572     FloatRegister dst = as_FloatRegister($dst$$reg),
 7573                   src1 = as_FloatRegister($src1$$reg),
 7574                   src2 = as_FloatRegister($src2$$reg);
 7575     __ fsgnj_d(dst, src1, src2);
 7576   %}
 7577   ins_pipe(fp_dop_reg_reg_d);
 7578 %}
 7579 
 7580 instruct copySignF_reg(fRegF dst, fRegF src1, fRegF src2) %{
 7581   match(Set dst (CopySignF src1 src2));
 7582   format %{ "CopySignF  $dst $src1 $src2" %}
 7583   ins_encode %{
 7584     FloatRegister dst = as_FloatRegister($dst$$reg),
 7585                   src1 = as_FloatRegister($src1$$reg),
 7586                   src2 = as_FloatRegister($src2$$reg);
 7587     __ fsgnj_s(dst, src1, src2);
 7588   %}
 7589   ins_pipe(fp_dop_reg_reg_s);
 7590 %}
 7591 
 7592 instruct signumD_reg(fRegD dst, immD zero, fRegD one) %{
 7593   match(Set dst (SignumD dst (Binary zero one)));
 7594   format %{ "signumD  $dst, $dst" %}
 7595   ins_encode %{
 7596     __ signum_fp(as_FloatRegister($dst$$reg), as_FloatRegister($one$$reg), true /* is_double */);
 7597   %}
 7598   ins_pipe(pipe_class_default);
 7599 %}
 7600 
 7601 instruct signumF_reg(fRegF dst, immF zero, fRegF one) %{
 7602   match(Set dst (SignumF dst (Binary zero one)));
 7603   format %{ "signumF  $dst, $dst" %}
 7604   ins_encode %{
 7605     __ signum_fp(as_FloatRegister($dst$$reg), as_FloatRegister($one$$reg), false /* is_double */);
 7606   %}
 7607   ins_pipe(pipe_class_default);
 7608 %}
 7609 
 7610 // Arithmetic Instructions End
 7611 
 7612 // ============================================================================
 7613 // Logical Instructions
 7614 
 7615 // Register And
 7616 instruct andI_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2) %{
 7617   match(Set dst (AndI src1 src2));
 7618 
 7619   format %{ "andr  $dst, $src1, $src2\t#@andI_reg_reg" %}
 7620 
 7621   ins_cost(ALU_COST);
 7622   ins_encode %{
 7623     __ andr(as_Register($dst$$reg),
 7624             as_Register($src1$$reg),
 7625             as_Register($src2$$reg));
 7626   %}
 7627 
 7628   ins_pipe(ialu_reg_reg);
 7629 %}
 7630 
 7631 // Immediate And
 7632 instruct andI_reg_imm(iRegINoSp dst, iRegI src1, immIAdd src2) %{
 7633   match(Set dst (AndI src1 src2));
 7634 
 7635   format %{ "andi  $dst, $src1, $src2\t#@andI_reg_imm" %}
 7636 
 7637   ins_cost(ALU_COST);
 7638   ins_encode %{
 7639     __ andi(as_Register($dst$$reg),
 7640             as_Register($src1$$reg),
 7641             (int32_t)($src2$$constant));
 7642   %}
 7643 
 7644   ins_pipe(ialu_reg_imm);
 7645 %}
 7646 
 7647 // Register Or
 7648 instruct orI_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2) %{
 7649   match(Set dst (OrI src1 src2));
 7650 
 7651   format %{ "orr  $dst, $src1, $src2\t#@orI_reg_reg" %}
 7652 
 7653   ins_cost(ALU_COST);
 7654   ins_encode %{
 7655     __ orr(as_Register($dst$$reg),
 7656            as_Register($src1$$reg),
 7657            as_Register($src2$$reg));
 7658   %}
 7659 
 7660   ins_pipe(ialu_reg_reg);
 7661 %}
 7662 
 7663 // Immediate Or
 7664 instruct orI_reg_imm(iRegINoSp dst, iRegI src1, immIAdd src2) %{
 7665   match(Set dst (OrI src1 src2));
 7666 
 7667   format %{ "ori  $dst, $src1, $src2\t#@orI_reg_imm" %}
 7668 
 7669   ins_cost(ALU_COST);
 7670   ins_encode %{
 7671     __ ori(as_Register($dst$$reg),
 7672            as_Register($src1$$reg),
 7673            (int32_t)($src2$$constant));
 7674   %}
 7675 
 7676   ins_pipe(ialu_reg_imm);
 7677 %}
 7678 
 7679 // Register Xor
 7680 instruct xorI_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2) %{
 7681   match(Set dst (XorI src1 src2));
 7682 
 7683   format %{ "xorr  $dst, $src1, $src2\t#@xorI_reg_reg" %}
 7684 
 7685   ins_cost(ALU_COST);
 7686   ins_encode %{
 7687     __ xorr(as_Register($dst$$reg),
 7688             as_Register($src1$$reg),
 7689             as_Register($src2$$reg));
 7690   %}
 7691 
 7692   ins_pipe(ialu_reg_reg);
 7693 %}
 7694 
 7695 // Immediate Xor
 7696 instruct xorI_reg_imm(iRegINoSp dst, iRegI src1, immIAdd src2) %{
 7697   match(Set dst (XorI src1 src2));
 7698 
 7699   format %{ "xori  $dst, $src1, $src2\t#@xorI_reg_imm" %}
 7700 
 7701   ins_cost(ALU_COST);
 7702   ins_encode %{
 7703     __ xori(as_Register($dst$$reg),
 7704             as_Register($src1$$reg),
 7705             (int32_t)($src2$$constant));
 7706   %}
 7707 
 7708   ins_pipe(ialu_reg_imm);
 7709 %}
 7710 
 7711 // Register And Long
 7712 instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
 7713   match(Set dst (AndL src1 src2));
 7714 
 7715   format %{ "andr  $dst, $src1, $src2\t#@andL_reg_reg" %}
 7716 
 7717   ins_cost(ALU_COST);
 7718   ins_encode %{
 7719     __ andr(as_Register($dst$$reg),
 7720             as_Register($src1$$reg),
 7721             as_Register($src2$$reg));
 7722   %}
 7723 
 7724   ins_pipe(ialu_reg_reg);
 7725 %}
 7726 
 7727 // Immediate And Long
 7728 instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLAdd src2) %{
 7729   match(Set dst (AndL src1 src2));
 7730 
 7731   format %{ "andi  $dst, $src1, $src2\t#@andL_reg_imm" %}
 7732 
 7733   ins_cost(ALU_COST);
 7734   ins_encode %{
 7735     __ andi(as_Register($dst$$reg),
 7736             as_Register($src1$$reg),
 7737             (int32_t)($src2$$constant));
 7738   %}
 7739 
 7740   ins_pipe(ialu_reg_imm);
 7741 %}
 7742 
 7743 // Register Or Long
 7744 instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
 7745   match(Set dst (OrL src1 src2));
 7746 
 7747   format %{ "orr  $dst, $src1, $src2\t#@orL_reg_reg" %}
 7748 
 7749   ins_cost(ALU_COST);
 7750   ins_encode %{
 7751     __ orr(as_Register($dst$$reg),
 7752            as_Register($src1$$reg),
 7753            as_Register($src2$$reg));
 7754   %}
 7755 
 7756   ins_pipe(ialu_reg_reg);
 7757 %}
 7758 
 7759 // Immediate Or Long
 7760 instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLAdd src2) %{
 7761   match(Set dst (OrL src1 src2));
 7762 
 7763   format %{ "ori  $dst, $src1, $src2\t#@orL_reg_imm" %}
 7764 
 7765   ins_cost(ALU_COST);
 7766   ins_encode %{
 7767     __ ori(as_Register($dst$$reg),
 7768            as_Register($src1$$reg),
 7769            (int32_t)($src2$$constant));
 7770   %}
 7771 
 7772   ins_pipe(ialu_reg_imm);
 7773 %}
 7774 
 7775 // Register Xor Long
 7776 instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
 7777   match(Set dst (XorL src1 src2));
 7778 
 7779   format %{ "xorr  $dst, $src1, $src2\t#@xorL_reg_reg" %}
 7780 
 7781   ins_cost(ALU_COST);
 7782   ins_encode %{
 7783     __ xorr(as_Register($dst$$reg),
 7784             as_Register($src1$$reg),
 7785             as_Register($src2$$reg));
 7786   %}
 7787 
 7788   ins_pipe(ialu_reg_reg);
 7789 %}
 7790 
 7791 // Immediate Xor Long
 7792 instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLAdd src2) %{
 7793   match(Set dst (XorL src1 src2));
 7794 
 7795   ins_cost(ALU_COST);
 7796   format %{ "xori  $dst, $src1, $src2\t#@xorL_reg_imm" %}
 7797 
 7798   ins_encode %{
 7799     __ xori(as_Register($dst$$reg),
 7800             as_Register($src1$$reg),
 7801             (int32_t)($src2$$constant));
 7802   %}
 7803 
 7804   ins_pipe(ialu_reg_imm);
 7805 %}
 7806 
 7807 // ============================================================================
 7808 // MemBar Instruction
 7809 
 7810 instruct load_fence() %{
 7811   match(LoadFence);
 7812   ins_cost(ALU_COST);
 7813 
 7814   format %{ "#@load_fence" %}
 7815 
 7816   ins_encode %{
 7817     __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
 7818   %}
 7819   ins_pipe(pipe_serial);
 7820 %}
 7821 
 7822 instruct membar_acquire() %{
 7823   match(MemBarAcquire);
 7824   ins_cost(ALU_COST);
 7825 
 7826   format %{ "#@membar_acquire\n\t"
 7827             "fence ir iorw" %}
 7828 
 7829   ins_encode %{
 7830     __ block_comment("membar_acquire");
 7831     __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
 7832   %}
 7833 
 7834   ins_pipe(pipe_serial);
 7835 %}
 7836 
 7837 instruct membar_acquire_lock() %{
 7838   match(MemBarAcquireLock);
 7839   ins_cost(0);
 7840 
 7841   format %{ "#@membar_acquire_lock (elided)" %}
 7842 
 7843   ins_encode %{
 7844     __ block_comment("membar_acquire_lock (elided)");
 7845   %}
 7846 
 7847   ins_pipe(pipe_serial);
 7848 %}
 7849 
 7850 instruct store_fence() %{
 7851   match(StoreFence);
 7852   ins_cost(ALU_COST);
 7853 
 7854   format %{ "#@store_fence" %}
 7855 
 7856   ins_encode %{
 7857     __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
 7858   %}
 7859   ins_pipe(pipe_serial);
 7860 %}
 7861 
 7862 instruct membar_release() %{
 7863   match(MemBarRelease);
 7864   ins_cost(ALU_COST);
 7865 
 7866   format %{ "#@membar_release\n\t"
 7867             "fence iorw ow" %}
 7868 
 7869   ins_encode %{
 7870     __ block_comment("membar_release");
 7871     __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
 7872   %}
 7873   ins_pipe(pipe_serial);
 7874 %}
 7875 
 7876 instruct membar_storestore() %{
 7877   match(MemBarStoreStore);
 7878   match(StoreStoreFence);
 7879   ins_cost(ALU_COST);
 7880 
 7881   format %{ "MEMBAR-store-store\t#@membar_storestore" %}
 7882 
 7883   ins_encode %{
 7884     __ membar(MacroAssembler::StoreStore);
 7885   %}
 7886   ins_pipe(pipe_serial);
 7887 %}
 7888 
 7889 instruct membar_release_lock() %{
 7890   match(MemBarReleaseLock);
 7891   ins_cost(0);
 7892 
 7893   format %{ "#@membar_release_lock (elided)" %}
 7894 
 7895   ins_encode %{
 7896     __ block_comment("membar_release_lock (elided)");
 7897   %}
 7898 
 7899   ins_pipe(pipe_serial);
 7900 %}
 7901 
 7902 instruct membar_volatile() %{
 7903   match(MemBarVolatile);
 7904   ins_cost(ALU_COST);
 7905 
 7906   format %{ "#@membar_volatile\n\t"
            "fence iorw iorw" %}
 7908 
 7909   ins_encode %{
 7910     __ block_comment("membar_volatile");
 7911     __ membar(MacroAssembler::StoreLoad);
 7912   %}
 7913 
 7914   ins_pipe(pipe_serial);
 7915 %}
 7916 
 7917 instruct spin_wait() %{
 7918   predicate(UseZihintpause);
 7919   match(OnSpinWait);
 7920   ins_cost(CACHE_MISS_COST);
 7921 
 7922   format %{ "spin_wait" %}
 7923 
 7924   ins_encode %{
 7925     __ pause();
 7926   %}
 7927 
 7928   ins_pipe(pipe_serial);
 7929 %}
 7930 
 7931 // ============================================================================
 7932 // Cast Instructions (Java-level type cast)
 7933 
 7934 instruct castX2P(iRegPNoSp dst, iRegL src) %{
 7935   match(Set dst (CastX2P src));
 7936 
 7937   ins_cost(ALU_COST);
 7938   format %{ "mv  $dst, $src\t# long -> ptr, #@castX2P" %}
 7939 
 7940   ins_encode %{
 7941     if ($dst$$reg != $src$$reg) {
 7942       __ mv(as_Register($dst$$reg), as_Register($src$$reg));
 7943     }
 7944   %}
 7945 
 7946   ins_pipe(ialu_reg);
 7947 %}
 7948 
 7949 instruct castP2X(iRegLNoSp dst, iRegP src) %{
 7950   match(Set dst (CastP2X src));
 7951 
 7952   ins_cost(ALU_COST);
 7953   format %{ "mv  $dst, $src\t# ptr -> long, #@castP2X" %}
 7954 
 7955   ins_encode %{
 7956     if ($dst$$reg != $src$$reg) {
 7957       __ mv(as_Register($dst$$reg), as_Register($src$$reg));
 7958     }
 7959   %}
 7960 
 7961   ins_pipe(ialu_reg);
 7962 %}
 7963 
 7964 instruct castPP(iRegPNoSp dst)
 7965 %{
 7966   match(Set dst (CastPP dst));
 7967   ins_cost(0);
 7968 
 7969   size(0);
 7970   format %{ "# castPP of $dst, #@castPP" %}
 7971   ins_encode(/* empty encoding */);
 7972   ins_pipe(pipe_class_empty);
 7973 %}
 7974 
 7975 instruct castLL(iRegL dst)
 7976 %{
 7977   match(Set dst (CastLL dst));
 7978 
 7979   size(0);
 7980   format %{ "# castLL of $dst, #@castLL" %}
 7981   ins_encode(/* empty encoding */);
 7982   ins_cost(0);
 7983   ins_pipe(pipe_class_empty);
 7984 %}
 7985 
 7986 instruct castII(iRegI dst)
 7987 %{
 7988   match(Set dst (CastII dst));
 7989 
 7990   size(0);
 7991   format %{ "# castII of $dst, #@castII" %}
 7992   ins_encode(/* empty encoding */);
 7993   ins_cost(0);
 7994   ins_pipe(pipe_class_empty);
 7995 %}
 7996 
 7997 instruct checkCastPP(iRegPNoSp dst)
 7998 %{
 7999   match(Set dst (CheckCastPP dst));
 8000 
 8001   size(0);
 8002   ins_cost(0);
 8003   format %{ "# checkcastPP of $dst, #@checkCastPP" %}
 8004   ins_encode(/* empty encoding */);
 8005   ins_pipe(pipe_class_empty);
 8006 %}
 8007 
 8008 instruct castFF(fRegF dst)
 8009 %{
 8010   match(Set dst (CastFF dst));
 8011 
 8012   size(0);
 8013   format %{ "# castFF of $dst" %}
 8014   ins_encode(/* empty encoding */);
 8015   ins_cost(0);
 8016   ins_pipe(pipe_class_empty);
 8017 %}
 8018 
 8019 instruct castDD(fRegD dst)
 8020 %{
 8021   match(Set dst (CastDD dst));
 8022 
 8023   size(0);
 8024   format %{ "# castDD of $dst" %}
 8025   ins_encode(/* empty encoding */);
 8026   ins_cost(0);
 8027   ins_pipe(pipe_class_empty);
 8028 %}
 8029 
 8030 instruct castVV(vReg dst)
 8031 %{
 8032   match(Set dst (CastVV dst));
 8033 
 8034   size(0);
 8035   format %{ "# castVV of $dst" %}
 8036   ins_encode(/* empty encoding */);
 8037   ins_cost(0);
 8038   ins_pipe(pipe_class_empty);
 8039 %}
 8040 
 8041 // ============================================================================
 8042 // Convert Instructions
 8043 
 8044 // int to bool
 8045 instruct convI2Bool(iRegINoSp dst, iRegI src)
 8046 %{
 8047   match(Set dst (Conv2B src));
 8048 
 8049   ins_cost(ALU_COST);
 8050   format %{ "snez  $dst, $src\t#@convI2Bool" %}
 8051 
 8052   ins_encode %{
 8053     __ snez(as_Register($dst$$reg), as_Register($src$$reg));
 8054   %}
 8055 
 8056   ins_pipe(ialu_reg);
 8057 %}
 8058 
 8059 // pointer to bool
 8060 instruct convP2Bool(iRegINoSp dst, iRegP src)
 8061 %{
 8062   match(Set dst (Conv2B src));
 8063 
 8064   ins_cost(ALU_COST);
 8065   format %{ "snez  $dst, $src\t#@convP2Bool" %}
 8066 
 8067   ins_encode %{
 8068     __ snez(as_Register($dst$$reg), as_Register($src$$reg));
 8069   %}
 8070 
 8071   ins_pipe(ialu_reg);
 8072 %}
 8073 
 8074 // int <-> long
 8075 
 8076 instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
 8077 %{
 8078   match(Set dst (ConvI2L src));
 8079 
 8080   ins_cost(ALU_COST);
 8081   format %{ "addw  $dst, $src, zr\t#@convI2L_reg_reg" %}
 8082   ins_encode %{
 8083     __ sign_extend(as_Register($dst$$reg), as_Register($src$$reg), 32);
 8084   %}
 8085   ins_pipe(ialu_reg);
 8086 %}
 8087 
 8088 instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
 8089   match(Set dst (ConvL2I src));
 8090 
 8091   ins_cost(ALU_COST);
 8092   format %{ "addw  $dst, $src, zr\t#@convL2I_reg" %}
 8093 
 8094   ins_encode %{
 8095     __ sign_extend(as_Register($dst$$reg), as_Register($src$$reg), 32);
 8096   %}
 8097 
 8098   ins_pipe(ialu_reg);
 8099 %}
 8100 
 8101 // int to unsigned long (Zero-extend)
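// (matches the Java idiom ((long) x) & 0xffffffffL)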
 8102 instruct convI2UL_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
 8103 %{
 8104   match(Set dst (AndL (ConvI2L src) mask));
 8105 
 8106   ins_cost(ALU_COST * 2);
 8107   format %{ "zero_extend $dst, $src, 32\t# i2ul, #@convI2UL_reg_reg" %}
 8108 
 8109   ins_encode %{
 8110     __ zero_extend(as_Register($dst$$reg), as_Register($src$$reg), 32);
 8111   %}
 8112 
 8113   ins_pipe(ialu_reg_shift);
 8114 %}
 8115 
 8116 // float <-> double
 8117 
 8118 instruct convF2D_reg(fRegD dst, fRegF src) %{
 8119   match(Set dst (ConvF2D src));
 8120 
 8121   ins_cost(XFER_COST);
 8122   format %{ "fcvt.d.s  $dst, $src\t#@convF2D_reg" %}
 8123 
 8124   ins_encode %{
 8125     __ fcvt_d_s(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
 8126   %}
 8127 
 8128   ins_pipe(fp_f2d);
 8129 %}
 8130 
 8131 instruct convD2F_reg(fRegF dst, fRegD src) %{
 8132   match(Set dst (ConvD2F src));
 8133 
 8134   ins_cost(XFER_COST);
 8135   format %{ "fcvt.s.d  $dst, $src\t#@convD2F_reg" %}
 8136 
 8137   ins_encode %{
 8138     __ fcvt_s_d(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
 8139   %}
 8140 
 8141   ins_pipe(fp_d2f);
 8142 %}
 8143 
 8144 // single <-> half precision
 8145 
 8146 instruct convHF2F_reg_reg(fRegF dst, iRegINoSp src, iRegINoSp tmp) %{
 8147   match(Set dst (ConvHF2F src));
 8148   effect(TEMP tmp);
 8149   format %{ "fmv.h.x $dst, $src\t# move source from $src to $dst\n\t"
 8150             "fcvt.s.h $dst, $dst\t# convert half to single precision"
 8151   %}
 8152   ins_encode %{
 8153     __ float16_to_float($dst$$FloatRegister, $src$$Register, $tmp$$Register);
 8154   %}
 8155   ins_pipe(pipe_slow);
 8156 %}
 8157 
 8158 instruct convF2HF_reg_reg(iRegINoSp dst, fRegF src, fRegF ftmp, iRegINoSp xtmp) %{
 8159   match(Set dst (ConvF2HF src));
 8160   effect(TEMP_DEF dst, TEMP ftmp, TEMP xtmp);
 8161   format %{ "fcvt.h.s $ftmp, $src\t# convert single precision to half\n\t"
 8162             "fmv.x.h $dst, $ftmp\t# move result from $ftmp to $dst"
 8163   %}
 8164   ins_encode %{
 8165     __ float_to_float16($dst$$Register, $src$$FloatRegister, $ftmp$$FloatRegister, $xtmp$$Register);
 8166   %}
 8167   ins_pipe(pipe_slow);
 8168 %}
 8169 
 8170 // float <-> int
 8171 
 8172 instruct convF2I_reg_reg(iRegINoSp dst, fRegF src) %{
 8173   match(Set dst (ConvF2I src));
 8174 
 8175   ins_cost(XFER_COST);
 8176   format %{ "fcvt.w.s  $dst, $src\t#@convF2I_reg_reg" %}
 8177 
 8178   ins_encode %{
 8179     __ fcvt_w_s_safe($dst$$Register, $src$$FloatRegister);
 8180   %}
 8181 
 8182   ins_pipe(fp_f2i);
 8183 %}
 8184 
 8185 instruct convI2F_reg_reg(fRegF dst, iRegIorL2I src) %{
 8186   match(Set dst (ConvI2F src));
 8187 
 8188   ins_cost(XFER_COST);
 8189   format %{ "fcvt.s.w  $dst, $src\t#@convI2F_reg_reg" %}
 8190 
 8191   ins_encode %{
 8192     __ fcvt_s_w(as_FloatRegister($dst$$reg), as_Register($src$$reg));
 8193   %}
 8194 
 8195   ins_pipe(fp_i2f);
 8196 %}
 8197 
 8198 // float <-> long
 8199 
 8200 instruct convF2L_reg_reg(iRegLNoSp dst, fRegF src) %{
 8201   match(Set dst (ConvF2L src));
 8202 
 8203   ins_cost(XFER_COST);
 8204   format %{ "fcvt.l.s  $dst, $src\t#@convF2L_reg_reg" %}
 8205 
 8206   ins_encode %{
 8207     __ fcvt_l_s_safe($dst$$Register, $src$$FloatRegister);
 8208   %}
 8209 
 8210   ins_pipe(fp_f2l);
 8211 %}
 8212 
 8213 instruct convL2F_reg_reg(fRegF dst, iRegL src) %{
 8214   match(Set dst (ConvL2F src));
 8215 
 8216   ins_cost(XFER_COST);
 8217   format %{ "fcvt.s.l  $dst, $src\t#@convL2F_reg_reg" %}
 8218 
 8219   ins_encode %{
 8220     __ fcvt_s_l(as_FloatRegister($dst$$reg), as_Register($src$$reg));
 8221   %}
 8222 
 8223   ins_pipe(fp_l2f);
 8224 %}
 8225 
 8226 // double <-> int
 8227 
 8228 instruct convD2I_reg_reg(iRegINoSp dst, fRegD src) %{
 8229   match(Set dst (ConvD2I src));
 8230 
 8231   ins_cost(XFER_COST);
 8232   format %{ "fcvt.w.d  $dst, $src\t#@convD2I_reg_reg" %}
 8233 
 8234   ins_encode %{
 8235     __ fcvt_w_d_safe($dst$$Register, $src$$FloatRegister);
 8236   %}
 8237 
 8238   ins_pipe(fp_d2i);
 8239 %}
 8240 
 8241 instruct convI2D_reg_reg(fRegD dst, iRegIorL2I src) %{
 8242   match(Set dst (ConvI2D src));
 8243 
 8244   ins_cost(XFER_COST);
 8245   format %{ "fcvt.d.w  $dst, $src\t#@convI2D_reg_reg" %}
 8246 
 8247   ins_encode %{
 8248     __ fcvt_d_w(as_FloatRegister($dst$$reg), as_Register($src$$reg));
 8249   %}
 8250 
 8251   ins_pipe(fp_i2d);
 8252 %}
 8253 
 8254 // double <-> long
 8255 
 8256 instruct convD2L_reg_reg(iRegLNoSp dst, fRegD src) %{
 8257   match(Set dst (ConvD2L src));
 8258 
 8259   ins_cost(XFER_COST);
 8260   format %{ "fcvt.l.d  $dst, $src\t#@convD2L_reg_reg" %}
 8261 
 8262   ins_encode %{
 8263     __ fcvt_l_d_safe($dst$$Register, $src$$FloatRegister);
 8264   %}
 8265 
 8266   ins_pipe(fp_d2l);
 8267 %}
 8268 
 8269 instruct convL2D_reg_reg(fRegD dst, iRegL src) %{
 8270   match(Set dst (ConvL2D src));
 8271 
 8272   ins_cost(XFER_COST);
 8273   format %{ "fcvt.d.l  $dst, $src\t#@convL2D_reg_reg" %}
 8274 
 8275   ins_encode %{
 8276     __ fcvt_d_l(as_FloatRegister($dst$$reg), as_Register($src$$reg));
 8277   %}
 8278 
 8279   ins_pipe(fp_l2d);
 8280 %}
 8281 
 8282 // Convert oop into int for vectors alignment masking
 8283 instruct convP2I(iRegINoSp dst, iRegP src) %{
 8284   match(Set dst (ConvL2I (CastP2X src)));
 8285 
 8286   ins_cost(ALU_COST * 2);
 8287   format %{ "zero_extend $dst, $src, 32\t# ptr -> int, #@convP2I" %}
 8288 
 8289   ins_encode %{
 8290     __ zero_extend($dst$$Register, $src$$Register, 32);
 8291   %}
 8292 
 8293   ins_pipe(ialu_reg);
 8294 %}
 8295 
 8296 // Convert compressed oop into int for vectors alignment masking
 8297 // in case of 32bit oops (heap < 4Gb).
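// With a zero shift (and a zero base for heaps below 4G), the narrow oop is
// numerically identical to the low 32 bits of the decoded address, so a plain
// register move is sufficient.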
 8298 instruct convN2I(iRegINoSp dst, iRegN src)
 8299 %{
 8300   predicate(CompressedOops::shift() == 0);
 8301   match(Set dst (ConvL2I (CastP2X (DecodeN src))));
 8302 
 8303   ins_cost(ALU_COST);
 8304   format %{ "mv  $dst, $src\t# compressed ptr -> int, #@convN2I" %}
 8305 
 8306   ins_encode %{
 8307     __ mv($dst$$Register, $src$$Register);
 8308   %}
 8309 
 8310   ins_pipe(ialu_reg);
 8311 %}
 8312 
 8313 instruct round_double_reg(iRegLNoSp dst, fRegD src, fRegD ftmp) %{
 8314   match(Set dst (RoundD src));
 8315 
 8316   ins_cost(XFER_COST + BRANCH_COST);
 8317   effect(TEMP ftmp);
 8318   format %{ "java_round_double $dst, $src\t#@round_double_reg" %}
 8319 
 8320   ins_encode %{
 8321     __ java_round_double($dst$$Register, as_FloatRegister($src$$reg), as_FloatRegister($ftmp$$reg));
 8322   %}
 8323 
 8324   ins_pipe(pipe_slow);
 8325 %}
 8326 
 8327 instruct round_float_reg(iRegINoSp dst, fRegF src, fRegF ftmp) %{
 8328   match(Set dst (RoundF src));
 8329 
 8330   ins_cost(XFER_COST + BRANCH_COST);
 8331   effect(TEMP ftmp);
 8332   format %{ "java_round_float $dst, $src\t#@round_float_reg" %}
 8333 
 8334   ins_encode %{
 8335     __ java_round_float($dst$$Register, as_FloatRegister($src$$reg), as_FloatRegister($ftmp$$reg));
 8336   %}
 8337 
 8338   ins_pipe(pipe_slow);
 8339 %}
 8340 
 8341 // Convert oop pointer into compressed form
 8342 instruct encodeHeapOop(iRegNNoSp dst, iRegP src) %{
 8343   predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
 8344   match(Set dst (EncodeP src));
 8345   ins_cost(ALU_COST);
 8346   format %{ "encode_heap_oop  $dst, $src\t#@encodeHeapOop" %}
 8347   ins_encode %{
 8348     Register s = $src$$Register;
 8349     Register d = $dst$$Register;
 8350     __ encode_heap_oop(d, s);
 8351   %}
 8352   ins_pipe(pipe_class_default);
 8353 %}
 8354 
 8355 instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src) %{
 8356   predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
 8357   match(Set dst (EncodeP src));
 8358   ins_cost(ALU_COST);
 8359   format %{ "encode_heap_oop_not_null $dst, $src\t#@encodeHeapOop_not_null" %}
 8360   ins_encode %{
 8361     __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
 8362   %}
 8363   ins_pipe(pipe_class_default);
 8364 %}
 8365 
 8366 instruct decodeHeapOop(iRegPNoSp dst, iRegN src) %{
 8367   predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
 8368             n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
 8369   match(Set dst (DecodeN src));
 8370 
 8371   ins_cost(0);
 8372   format %{ "decode_heap_oop  $dst, $src\t#@decodeHeapOop" %}
 8373   ins_encode %{
 8374     Register s = $src$$Register;
 8375     Register d = $dst$$Register;
 8376     __ decode_heap_oop(d, s);
 8377   %}
 8378   ins_pipe(pipe_class_default);
 8379 %}
 8380 
 8381 instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src) %{
 8382   predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
 8383             n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
 8384   match(Set dst (DecodeN src));
 8385 
 8386   ins_cost(0);
 8387   format %{ "decode_heap_oop_not_null $dst, $src\t#@decodeHeapOop_not_null" %}
 8388   ins_encode %{
 8389     Register s = $src$$Register;
 8390     Register d = $dst$$Register;
 8391     __ decode_heap_oop_not_null(d, s);
 8392   %}
 8393   ins_pipe(pipe_class_default);
 8394 %}
 8395 
 8396 // Convert klass pointer into compressed form.
 8397 instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
 8398   match(Set dst (EncodePKlass src));
 8399 
 8400   ins_cost(ALU_COST);
 8401   format %{ "encode_klass_not_null  $dst, $src\t#@encodeKlass_not_null" %}
 8402 
 8403   ins_encode %{
 8404     Register src_reg = as_Register($src$$reg);
 8405     Register dst_reg = as_Register($dst$$reg);
 8406     __ encode_klass_not_null(dst_reg, src_reg, t0);
 8407   %}
 8408 
  ins_pipe(pipe_class_default);
 8410 %}
 8411 
 8412 instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src, iRegPNoSp tmp) %{
 8413   match(Set dst (DecodeNKlass src));
 8414 
 8415   effect(TEMP tmp);
 8416 
 8417   ins_cost(ALU_COST);
 8418   format %{ "decode_klass_not_null  $dst, $src\t#@decodeKlass_not_null" %}
 8419 
 8420   ins_encode %{
 8421     Register src_reg = as_Register($src$$reg);
 8422     Register dst_reg = as_Register($dst$$reg);
 8423     Register tmp_reg = as_Register($tmp$$reg);
 8424     __ decode_klass_not_null(dst_reg, src_reg, tmp_reg);
 8425   %}
 8426 
  ins_pipe(pipe_class_default);
 8428 %}
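
// The arithmetic behind the oop/klass encode and decode nodes above is,
// roughly:
//
//   narrow = (ptr - base) >> shift;                  // encode
//   ptr    = base + ((uintptr_t)narrow << shift);    // decode
//
// where base and shift are fixed at VM startup and may be zero, in which
// case the macro-assembler routines can degenerate to a single move or
// shift.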
 8429 
 8430 // stack <-> reg and reg <-> reg shuffles with no conversion
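// These correspond to the raw bit reinterpretations at the Java level,
// e.g. Float.floatToRawIntBits / Float.intBitsToFloat and the
// Double/long counterparts: the bit pattern is copied unchanged, either
// through a stack slot or directly between the integer and FP register
// files (fmv.x.w, fmv.w.x, fmv.x.d, fmv.d.x).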
 8431 
 8432 instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{
 8433 
 8434   match(Set dst (MoveF2I src));
 8435 
 8436   effect(DEF dst, USE src);
 8437 
 8438   ins_cost(LOAD_COST);
 8439 
 8440   format %{ "lw  $dst, $src\t#@MoveF2I_stack_reg" %}
 8441 
 8442   ins_encode %{
 8443     __ lw(as_Register($dst$$reg), Address(sp, $src$$disp));
 8444   %}
 8445 
 8446   ins_pipe(iload_reg_reg);
 8447 
 8448 %}
 8449 
 8450 instruct MoveI2F_stack_reg(fRegF dst, stackSlotI src) %{
 8451 
 8452   match(Set dst (MoveI2F src));
 8453 
 8454   effect(DEF dst, USE src);
 8455 
 8456   ins_cost(LOAD_COST);
 8457 
 8458   format %{ "flw  $dst, $src\t#@MoveI2F_stack_reg" %}
 8459 
 8460   ins_encode %{
 8461     __ flw(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
 8462   %}
 8463 
 8464   ins_pipe(fp_load_mem_s);
 8465 
 8466 %}
 8467 
 8468 instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{
 8469 
 8470   match(Set dst (MoveD2L src));
 8471 
 8472   effect(DEF dst, USE src);
 8473 
 8474   ins_cost(LOAD_COST);
 8475 
 8476   format %{ "ld  $dst, $src\t#@MoveD2L_stack_reg" %}
 8477 
 8478   ins_encode %{
 8479     __ ld(as_Register($dst$$reg), Address(sp, $src$$disp));
 8480   %}
 8481 
 8482   ins_pipe(iload_reg_reg);
 8483 
 8484 %}
 8485 
 8486 instruct MoveL2D_stack_reg(fRegD dst, stackSlotL src) %{
 8487 
 8488   match(Set dst (MoveL2D src));
 8489 
 8490   effect(DEF dst, USE src);
 8491 
 8492   ins_cost(LOAD_COST);
 8493 
 8494   format %{ "fld  $dst, $src\t#@MoveL2D_stack_reg" %}
 8495 
 8496   ins_encode %{
 8497     __ fld(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
 8498   %}
 8499 
 8500   ins_pipe(fp_load_mem_d);
 8501 
 8502 %}
 8503 
 8504 instruct MoveF2I_reg_stack(stackSlotI dst, fRegF src) %{
 8505 
 8506   match(Set dst (MoveF2I src));
 8507 
 8508   effect(DEF dst, USE src);
 8509 
 8510   ins_cost(STORE_COST);
 8511 
 8512   format %{ "fsw  $src, $dst\t#@MoveF2I_reg_stack" %}
 8513 
 8514   ins_encode %{
 8515     __ fsw(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
 8516   %}
 8517 
 8518   ins_pipe(fp_store_reg_s);
 8519 
 8520 %}
 8521 
 8522 instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{
 8523 
 8524   match(Set dst (MoveI2F src));
 8525 
 8526   effect(DEF dst, USE src);
 8527 
 8528   ins_cost(STORE_COST);
 8529 
 8530   format %{ "sw  $src, $dst\t#@MoveI2F_reg_stack" %}
 8531 
 8532   ins_encode %{
 8533     __ sw(as_Register($src$$reg), Address(sp, $dst$$disp));
 8534   %}
 8535 
 8536   ins_pipe(istore_reg_reg);
 8537 
 8538 %}
 8539 
 8540 instruct MoveD2L_reg_stack(stackSlotL dst, fRegD src) %{
 8541 
 8542   match(Set dst (MoveD2L src));
 8543 
 8544   effect(DEF dst, USE src);
 8545 
 8546   ins_cost(STORE_COST);
 8547 
  format %{ "fsd  $src, $dst\t#@MoveD2L_reg_stack" %}
 8549 
 8550   ins_encode %{
 8551     __ fsd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
 8552   %}
 8553 
 8554   ins_pipe(fp_store_reg_d);
 8555 
 8556 %}
 8557 
 8558 instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{
 8559 
 8560   match(Set dst (MoveL2D src));
 8561 
 8562   effect(DEF dst, USE src);
 8563 
 8564   ins_cost(STORE_COST);
 8565 
 8566   format %{ "sd  $src, $dst\t#@MoveL2D_reg_stack" %}
 8567 
 8568   ins_encode %{
 8569     __ sd(as_Register($src$$reg), Address(sp, $dst$$disp));
 8570   %}
 8571 
 8572   ins_pipe(istore_reg_reg);
 8573 
 8574 %}
 8575 
 8576 instruct MoveF2I_reg_reg(iRegINoSp dst, fRegF src) %{
 8577 
 8578   match(Set dst (MoveF2I src));
 8579 
 8580   effect(DEF dst, USE src);
 8581 
 8582   ins_cost(FMVX_COST);
 8583 
 8584   format %{ "fmv.x.w  $dst, $src\t#@MoveF2I_reg_reg" %}
 8585 
 8586   ins_encode %{
 8587     __ fmv_x_w(as_Register($dst$$reg), as_FloatRegister($src$$reg));
 8588   %}
 8589 
 8590   ins_pipe(fp_f2i);
 8591 
 8592 %}
 8593 
 8594 instruct MoveI2F_reg_reg(fRegF dst, iRegI src) %{
 8595 
 8596   match(Set dst (MoveI2F src));
 8597 
 8598   effect(DEF dst, USE src);
 8599 
 8600   ins_cost(FMVX_COST);
 8601 
 8602   format %{ "fmv.w.x  $dst, $src\t#@MoveI2F_reg_reg" %}
 8603 
 8604   ins_encode %{
 8605     __ fmv_w_x(as_FloatRegister($dst$$reg), as_Register($src$$reg));
 8606   %}
 8607 
 8608   ins_pipe(fp_i2f);
 8609 
 8610 %}
 8611 
 8612 instruct MoveD2L_reg_reg(iRegLNoSp dst, fRegD src) %{
 8613 
 8614   match(Set dst (MoveD2L src));
 8615 
 8616   effect(DEF dst, USE src);
 8617 
 8618   ins_cost(FMVX_COST);
 8619 
 8620   format %{ "fmv.x.d $dst, $src\t#@MoveD2L_reg_reg" %}
 8621 
 8622   ins_encode %{
 8623     __ fmv_x_d(as_Register($dst$$reg), as_FloatRegister($src$$reg));
 8624   %}
 8625 
 8626   ins_pipe(fp_d2l);
 8627 
 8628 %}
 8629 
 8630 instruct MoveL2D_reg_reg(fRegD dst, iRegL src) %{
 8631 
 8632   match(Set dst (MoveL2D src));
 8633 
 8634   effect(DEF dst, USE src);
 8635 
 8636   ins_cost(FMVX_COST);
 8637 
 8638   format %{ "fmv.d.x  $dst, $src\t#@MoveL2D_reg_reg" %}
 8639 
 8640   ins_encode %{
 8641     __ fmv_d_x(as_FloatRegister($dst$$reg), as_Register($src$$reg));
 8642   %}
 8643 
 8644   ins_pipe(fp_l2d);
 8645 
 8646 %}
 8647 
 8648 // ============================================================================
// Compare instructions which set the result of the comparison in the destination register.
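// For the float/double forms, the three-way result follows the fcmpl/dcmpl
// flavour of comparison: -1 when op1 < op2 or either input is NaN, 0 when
// equal, 1 when op1 > op2, which is what the unordered_result == -1
// argument passed to float_compare/double_compare below selects.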
 8650 
 8651 instruct cmpF3_reg_reg(iRegINoSp dst, fRegF op1, fRegF op2)
 8652 %{
 8653   match(Set dst (CmpF3 op1 op2));
 8654 
 8655   ins_cost(XFER_COST * 2 + BRANCH_COST + ALU_COST);
 8656   format %{ "flt.s  $dst, $op2, $op1\t#@cmpF3_reg_reg\n\t"
 8657             "bgtz   $dst, done\n\t"
 8658             "feq.s  $dst, $op1, $op2\n\t"
 8659             "addi   $dst, $dst, -1\n\t"
 8660             "done:"
 8661   %}
 8662 
 8663   ins_encode %{
 8664     // we want -1 for unordered or less than, 0 for equal and 1 for greater than.
 8665     __ float_compare(as_Register($dst$$reg), as_FloatRegister($op1$$reg),
 8666                      as_FloatRegister($op2$$reg), -1 /*unordered_result < 0*/);
 8667   %}
 8668 
 8669   ins_pipe(pipe_class_default);
 8670 %}
 8671 
 8672 instruct cmpD3_reg_reg(iRegINoSp dst, fRegD op1, fRegD op2)
 8673 %{
 8674   match(Set dst (CmpD3 op1 op2));
 8675 
 8676   ins_cost(XFER_COST * 2 + BRANCH_COST + ALU_COST);
 8677   format %{ "flt.d  $dst, $op2, $op1\t#@cmpD3_reg_reg\n\t"
 8678             "bgtz   $dst, done\n\t"
 8679             "feq.d  $dst, $op1, $op2\n\t"
 8680             "addi   $dst, $dst, -1\n\t"
 8681             "done:"
 8682   %}
 8683 
 8684   ins_encode %{
 8685     // we want -1 for unordered or less than, 0 for equal and 1 for greater than.
 8686     __ double_compare(as_Register($dst$$reg), as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg), -1 /*unordered_result < 0*/);
 8687   %}
 8688 
 8689   ins_pipe(pipe_class_default);
 8690 %}
 8691 
 8692 instruct cmpL3_reg_reg(iRegINoSp dst, iRegL op1, iRegL op2)
 8693 %{
 8694   match(Set dst (CmpL3 op1 op2));
 8695 
 8696   ins_cost(ALU_COST * 3 + BRANCH_COST);
 8697   format %{ "slt   $dst, $op2, $op1\t#@cmpL3_reg_reg\n\t"
 8698             "bnez  $dst, done\n\t"
 8699             "slt   $dst, $op1, $op2\n\t"
 8700             "neg   $dst, $dst\n\t"
 8701             "done:"
 8702   %}
 8703   ins_encode %{
 8704     __ cmp_l2i(t0, as_Register($op1$$reg), as_Register($op2$$reg));
 8705     __ mv(as_Register($dst$$reg), t0);
 8706   %}
 8707 
 8708   ins_pipe(pipe_class_default);
 8709 %}
 8710 
 8711 instruct cmpUL3_reg_reg(iRegINoSp dst, iRegL op1, iRegL op2)
 8712 %{
 8713   match(Set dst (CmpUL3 op1 op2));
 8714 
 8715   ins_cost(ALU_COST * 3 + BRANCH_COST);
 8716   format %{ "sltu  $dst, $op2, $op1\t#@cmpUL3_reg_reg\n\t"
 8717             "bnez  $dst, done\n\t"
 8718             "sltu  $dst, $op1, $op2\n\t"
 8719             "neg   $dst, $dst\n\t"
 8720             "done:"
 8721   %}
 8722   ins_encode %{
 8723     __ cmp_ul2i(t0, as_Register($op1$$reg), as_Register($op2$$reg));
 8724     __ mv(as_Register($dst$$reg), t0);
 8725   %}
 8726 
 8727   ins_pipe(pipe_class_default);
 8728 %}
 8729 
 8730 instruct cmpU3_reg_reg(iRegINoSp dst, iRegI op1, iRegI op2)
 8731 %{
 8732   match(Set dst (CmpU3 op1 op2));
 8733 
 8734   ins_cost(ALU_COST * 3 + BRANCH_COST);
 8735   format %{ "sltu  $dst, $op2, $op1\t#@cmpU3_reg_reg\n\t"
 8736             "bnez  $dst, done\n\t"
 8737             "sltu  $dst, $op1, $op2\n\t"
 8738             "neg   $dst, $dst\n\t"
 8739             "done:"
 8740   %}
 8741   ins_encode %{
 8742     __ cmp_uw2i(t0, as_Register($op1$$reg), as_Register($op2$$reg));
 8743     __ mv(as_Register($dst$$reg), t0);
 8744   %}
 8745 
 8746   ins_pipe(pipe_class_default);
 8747 %}
 8748 
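// CmpLTMask produces an all-ones mask (-1) when p < q and 0 otherwise,
// which lets branch-free idioms of the following shape be selected, roughly:
//
//   mask = (p < q) ? -1 : 0;
//   res  = x + (y & mask);     // add y only when p < q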
 8749 instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegI p, iRegI q)
 8750 %{
 8751   match(Set dst (CmpLTMask p q));
 8752 
 8753   ins_cost(2 * ALU_COST);
 8754 
 8755   format %{ "slt $dst, $p, $q\t#@cmpLTMask_reg_reg\n\t"
 8756             "subw $dst, zr, $dst\t#@cmpLTMask_reg_reg"
 8757   %}
 8758 
 8759   ins_encode %{
 8760     __ slt(as_Register($dst$$reg), as_Register($p$$reg), as_Register($q$$reg));
 8761     __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
 8762   %}
 8763 
 8764   ins_pipe(ialu_reg_reg);
 8765 %}
 8766 
 8767 instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I op, immI0 zero)
 8768 %{
 8769   match(Set dst (CmpLTMask op zero));
 8770 
 8771   ins_cost(ALU_COST);
 8772 
  format %{ "sraiw $dst, $op, 31\t#@cmpLTMask_reg_zero" %}
 8774 
 8775   ins_encode %{
 8776     __ sraiw(as_Register($dst$$reg), as_Register($op$$reg), 31);
 8777   %}
 8778 
 8779   ins_pipe(ialu_reg_shift);
 8780 %}
 8781 
 8782 
 8783 // ============================================================================
 8784 // Max and Min
 8785 
 8786 instruct minI_reg_reg(iRegINoSp dst, iRegI src)
 8787 %{
 8788   match(Set dst (MinI dst src));
 8789 
 8790   ins_cost(BRANCH_COST + ALU_COST);
 8791   format %{
 8792     "ble $dst, $src, skip\t#@minI_reg_reg\n\t"
 8793     "mv  $dst, $src\n\t"
 8794     "skip:"
 8795   %}
 8796 
 8797   ins_encode %{
 8798     Label Lskip;
 8799     __ ble(as_Register($dst$$reg), as_Register($src$$reg), Lskip);
 8800     __ mv(as_Register($dst$$reg), as_Register($src$$reg));
 8801     __ bind(Lskip);
 8802   %}
 8803 
 8804   ins_pipe(pipe_class_compare);
 8805 %}
 8806 
 8807 instruct maxI_reg_reg(iRegINoSp dst, iRegI src)
 8808 %{
 8809   match(Set dst (MaxI dst src));
 8810 
 8811   ins_cost(BRANCH_COST + ALU_COST);
 8812   format %{
 8813     "bge $dst, $src, skip\t#@maxI_reg_reg\n\t"
 8814     "mv  $dst, $src\n\t"
 8815     "skip:"
 8816   %}
 8817 
 8818   ins_encode %{
 8819     Label Lskip;
 8820     __ bge(as_Register($dst$$reg), as_Register($src$$reg), Lskip);
 8821     __ mv(as_Register($dst$$reg), as_Register($src$$reg));
 8822     __ bind(Lskip);
 8823   %}
 8824 
 8825   ins_pipe(pipe_class_compare);
 8826 %}
 8827 
 8828 // special case for comparing with zero
 8829 // n.b. this is selected in preference to the rule above because it
 8830 // avoids loading constant 0 into a source register
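// For example, Math.min(x, 0) can be emitted as
//
//   blez x, Lskip
//   mv   x, zr
//   Lskip:
//
// with no constant load at all.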
 8831 
 8832 instruct minI_reg_zero(iRegINoSp dst, immI0 zero)
 8833 %{
 8834   match(Set dst (MinI dst zero));
 8835   match(Set dst (MinI zero dst));
 8836 
 8837   ins_cost(BRANCH_COST + ALU_COST);
 8838   format %{
 8839     "blez $dst, skip\t#@minI_reg_zero\n\t"
 8840     "mv   $dst, zr\n\t"
 8841     "skip:"
 8842   %}
 8843 
 8844   ins_encode %{
 8845     Label Lskip;
 8846     __ blez(as_Register($dst$$reg), Lskip);
 8847     __ mv(as_Register($dst$$reg), zr);
 8848     __ bind(Lskip);
 8849   %}
 8850 
 8851   ins_pipe(pipe_class_compare);
 8852 %}
 8853 
 8854 instruct maxI_reg_zero(iRegINoSp dst, immI0 zero)
 8855 %{
 8856   match(Set dst (MaxI dst zero));
 8857   match(Set dst (MaxI zero dst));
 8858 
 8859   ins_cost(BRANCH_COST + ALU_COST);
 8860   format %{
 8861     "bgez $dst, skip\t#@maxI_reg_zero\n\t"
 8862     "mv   $dst, zr\n\t"
 8863     "skip:"
 8864   %}
 8865 
 8866   ins_encode %{
 8867     Label Lskip;
 8868     __ bgez(as_Register($dst$$reg), Lskip);
 8869     __ mv(as_Register($dst$$reg), zr);
 8870     __ bind(Lskip);
 8871   %}
 8872 
 8873   ins_pipe(pipe_class_compare);
 8874 %}
 8875 
 8876 instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
 8877 %{
 8878   match(Set dst (MinI src1 src2));
 8879 
 8880   effect(DEF dst, USE src1, USE src2);
 8881 
 8882   ins_cost(BRANCH_COST + ALU_COST * 2);
 8883   format %{
 8884     "ble $src1, $src2, Lsrc1\t#@minI_rReg\n\t"
 8885     "mv $dst, $src2\n\t"
 8886     "j Ldone\n\t"
 8887     "Lsrc1:\n\t"
 8888     "mv $dst, $src1\n\t"
 8889     "Ldone:"
 8890   %}
 8891 
 8892   ins_encode %{
 8893     Label Lsrc1, Ldone;
 8894     __ ble(as_Register($src1$$reg), as_Register($src2$$reg), Lsrc1);
 8895     __ mv(as_Register($dst$$reg), as_Register($src2$$reg));
 8896     __ j(Ldone);
 8897     __ bind(Lsrc1);
 8898     __ mv(as_Register($dst$$reg), as_Register($src1$$reg));
 8899     __ bind(Ldone);
 8900   %}
 8901 
 8902   ins_pipe(pipe_class_compare);
 8903 %}
 8904 
 8905 instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
 8906 %{
 8907   match(Set dst (MaxI src1 src2));
 8908 
 8909   effect(DEF dst, USE src1, USE src2);
 8910 
 8911   ins_cost(BRANCH_COST + ALU_COST * 2);
 8912   format %{
 8913     "bge $src1, $src2, Lsrc1\t#@maxI_rReg\n\t"
 8914     "mv $dst, $src2\n\t"
 8915     "j Ldone\n\t"
 8916     "Lsrc1:\n\t"
 8917     "mv $dst, $src1\n\t"
 8918     "Ldone:"
 8919   %}
 8920 
 8921   ins_encode %{
 8922     Label Lsrc1, Ldone;
 8923     __ bge(as_Register($src1$$reg), as_Register($src2$$reg), Lsrc1);
 8924     __ mv(as_Register($dst$$reg), as_Register($src2$$reg));
 8925     __ j(Ldone);
 8926     __ bind(Lsrc1);
 8927     __ mv(as_Register($dst$$reg), as_Register($src1$$reg));
 8928     __ bind(Ldone);
  %}
 8931 
 8932   ins_pipe(pipe_class_compare);
 8933 %}
 8934 
 8935 // ============================================================================
 8936 // Branch Instructions
 8937 // Direct Branch.
 8938 instruct branch(label lbl)
 8939 %{
 8940   match(Goto);
 8941 
 8942   effect(USE lbl);
 8943 
 8944   ins_cost(BRANCH_COST);
 8945   format %{ "j  $lbl\t#@branch" %}
 8946 
 8947   ins_encode(riscv_enc_j(lbl));
 8948 
 8949   ins_pipe(pipe_branch);
 8950 %}
 8951 
 8952 // ============================================================================
 8953 // Compare and Branch Instructions
 8954 
 8955 // Patterns for short (< 12KiB) variants
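//
// A near variant emits a single conditional branch straight to the label.
// When the label may be out of reach of that encoding, C2 falls back to
// the far_* rules further below, which invert the condition and branch
// over an unconditional jump, roughly:
//
//   b$not_cmp  op1, op2, done
//   j          $lbl
//   done: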
 8956 
 8957 // Compare flags and branch near instructions.
 8958 instruct cmpFlag_branch(cmpOpEqNe cmp, rFlagsReg cr, label lbl) %{
 8959   match(If cmp cr);
 8960   effect(USE lbl);
 8961 
 8962   ins_cost(BRANCH_COST);
 8963   format %{ "b$cmp  $cr, zr, $lbl\t#@cmpFlag_branch" %}
 8964 
 8965   ins_encode %{
 8966     __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($cr$$reg), *($lbl$$label));
 8967   %}
 8968   ins_pipe(pipe_cmpz_branch);
 8969   ins_short_branch(1);
 8970 %}
 8971 
 8972 // Compare signed int and branch near instructions
 8973 instruct cmpI_branch(cmpOp cmp, iRegI op1, iRegI op2, label lbl)
 8974 %{
 8975   // Same match rule as `far_cmpI_branch'.
 8976   match(If cmp (CmpI op1 op2));
 8977 
 8978   effect(USE lbl);
 8979 
 8980   ins_cost(BRANCH_COST);
 8981 
 8982   format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpI_branch" %}
 8983 
 8984   ins_encode %{
 8985     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label));
 8986   %}
 8987 
 8988   ins_pipe(pipe_cmp_branch);
 8989   ins_short_branch(1);
 8990 %}
 8991 
 8992 instruct cmpI_loop(cmpOp cmp, iRegI op1, iRegI op2, label lbl)
 8993 %{
 8994   // Same match rule as `far_cmpI_loop'.
 8995   match(CountedLoopEnd cmp (CmpI op1 op2));
 8996 
 8997   effect(USE lbl);
 8998 
 8999   ins_cost(BRANCH_COST);
 9000 
 9001   format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpI_loop" %}
 9002 
 9003   ins_encode %{
 9004     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label));
 9005   %}
 9006 
 9007   ins_pipe(pipe_cmp_branch);
 9008   ins_short_branch(1);
 9009 %}
 9010 
 9011 // Compare unsigned int and branch near instructions
 9012 instruct cmpU_branch(cmpOpU cmp, iRegI op1, iRegI op2, label lbl)
 9013 %{
 9014   // Same match rule as `far_cmpU_branch'.
 9015   match(If cmp (CmpU op1 op2));
 9016 
 9017   effect(USE lbl);
 9018 
 9019   ins_cost(BRANCH_COST);
 9020 
 9021   format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpU_branch" %}
 9022 
 9023   ins_encode %{
 9024     __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
 9025                   as_Register($op2$$reg), *($lbl$$label));
 9026   %}
 9027 
 9028   ins_pipe(pipe_cmp_branch);
 9029   ins_short_branch(1);
 9030 %}
 9031 
 9032 // Compare signed long and branch near instructions
 9033 instruct cmpL_branch(cmpOp cmp, iRegL op1, iRegL op2, label lbl)
 9034 %{
 9035   // Same match rule as `far_cmpL_branch'.
 9036   match(If cmp (CmpL op1 op2));
 9037 
 9038   effect(USE lbl);
 9039 
 9040   ins_cost(BRANCH_COST);
 9041 
 9042   format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpL_branch" %}
 9043 
 9044   ins_encode %{
 9045     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label));
 9046   %}
 9047 
 9048   ins_pipe(pipe_cmp_branch);
 9049   ins_short_branch(1);
 9050 %}
 9051 
 9052 instruct cmpL_loop(cmpOp cmp, iRegL op1, iRegL op2, label lbl)
 9053 %{
 9054   // Same match rule as `far_cmpL_loop'.
 9055   match(CountedLoopEnd cmp (CmpL op1 op2));
 9056 
 9057   effect(USE lbl);
 9058 
 9059   ins_cost(BRANCH_COST);
 9060 
 9061   format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpL_loop" %}
 9062 
 9063   ins_encode %{
 9064     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label));
 9065   %}
 9066 
 9067   ins_pipe(pipe_cmp_branch);
 9068   ins_short_branch(1);
 9069 %}
 9070 
 9071 // Compare unsigned long and branch near instructions
 9072 instruct cmpUL_branch(cmpOpU cmp, iRegL op1, iRegL op2, label lbl)
 9073 %{
 9074   // Same match rule as `far_cmpUL_branch'.
 9075   match(If cmp (CmpUL op1 op2));
 9076 
 9077   effect(USE lbl);
 9078 
 9079   ins_cost(BRANCH_COST);
 9080   format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpUL_branch" %}
 9081 
 9082   ins_encode %{
 9083     __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
 9084                   as_Register($op2$$reg), *($lbl$$label));
 9085   %}
 9086 
 9087   ins_pipe(pipe_cmp_branch);
 9088   ins_short_branch(1);
 9089 %}
 9090 
 9091 // Compare pointer and branch near instructions
 9092 instruct cmpP_branch(cmpOpU cmp, iRegP op1, iRegP op2, label lbl)
 9093 %{
 9094   // Same match rule as `far_cmpP_branch'.
 9095   match(If cmp (CmpP op1 op2));
 9096 
 9097   effect(USE lbl);
 9098 
 9099   ins_cost(BRANCH_COST);
 9100 
 9101   format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpP_branch" %}
 9102 
 9103   ins_encode %{
 9104     __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
 9105                   as_Register($op2$$reg), *($lbl$$label));
 9106   %}
 9107 
 9108   ins_pipe(pipe_cmp_branch);
 9109   ins_short_branch(1);
 9110 %}
 9111 
 9112 // Compare narrow pointer and branch near instructions
 9113 instruct cmpN_branch(cmpOpU cmp, iRegN op1, iRegN op2, label lbl)
 9114 %{
 9115   // Same match rule as `far_cmpN_branch'.
 9116   match(If cmp (CmpN op1 op2));
 9117 
 9118   effect(USE lbl);
 9119 
 9120   ins_cost(BRANCH_COST);
 9121 
 9122   format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpN_branch" %}
 9123 
 9124   ins_encode %{
 9125     __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
 9126                   as_Register($op2$$reg), *($lbl$$label));
 9127   %}
 9128 
 9129   ins_pipe(pipe_cmp_branch);
 9130   ins_short_branch(1);
 9131 %}
 9132 
 9133 // Compare float and branch near instructions
 9134 instruct cmpF_branch(cmpOp cmp, fRegF op1, fRegF op2, label lbl)
 9135 %{
 9136   // Same match rule as `far_cmpF_branch'.
 9137   match(If cmp (CmpF op1 op2));
 9138 
 9139   effect(USE lbl);
 9140 
 9141   ins_cost(XFER_COST + BRANCH_COST);
  format %{ "float_b$cmp $op1, $op2, $lbl\t#@cmpF_branch" %}
 9143 
 9144   ins_encode %{
 9145     __ float_cmp_branch($cmp$$cmpcode, as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg), *($lbl$$label));
 9146   %}
 9147 
 9148   ins_pipe(pipe_class_compare);
 9149   ins_short_branch(1);
 9150 %}
 9151 
 9152 // Compare double and branch near instructions
 9153 instruct cmpD_branch(cmpOp cmp, fRegD op1, fRegD op2, label lbl)
 9154 %{
 9155   // Same match rule as `far_cmpD_branch'.
 9156   match(If cmp (CmpD op1 op2));
 9157   effect(USE lbl);
 9158 
 9159   ins_cost(XFER_COST + BRANCH_COST);
 9160   format %{ "double_b$cmp $op1, $op2, $lbl\t#@cmpD_branch"%}
 9161 
 9162   ins_encode %{
 9163     __ float_cmp_branch($cmp$$cmpcode | C2_MacroAssembler::double_branch_mask, as_FloatRegister($op1$$reg),
 9164                         as_FloatRegister($op2$$reg), *($lbl$$label));
 9165   %}
 9166 
 9167   ins_pipe(pipe_class_compare);
 9168   ins_short_branch(1);
 9169 %}
 9170 
 9171 // Compare signed int with zero and branch near instructions
 9172 instruct cmpI_reg_imm0_branch(cmpOp cmp, iRegI op1, immI0 zero, label lbl)
 9173 %{
 9174   // Same match rule as `far_cmpI_reg_imm0_branch'.
 9175   match(If cmp (CmpI op1 zero));
 9176 
 9177   effect(USE op1, USE lbl);
 9178 
 9179   ins_cost(BRANCH_COST);
 9180   format %{ "b$cmp  $op1, zr, $lbl\t#@cmpI_reg_imm0_branch" %}
 9181 
 9182   ins_encode %{
 9183     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label));
 9184   %}
 9185 
 9186   ins_pipe(pipe_cmpz_branch);
 9187   ins_short_branch(1);
 9188 %}
 9189 
 9190 instruct cmpI_reg_imm0_loop(cmpOp cmp, iRegI op1, immI0 zero, label lbl)
 9191 %{
 9192   // Same match rule as `far_cmpI_reg_imm0_loop'.
 9193   match(CountedLoopEnd cmp (CmpI op1 zero));
 9194 
 9195   effect(USE op1, USE lbl);
 9196 
 9197   ins_cost(BRANCH_COST);
 9198 
 9199   format %{ "b$cmp  $op1, zr, $lbl\t#@cmpI_reg_imm0_loop" %}
 9200 
 9201   ins_encode %{
 9202     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label));
 9203   %}
 9204 
 9205   ins_pipe(pipe_cmpz_branch);
 9206   ins_short_branch(1);
 9207 %}
 9208 
 9209 // Compare unsigned int with zero and branch near instructions
 9210 instruct cmpUEqNeLeGt_reg_imm0_branch(cmpOpUEqNeLeGt cmp, iRegI op1, immI0 zero, label lbl)
 9211 %{
  // Same match rule as `far_cmpUEqNeLeGt_imm0_branch'.
 9213   match(If cmp (CmpU op1 zero));
 9214 
 9215   effect(USE op1, USE lbl);
 9216 
 9217   ins_cost(BRANCH_COST);
 9218 
 9219   format %{ "b$cmp  $op1, zr, $lbl\t#@cmpUEqNeLeGt_reg_imm0_branch" %}
 9220 
 9221   ins_encode %{
 9222     __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
 9223   %}
 9224 
 9225   ins_pipe(pipe_cmpz_branch);
 9226   ins_short_branch(1);
 9227 %}
 9228 
 9229 // Compare signed long with zero and branch near instructions
 9230 instruct cmpL_reg_imm0_branch(cmpOp cmp, iRegL op1, immL0 zero, label lbl)
 9231 %{
 9232   // Same match rule as `far_cmpL_reg_imm0_branch'.
 9233   match(If cmp (CmpL op1 zero));
 9234 
 9235   effect(USE op1, USE lbl);
 9236 
 9237   ins_cost(BRANCH_COST);
 9238 
 9239   format %{ "b$cmp  $op1, zr, $lbl\t#@cmpL_reg_imm0_branch" %}
 9240 
 9241   ins_encode %{
 9242     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label));
 9243   %}
 9244 
 9245   ins_pipe(pipe_cmpz_branch);
 9246   ins_short_branch(1);
 9247 %}
 9248 
 9249 instruct cmpL_reg_imm0_loop(cmpOp cmp, iRegL op1, immL0 zero, label lbl)
 9250 %{
 9251   // Same match rule as `far_cmpL_reg_imm0_loop'.
 9252   match(CountedLoopEnd cmp (CmpL op1 zero));
 9253 
 9254   effect(USE op1, USE lbl);
 9255 
 9256   ins_cost(BRANCH_COST);
 9257 
 9258   format %{ "b$cmp  $op1, zr, $lbl\t#@cmpL_reg_imm0_loop" %}
 9259 
 9260   ins_encode %{
 9261     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label));
 9262   %}
 9263 
 9264   ins_pipe(pipe_cmpz_branch);
 9265   ins_short_branch(1);
 9266 %}
 9267 
 9268 // Compare unsigned long with zero and branch near instructions
 9269 instruct cmpULEqNeLeGt_reg_imm0_branch(cmpOpUEqNeLeGt cmp, iRegL op1, immL0 zero, label lbl)
 9270 %{
 9271   // Same match rule as `far_cmpULEqNeLeGt_reg_imm0_branch'.
 9272   match(If cmp (CmpUL op1 zero));
 9273 
 9274   effect(USE op1, USE lbl);
 9275 
 9276   ins_cost(BRANCH_COST);
 9277 
 9278   format %{ "b$cmp  $op1, zr, $lbl\t#@cmpULEqNeLeGt_reg_imm0_branch" %}
 9279 
 9280   ins_encode %{
 9281     __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
 9282   %}
 9283 
 9284   ins_pipe(pipe_cmpz_branch);
 9285   ins_short_branch(1);
 9286 %}
 9287 
 9288 // Compare pointer with zero and branch near instructions
 9289 instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 zero, label lbl) %{
  // Same match rule as `far_cmpP_imm0_branch'.
 9291   match(If cmp (CmpP op1 zero));
 9292   effect(USE lbl);
 9293 
 9294   ins_cost(BRANCH_COST);
 9295   format %{ "b$cmp   $op1, zr, $lbl\t#@cmpP_imm0_branch" %}
 9296 
 9297   ins_encode %{
 9298     __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
 9299   %}
 9300 
 9301   ins_pipe(pipe_cmpz_branch);
 9302   ins_short_branch(1);
 9303 %}
 9304 
 9305 // Compare narrow pointer with zero and branch near instructions
 9306 instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 zero, label lbl) %{
  // Same match rule as `far_cmpN_imm0_branch'.
 9308   match(If cmp (CmpN op1 zero));
 9309   effect(USE lbl);
 9310 
 9311   ins_cost(BRANCH_COST);
 9312 
 9313   format %{ "b$cmp  $op1, zr, $lbl\t#@cmpN_imm0_branch" %}
 9314 
 9315   ins_encode %{
 9316     __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
 9317   %}
 9318 
 9319   ins_pipe(pipe_cmpz_branch);
 9320   ins_short_branch(1);
 9321 %}
 9322 
 9323 // Compare narrow pointer with pointer zero and branch near instructions
 9324 instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN op1, immP0 zero, label lbl) %{
 9325   // Same match rule as `far_cmpP_narrowOop_imm0_branch'.
 9326   match(If cmp (CmpP (DecodeN op1) zero));
 9327   effect(USE lbl);
 9328 
 9329   ins_cost(BRANCH_COST);
 9330   format %{ "b$cmp   $op1, zr, $lbl\t#@cmpP_narrowOop_imm0_branch" %}
 9331 
 9332   ins_encode %{
 9333     __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
 9334   %}
 9335 
 9336   ins_pipe(pipe_cmpz_branch);
 9337   ins_short_branch(1);
 9338 %}
 9339 
 9340 // Patterns for far (20KiB) variants
 9341 
 9342 instruct far_cmpFlag_branch(cmpOp cmp, rFlagsReg cr, label lbl) %{
 9343   match(If cmp cr);
 9344   effect(USE lbl);
 9345 
 9346   ins_cost(BRANCH_COST);
 9347   format %{ "far_b$cmp $cr, zr, $lbl\t#@far_cmpFlag_branch"%}
 9348 
 9349   ins_encode %{
 9350     __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($cr$$reg), *($lbl$$label), /* is_far */ true);
 9351   %}
 9352 
 9353   ins_pipe(pipe_cmpz_branch);
 9354 %}
 9355 
 9356 // Compare signed int and branch far instructions
 9357 instruct far_cmpI_branch(cmpOp cmp, iRegI op1, iRegI op2, label lbl) %{
 9358   match(If cmp (CmpI op1 op2));
 9359   effect(USE lbl);
 9360 
 9361   ins_cost(BRANCH_COST * 2);
 9362 
  // The format instruction [far_b$cmp] here is used as two instructions
  // in the MacroAssembler: b$not_cmp(op1, op2, done), j($lbl), bind(done)
 9365   format %{ "far_b$cmp  $op1, $op2, $lbl\t#@far_cmpI_branch" %}
 9366 
 9367   ins_encode %{
 9368     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
 9369   %}
 9370 
 9371   ins_pipe(pipe_cmp_branch);
 9372 %}
 9373 
 9374 instruct far_cmpI_loop(cmpOp cmp, iRegI op1, iRegI op2, label lbl) %{
 9375   match(CountedLoopEnd cmp (CmpI op1 op2));
 9376   effect(USE lbl);
 9377 
 9378   ins_cost(BRANCH_COST * 2);
 9379   format %{ "far_b$cmp  $op1, $op2, $lbl\t#@far_cmpI_loop" %}
 9380 
 9381   ins_encode %{
 9382     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
 9383   %}
 9384 
 9385   ins_pipe(pipe_cmp_branch);
 9386 %}
 9387 
 9388 instruct far_cmpU_branch(cmpOpU cmp, iRegI op1, iRegI op2, label lbl) %{
 9389   match(If cmp (CmpU op1 op2));
 9390   effect(USE lbl);
 9391 
 9392   ins_cost(BRANCH_COST * 2);
 9393   format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpU_branch" %}
 9394 
 9395   ins_encode %{
 9396     __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
 9397                        as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
 9398   %}
 9399 
 9400   ins_pipe(pipe_cmp_branch);
 9401 %}
 9402 
 9403 instruct far_cmpL_branch(cmpOp cmp, iRegL op1, iRegL op2, label lbl) %{
 9404   match(If cmp (CmpL op1 op2));
 9405   effect(USE lbl);
 9406 
 9407   ins_cost(BRANCH_COST * 2);
 9408   format %{ "far_b$cmp  $op1, $op2, $lbl\t#@far_cmpL_branch" %}
 9409 
 9410   ins_encode %{
 9411     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
 9412   %}
 9413 
 9414   ins_pipe(pipe_cmp_branch);
 9415 %}
 9416 
instruct far_cmpL_loop(cmpOp cmp, iRegL op1, iRegL op2, label lbl) %{
 9418   match(CountedLoopEnd cmp (CmpL op1 op2));
 9419   effect(USE lbl);
 9420 
 9421   ins_cost(BRANCH_COST * 2);
 9422   format %{ "far_b$cmp  $op1, $op2, $lbl\t#@far_cmpL_loop" %}
 9423 
 9424   ins_encode %{
 9425     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
 9426   %}
 9427 
 9428   ins_pipe(pipe_cmp_branch);
 9429 %}
 9430 
 9431 instruct far_cmpUL_branch(cmpOpU cmp, iRegL op1, iRegL op2, label lbl) %{
 9432   match(If cmp (CmpUL op1 op2));
 9433   effect(USE lbl);
 9434 
 9435   ins_cost(BRANCH_COST * 2);
 9436   format %{ "far_b$cmp  $op1, $op2, $lbl\t#@far_cmpUL_branch" %}
 9437 
 9438   ins_encode %{
 9439     __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
 9440                        as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
 9441   %}
 9442 
 9443   ins_pipe(pipe_cmp_branch);
 9444 %}
 9445 
 9446 instruct far_cmpP_branch(cmpOpU cmp, iRegP op1, iRegP op2, label lbl)
 9447 %{
 9448   match(If cmp (CmpP op1 op2));
 9449 
 9450   effect(USE lbl);
 9451 
 9452   ins_cost(BRANCH_COST * 2);
 9453 
 9454   format %{ "far_b$cmp  $op1, $op2, $lbl\t#@far_cmpP_branch" %}
 9455 
 9456   ins_encode %{
 9457     __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
 9458                        as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
 9459   %}
 9460 
 9461   ins_pipe(pipe_cmp_branch);
 9462 %}
 9463 
 9464 instruct far_cmpN_branch(cmpOpU cmp, iRegN op1, iRegN op2, label lbl)
 9465 %{
 9466   match(If cmp (CmpN op1 op2));
 9467 
 9468   effect(USE lbl);
 9469 
 9470   ins_cost(BRANCH_COST * 2);
 9471 
 9472   format %{ "far_b$cmp  $op1, $op2, $lbl\t#@far_cmpN_branch" %}
 9473 
 9474   ins_encode %{
 9475     __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
 9476                        as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
 9477   %}
 9478 
 9479   ins_pipe(pipe_cmp_branch);
 9480 %}
 9481 
 9482 // Float compare and branch instructions
 9483 instruct far_cmpF_branch(cmpOp cmp, fRegF op1, fRegF op2, label lbl)
 9484 %{
 9485   match(If cmp (CmpF op1 op2));
 9486 
 9487   effect(USE lbl);
 9488 
 9489   ins_cost(XFER_COST + BRANCH_COST * 2);
 9490   format %{ "far_float_b$cmp $op1, $op2, $lbl\t#@far_cmpF_branch"%}
 9491 
 9492   ins_encode %{
 9493     __ float_cmp_branch($cmp$$cmpcode, as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
 9494                         *($lbl$$label), /* is_far */ true);
 9495   %}
 9496 
 9497   ins_pipe(pipe_class_compare);
 9498 %}
 9499 
 9500 // Double compare and branch instructions
 9501 instruct far_cmpD_branch(cmpOp cmp, fRegD op1, fRegD op2, label lbl)
 9502 %{
 9503   match(If cmp (CmpD op1 op2));
 9504   effect(USE lbl);
 9505 
 9506   ins_cost(XFER_COST + BRANCH_COST * 2);
 9507   format %{ "far_double_b$cmp $op1, $op2, $lbl\t#@far_cmpD_branch"%}
 9508 
 9509   ins_encode %{
 9510     __ float_cmp_branch($cmp$$cmpcode | C2_MacroAssembler::double_branch_mask, as_FloatRegister($op1$$reg),
 9511                         as_FloatRegister($op2$$reg), *($lbl$$label), /* is_far */ true);
 9512   %}
 9513 
 9514   ins_pipe(pipe_class_compare);
 9515 %}
 9516 
 9517 instruct far_cmpI_reg_imm0_branch(cmpOp cmp, iRegI op1, immI0 zero, label lbl)
 9518 %{
 9519   match(If cmp (CmpI op1 zero));
 9520 
 9521   effect(USE op1, USE lbl);
 9522 
 9523   ins_cost(BRANCH_COST * 2);
 9524 
 9525   format %{ "far_b$cmp  $op1, zr, $lbl\t#@far_cmpI_reg_imm0_branch" %}
 9526 
 9527   ins_encode %{
 9528     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label), /* is_far */ true);
 9529   %}
 9530 
 9531   ins_pipe(pipe_cmpz_branch);
 9532 %}
 9533 
 9534 instruct far_cmpI_reg_imm0_loop(cmpOp cmp, iRegI op1, immI0 zero, label lbl)
 9535 %{
 9536   match(CountedLoopEnd cmp (CmpI op1 zero));
 9537 
 9538   effect(USE op1, USE lbl);
 9539 
 9540   ins_cost(BRANCH_COST * 2);
 9541 
 9542   format %{ "far_b$cmp  $op1, zr, $lbl\t#@far_cmpI_reg_imm0_loop" %}
 9543 
 9544   ins_encode %{
 9545     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label), /* is_far */ true);
 9546   %}
 9547 
 9548   ins_pipe(pipe_cmpz_branch);
 9549 %}
 9550 
 9551 instruct far_cmpUEqNeLeGt_imm0_branch(cmpOpUEqNeLeGt cmp, iRegI op1, immI0 zero, label lbl)
 9552 %{
 9553   match(If cmp (CmpU op1 zero));
 9554 
 9555   effect(USE op1, USE lbl);
 9556 
 9557   ins_cost(BRANCH_COST * 2);
 9558 
 9559   format %{ "far_b$cmp  $op1, zr, $lbl\t#@far_cmpUEqNeLeGt_imm0_branch" %}
 9560 
 9561   ins_encode %{
 9562     __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
 9563   %}
 9564 
 9565   ins_pipe(pipe_cmpz_branch);
 9566 %}
 9567 
// Unsigned compare lt/ge instructs have no short variant with the same match rule
 9569 instruct far_cmpULtGe_reg_imm0_branch(cmpOpULtGe cmp, iRegI op1, immI0 zero, label lbl)
 9570 %{
 9571   match(If cmp (CmpU op1 zero));
 9572 
 9573   effect(USE op1, USE lbl);
 9574 
 9575   ins_cost(BRANCH_COST);
 9576 
 9577   format %{ "j  $lbl if $cmp == ge\t#@far_cmpULtGe_reg_imm0_branch" %}
 9578 
 9579   ins_encode(riscv_enc_far_cmpULtGe_imm0_branch(cmp, op1, lbl));
 9580 
 9581   ins_pipe(pipe_cmpz_branch);
 9582 %}
 9583 
 9584 instruct far_cmpL_reg_imm0_branch(cmpOp cmp, iRegL op1, immL0 zero, label lbl)
 9585 %{
 9586   match(If cmp (CmpL op1 zero));
 9587 
 9588   effect(USE op1, USE lbl);
 9589 
 9590   ins_cost(BRANCH_COST * 2);
 9591 
 9592   format %{ "far_b$cmp  $op1, zr, $lbl\t#@far_cmpL_reg_imm0_branch" %}
 9593 
 9594   ins_encode %{
 9595     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label), /* is_far */ true);
 9596   %}
 9597 
 9598   ins_pipe(pipe_cmpz_branch);
 9599 %}
 9600 
 9601 instruct far_cmpL_reg_imm0_loop(cmpOp cmp, iRegL op1, immL0 zero, label lbl)
 9602 %{
 9603   match(CountedLoopEnd cmp (CmpL op1 zero));
 9604 
 9605   effect(USE op1, USE lbl);
 9606 
 9607   ins_cost(BRANCH_COST * 2);
 9608 
 9609   format %{ "far_b$cmp  $op1, zr, $lbl\t#@far_cmpL_reg_imm0_loop" %}
 9610 
 9611   ins_encode %{
 9612     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label), /* is_far */ true);
 9613   %}
 9614 
 9615   ins_pipe(pipe_cmpz_branch);
 9616 %}
 9617 
 9618 instruct far_cmpULEqNeLeGt_reg_imm0_branch(cmpOpUEqNeLeGt cmp, iRegL op1, immL0 zero, label lbl)
 9619 %{
 9620   match(If cmp (CmpUL op1 zero));
 9621 
 9622   effect(USE op1, USE lbl);
 9623 
 9624   ins_cost(BRANCH_COST * 2);
 9625 
 9626   format %{ "far_b$cmp  $op1, zr, $lbl\t#@far_cmpULEqNeLeGt_reg_imm0_branch" %}
 9627 
 9628   ins_encode %{
 9629     __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
 9630   %}
 9631 
 9632   ins_pipe(pipe_cmpz_branch);
 9633 %}
 9634 
// Unsigned compare lt/ge instructs have no short variant with the same match rule
 9636 instruct far_cmpULLtGe_reg_imm0_branch(cmpOpULtGe cmp, iRegL op1, immL0 zero, label lbl)
 9637 %{
 9638   match(If cmp (CmpUL op1 zero));
 9639 
 9640   effect(USE op1, USE lbl);
 9641 
 9642   ins_cost(BRANCH_COST);
 9643 
 9644   format %{ "j  $lbl if $cmp == ge\t#@far_cmpULLtGe_reg_imm0_branch" %}
 9645 
 9646   ins_encode(riscv_enc_far_cmpULtGe_imm0_branch(cmp, op1, lbl));
 9647 
 9648   ins_pipe(pipe_cmpz_branch);
 9649 %}
 9650 
 9651 instruct far_cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 zero, label lbl) %{
 9652   match(If cmp (CmpP op1 zero));
 9653   effect(USE lbl);
 9654 
 9655   ins_cost(BRANCH_COST * 2);
 9656   format %{ "far_b$cmp   $op1, zr, $lbl\t#@far_cmpP_imm0_branch" %}
 9657 
 9658   ins_encode %{
 9659     __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
 9660   %}
 9661 
 9662   ins_pipe(pipe_cmpz_branch);
 9663 %}
 9664 
 9665 instruct far_cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 zero, label lbl) %{
 9666   match(If cmp (CmpN op1 zero));
 9667   effect(USE lbl);
 9668 
 9669   ins_cost(BRANCH_COST * 2);
 9670 
 9671   format %{ "far_b$cmp  $op1, zr, $lbl\t#@far_cmpN_imm0_branch" %}
 9672 
 9673   ins_encode %{
 9674     __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
 9675   %}
 9676 
 9677   ins_pipe(pipe_cmpz_branch);
 9678 %}
 9679 
 9680 instruct far_cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN op1, immP0 zero, label lbl) %{
 9681   match(If cmp (CmpP (DecodeN op1) zero));
 9682   effect(USE lbl);
 9683 
 9684   ins_cost(BRANCH_COST * 2);
 9685   format %{ "far_b$cmp   $op1, zr, $lbl\t#@far_cmpP_narrowOop_imm0_branch" %}
 9686 
 9687   ins_encode %{
 9688     __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
 9689   %}
 9690 
 9691   ins_pipe(pipe_cmpz_branch);
 9692 %}
 9693 
 9694 // ============================================================================
 9695 // Conditional Move Instructions
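//
// Base RV64 has no conditional-move instruction, so enc_cmove is assumed
// to expand to a short branch around a register move, along the lines of
//
//   b$not_cop  op1, op2, done
//   mv         dst, src
//   done:
//
// (possibly replaced by a branch-free sequence when an extension such as
// Zicond is available).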
 9696 instruct cmovI_cmpI(iRegINoSp dst, iRegI src, iRegI op1, iRegI op2, cmpOp cop) %{
 9697   match(Set dst (CMoveI (Binary cop (CmpI op1 op2)) (Binary dst src)));
 9698   ins_cost(ALU_COST + BRANCH_COST);
 9699 
 9700   format %{
 9701     "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpI\n\t"
 9702   %}
 9703 
 9704   ins_encode %{
 9705     __ enc_cmove($cop$$cmpcode,
 9706                  as_Register($op1$$reg), as_Register($op2$$reg),
 9707                  as_Register($dst$$reg), as_Register($src$$reg));
 9708   %}
 9709 
 9710   ins_pipe(pipe_class_compare);
 9711 %}
 9712 
 9713 instruct cmovI_cmpU(iRegINoSp dst, iRegI src, iRegI op1, iRegI op2, cmpOpU cop) %{
 9714   match(Set dst (CMoveI (Binary cop (CmpU op1 op2)) (Binary dst src)));
 9715   ins_cost(ALU_COST + BRANCH_COST);
 9716 
 9717   format %{
 9718     "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpU\n\t"
 9719   %}
 9720 
 9721   ins_encode %{
 9722     __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
 9723                  as_Register($op1$$reg), as_Register($op2$$reg),
 9724                  as_Register($dst$$reg), as_Register($src$$reg));
 9725   %}
 9726 
 9727   ins_pipe(pipe_class_compare);
 9728 %}
 9729 
 9730 instruct cmovI_cmpL(iRegINoSp dst, iRegI src, iRegL op1, iRegL op2, cmpOp cop) %{
 9731   match(Set dst (CMoveI (Binary cop (CmpL op1 op2)) (Binary dst src)));
 9732   ins_cost(ALU_COST + BRANCH_COST);
 9733 
 9734   format %{
 9735     "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpL\n\t"
 9736   %}
 9737 
 9738   ins_encode %{
 9739     __ enc_cmove($cop$$cmpcode,
 9740                  as_Register($op1$$reg), as_Register($op2$$reg),
 9741                  as_Register($dst$$reg), as_Register($src$$reg));
 9742   %}
 9743 
 9744   ins_pipe(pipe_class_compare);
 9745 %}
 9746 
 9747 instruct cmovI_cmpUL(iRegINoSp dst, iRegI src, iRegL op1, iRegL op2, cmpOpU cop) %{
 9748   match(Set dst (CMoveI (Binary cop (CmpUL op1 op2)) (Binary dst src)));
 9749   ins_cost(ALU_COST + BRANCH_COST);
 9750 
 9751   format %{
 9752     "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpUL\n\t"
 9753   %}
 9754 
 9755   ins_encode %{
 9756     __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
 9757                  as_Register($op1$$reg), as_Register($op2$$reg),
 9758                  as_Register($dst$$reg), as_Register($src$$reg));
 9759   %}
 9760 
 9761   ins_pipe(pipe_class_compare);
 9762 %}
 9763 
 9764 instruct cmovI_cmpN(iRegINoSp dst, iRegI src, iRegN op1, iRegN op2, cmpOpU cop) %{
 9765   match(Set dst (CMoveI (Binary cop (CmpN op1 op2)) (Binary dst src)));
 9766   ins_cost(ALU_COST + BRANCH_COST);
 9767 
 9768   format %{
 9769     "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpN\n\t"
 9770   %}
 9771 
 9772   ins_encode %{
 9773     __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
 9774                  as_Register($op1$$reg), as_Register($op2$$reg),
 9775                  as_Register($dst$$reg), as_Register($src$$reg));
 9776   %}
 9777 
 9778   ins_pipe(pipe_class_compare);
 9779 %}
 9780 
 9781 instruct cmovI_cmpP(iRegINoSp dst, iRegI src, iRegP op1, iRegP op2, cmpOpU cop) %{
 9782   match(Set dst (CMoveI (Binary cop (CmpP op1 op2)) (Binary dst src)));
 9783   ins_cost(ALU_COST + BRANCH_COST);
 9784 
 9785   format %{
 9786     "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpP\n\t"
 9787   %}
 9788 
 9789   ins_encode %{
 9790     __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
 9791                  as_Register($op1$$reg), as_Register($op2$$reg),
 9792                  as_Register($dst$$reg), as_Register($src$$reg));
 9793   %}
 9794 
 9795   ins_pipe(pipe_class_compare);
 9796 %}
 9797 
 9798 instruct cmovL_cmpL(iRegLNoSp dst, iRegL src, iRegL op1, iRegL op2, cmpOp cop) %{
 9799   match(Set dst (CMoveL (Binary cop (CmpL op1 op2)) (Binary dst src)));
 9800   ins_cost(ALU_COST + BRANCH_COST);
 9801 
 9802   format %{
 9803     "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpL\n\t"
 9804   %}
 9805 
 9806   ins_encode %{
 9807     __ enc_cmove($cop$$cmpcode,
 9808                  as_Register($op1$$reg), as_Register($op2$$reg),
 9809                  as_Register($dst$$reg), as_Register($src$$reg));
 9810   %}
 9811 
 9812   ins_pipe(pipe_class_compare);
 9813 %}
 9814 
 9815 instruct cmovL_cmpUL(iRegLNoSp dst, iRegL src, iRegL op1, iRegL op2, cmpOpU cop) %{
 9816   match(Set dst (CMoveL (Binary cop (CmpUL op1 op2)) (Binary dst src)));
 9817   ins_cost(ALU_COST + BRANCH_COST);
 9818 
 9819   format %{
 9820     "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpUL\n\t"
 9821   %}
 9822 
 9823   ins_encode %{
 9824     __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
 9825                  as_Register($op1$$reg), as_Register($op2$$reg),
 9826                  as_Register($dst$$reg), as_Register($src$$reg));
 9827   %}
 9828 
 9829   ins_pipe(pipe_class_compare);
 9830 %}
 9831 
 9832 instruct cmovL_cmpI(iRegLNoSp dst, iRegL src, iRegI op1, iRegI op2, cmpOp cop) %{
 9833   match(Set dst (CMoveL (Binary cop (CmpI op1 op2)) (Binary dst src)));
 9834   ins_cost(ALU_COST + BRANCH_COST);
 9835 
 9836   format %{
 9837     "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpI\n\t"
 9838   %}
 9839 
 9840   ins_encode %{
 9841     __ enc_cmove($cop$$cmpcode,
 9842                  as_Register($op1$$reg), as_Register($op2$$reg),
 9843                  as_Register($dst$$reg), as_Register($src$$reg));
 9844   %}
 9845 
 9846   ins_pipe(pipe_class_compare);
 9847 %}
 9848 
 9849 instruct cmovL_cmpU(iRegLNoSp dst, iRegL src, iRegI op1, iRegI op2, cmpOpU cop) %{
 9850   match(Set dst (CMoveL (Binary cop (CmpU op1 op2)) (Binary dst src)));
 9851   ins_cost(ALU_COST + BRANCH_COST);
 9852 
 9853   format %{
 9854     "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpU\n\t"
 9855   %}
 9856 
 9857   ins_encode %{
 9858     __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
 9859                  as_Register($op1$$reg), as_Register($op2$$reg),
 9860                  as_Register($dst$$reg), as_Register($src$$reg));
 9861   %}
 9862 
 9863   ins_pipe(pipe_class_compare);
 9864 %}
 9865 
 9866 instruct cmovL_cmpN(iRegLNoSp dst, iRegL src, iRegN op1, iRegN op2, cmpOpU cop) %{
 9867   match(Set dst (CMoveL (Binary cop (CmpN op1 op2)) (Binary dst src)));
 9868   ins_cost(ALU_COST + BRANCH_COST);
 9869 
 9870   format %{
 9871     "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpN\n\t"
 9872   %}
 9873 
 9874   ins_encode %{
 9875     __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
 9876                  as_Register($op1$$reg), as_Register($op2$$reg),
 9877                  as_Register($dst$$reg), as_Register($src$$reg));
 9878   %}
 9879 
 9880   ins_pipe(pipe_class_compare);
 9881 %}
 9882 
 9883 instruct cmovL_cmpP(iRegLNoSp dst, iRegL src, iRegP op1, iRegP op2, cmpOpU cop) %{
 9884   match(Set dst (CMoveL (Binary cop (CmpP op1 op2)) (Binary dst src)));
 9885   ins_cost(ALU_COST + BRANCH_COST);
 9886 
 9887   format %{
 9888     "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpP\n\t"
 9889   %}
 9890 
 9891   ins_encode %{
 9892     __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
 9893                  as_Register($op1$$reg), as_Register($op2$$reg),
 9894                  as_Register($dst$$reg), as_Register($src$$reg));
 9895   %}
 9896 
 9897   ins_pipe(pipe_class_compare);
 9898 %}
 9899 
 9900 // ============================================================================
 9901 // Procedure Call/Return Instructions
 9902 
 9903 // Call Java Static Instruction
 9904 // Note: If this code changes, the corresponding ret_addr_offset() and
 9905 //       compute_padding() functions will have to be adjusted.
 9906 instruct CallStaticJavaDirect(method meth)
 9907 %{
 9908   match(CallStaticJava);
 9909 
 9910   effect(USE meth);
 9911 
 9912   ins_cost(BRANCH_COST);
 9913 
 9914   format %{ "CALL,static $meth\t#@CallStaticJavaDirect" %}
 9915 
 9916   ins_encode(riscv_enc_java_static_call(meth),
 9917              riscv_enc_call_epilog);
 9918 
 9919   ins_pipe(pipe_class_call);
 9920   ins_alignment(4);
 9921 %}
 9922 
 9925 // Call Java Dynamic Instruction
 9926 // Note: If this code changes, the corresponding ret_addr_offset() and
 9927 //       compute_padding() functions will have to be adjusted.
 9928 instruct CallDynamicJavaDirect(method meth)
 9929 %{
 9930   match(CallDynamicJava);
 9931 
 9932   effect(USE meth);
 9933 
 9934   ins_cost(BRANCH_COST + ALU_COST * 5);
 9935 
 9936   format %{ "CALL,dynamic $meth\t#@CallDynamicJavaDirect" %}
 9937 
 9938   ins_encode(riscv_enc_java_dynamic_call(meth),
 9939              riscv_enc_call_epilog);
 9940 
 9941   ins_pipe(pipe_class_call);
 9942   ins_alignment(4);
 9943 %}
 9944 
 9945 // Call Runtime Instruction
 9946 
 9947 instruct CallRuntimeDirect(method meth)
 9948 %{
 9949   match(CallRuntime);
 9950 
 9951   effect(USE meth);
 9952 
 9953   ins_cost(BRANCH_COST);
 9954 
 9955   format %{ "CALL, runtime $meth\t#@CallRuntimeDirect" %}
 9956 
 9957   ins_encode(riscv_enc_java_to_runtime(meth));
 9958 
 9959   ins_pipe(pipe_class_call);
 9960 %}
 9961 
 9962 // Call Runtime Instruction
 9963 
 9964 instruct CallLeafDirect(method meth)
 9965 %{
 9966   match(CallLeaf);
 9967 
 9968   effect(USE meth);
 9969 
 9970   ins_cost(BRANCH_COST);
 9971 
 9972   format %{ "CALL, runtime leaf $meth\t#@CallLeafDirect" %}
 9973 
 9974   ins_encode(riscv_enc_java_to_runtime(meth));
 9975 
 9976   ins_pipe(pipe_class_call);
 9977 %}
 9978 
 9979 // Call Runtime Instruction without safepoint and with vector arguments
 9980 
 9981 instruct CallLeafDirectVector(method meth)
 9982 %{
 9983   match(CallLeafVector);
 9984 
 9985   effect(USE meth);
 9986 
 9987   ins_cost(BRANCH_COST);
 9988 
 9989   format %{ "CALL, runtime leaf vector $meth" %}
 9990 
 9991   ins_encode(riscv_enc_java_to_runtime(meth));
 9992 
 9993   ins_pipe(pipe_class_call);
 9994 %}
 9995 
 9996 // Call Runtime Instruction
 9997 
 9998 instruct CallLeafNoFPDirect(method meth)
 9999 %{
10000   match(CallLeafNoFP);
10001 
10002   effect(USE meth);
10003 
10004   ins_cost(BRANCH_COST);
10005 
10006   format %{ "CALL, runtime leaf nofp $meth\t#@CallLeafNoFPDirect" %}
10007 
10008   ins_encode(riscv_enc_java_to_runtime(meth));
10009 
10010   ins_pipe(pipe_class_call);
10011 %}
10012 
10013 // ============================================================================
10014 // Partial Subtype Check
10015 //
// Search the receiving subklass's secondary supers (superklass) array for
// an instance of the superklass.  Set a hidden internal cache on a hit
// (cache is checked with exposed code in gen_subtype_check()).  Return
// zero for a hit.  The encoding ALSO sets flags.
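//
// At the Java level this backs the slow path of checks such as
//
//   obj instanceof SomeInterface   // likewise checkcast and array stores
//
// when the superklass is not found via the fast primary-supers check and
// the secondary supers array has to be scanned.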
10020 
10021 instruct partialSubtypeCheck(iRegP_R15 result, iRegP_R14 sub, iRegP_R10 super, iRegP_R12 tmp, rFlagsReg cr)
10022 %{
10023   match(Set result (PartialSubtypeCheck sub super));
10024   effect(KILL tmp, KILL cr);
10025 
10026   ins_cost(11 * DEFAULT_COST);
10027   format %{ "partialSubtypeCheck $result, $sub, $super\t#@partialSubtypeCheck" %}
10028 
10029   ins_encode(riscv_enc_partial_subtype_check(sub, super, tmp, result));
10030 
10031   opcode(0x1); // Force zero of result reg on hit
10032 
10033   ins_pipe(pipe_class_memory);
10034 %}
10035 
10036 instruct partialSubtypeCheckConstSuper(iRegP_R14 sub, iRegP_R10 super_reg, immP super_con, iRegP_R15 result,
10037                                        iRegP_R11 tmpR11, iRegP_R12 tmpR12, iRegP_R13 tmpR13, iRegP_R16 tmpR16, rFlagsReg cr)
10038 %{
10039   predicate(UseSecondarySupersTable);
10040   match(Set result (PartialSubtypeCheck sub (Binary super_reg super_con)));
10041   effect(TEMP tmpR11, TEMP tmpR12, TEMP tmpR13, TEMP tmpR16, KILL cr);
10042 
10043   ins_cost(7 * DEFAULT_COST); // needs to be less than competing nodes
10044   format %{ "partialSubtypeCheck $result, $sub, $super_reg, $super_con" %}
10045 
10046   ins_encode %{
10047     bool success = false;
10048     u1 super_klass_slot = ((Klass*)$super_con$$constant)->hash_slot();
10049     if (InlineSecondarySupersTest) {
10050       success = __ lookup_secondary_supers_table($sub$$Register, $super_reg$$Register, $result$$Register,
10051                                                  $tmpR11$$Register, $tmpR12$$Register, $tmpR13$$Register,
10052                                                  $tmpR16$$Register, super_klass_slot);
10053     } else {
10054       address call = __ reloc_call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_stub(super_klass_slot)));
10055       success = (call != nullptr);
10056     }
10057     if (!success) {
10058       ciEnv::current()->record_failure("CodeCache is full");
10059       return;
10060     }
10061   %}
10062 
10063   ins_pipe(pipe_class_memory);
10064 %}
10065 
10066 instruct partialSubtypeCheckVsZero(iRegP_R15 result, iRegP_R14 sub, iRegP_R10 super, iRegP_R12 tmp,
10067                                    immP0 zero, rFlagsReg cr)
10068 %{
10069   match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
10070   effect(KILL tmp, KILL result);
10071 
10072   ins_cost(11 * DEFAULT_COST);
10073   format %{ "partialSubtypeCheck $result, $sub, $super == 0\t#@partialSubtypeCheckVsZero" %}
10074 
10075   ins_encode(riscv_enc_partial_subtype_check(sub, super, tmp, result));
10076 
10077   opcode(0x0); // Don't zero result reg on hit
10078 
10079   ins_pipe(pipe_class_memory);
10080 %}
10081 
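// The U/L suffixes below name the compact-string coders of the two inputs:
// U = UTF-16, L = Latin-1.  string_compareU handles two UTF-16 strings,
// string_compareL two Latin-1 strings, and string_compareUL/LU a mixed
// pair, mirroring StrIntrinsicNode::UU/LL/UL/LU.  These are typically
// reached from the String.compareTo intrinsics.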
10082 instruct string_compareU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
10083                          iRegI_R10 result, iRegP_R28 tmp1, iRegL_R29 tmp2, iRegL_R30 tmp3, rFlagsReg cr)
10084 %{
10085   predicate(!UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::UU);
10086   match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
10087   effect(KILL tmp1, KILL tmp2, KILL tmp3, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
10088 
10089   format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareU" %}
10090   ins_encode %{
10091     // Count is in 8-bit bytes; non-Compact chars are 16 bits.
10092     __ string_compare($str1$$Register, $str2$$Register,
10093                       $cnt1$$Register, $cnt2$$Register, $result$$Register,
10094                       $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
10095                       StrIntrinsicNode::UU);
10096   %}
10097   ins_pipe(pipe_class_memory);
10098 %}
10099 
10100 instruct string_compareL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
10101                          iRegI_R10 result, iRegP_R28 tmp1, iRegL_R29 tmp2, iRegL_R30 tmp3, rFlagsReg cr)
10102 %{
10103   predicate(!UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::LL);
10104   match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
10105   effect(KILL tmp1, KILL tmp2, KILL tmp3, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
10106 
10107   format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareL" %}
10108   ins_encode %{
10109     __ string_compare($str1$$Register, $str2$$Register,
10110                       $cnt1$$Register, $cnt2$$Register, $result$$Register,
10111                       $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
10112                       StrIntrinsicNode::LL);
10113   %}
10114   ins_pipe(pipe_class_memory);
10115 %}
10116 
10117 instruct string_compareUL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
10118                           iRegI_R10 result, iRegP_R28 tmp1, iRegL_R29 tmp2, iRegL_R30 tmp3, rFlagsReg cr)
10119 %{
10120   predicate(!UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::UL);
10121   match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
10122   effect(KILL tmp1, KILL tmp2, KILL tmp3, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
10123 
  format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareUL" %}
10125   ins_encode %{
10126     __ string_compare($str1$$Register, $str2$$Register,
10127                       $cnt1$$Register, $cnt2$$Register, $result$$Register,
10128                       $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
10129                       StrIntrinsicNode::UL);
10130   %}
10131   ins_pipe(pipe_class_memory);
10132 %}
10133 
10134 instruct string_compareLU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
10135                           iRegI_R10 result, iRegP_R28 tmp1, iRegL_R29 tmp2, iRegL_R30 tmp3,
10136                           rFlagsReg cr)
10137 %{
10138   predicate(!UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::LU);
10139   match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
10140   effect(KILL tmp1, KILL tmp2, KILL tmp3, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
10141 
10142   format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareLU" %}
10143   ins_encode %{
10144     __ string_compare($str1$$Register, $str2$$Register,
10145                       $cnt1$$Register, $cnt2$$Register, $result$$Register,
10146                       $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
10147                       StrIntrinsicNode::LU);
10148   %}
10149   ins_pipe(pipe_class_memory);
10150 %}
10151 
10152 instruct string_indexofUU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
10153                           iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
10154                           iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
10155 %{
10156   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
10157   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
10158   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP_DEF result,
10159          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
10160 
10161   format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}
10162   ins_encode %{
10163     __ string_indexof($str1$$Register, $str2$$Register,
10164                       $cnt1$$Register, $cnt2$$Register,
10165                       $tmp1$$Register, $tmp2$$Register,
10166                       $tmp3$$Register, $tmp4$$Register,
10167                       $tmp5$$Register, $tmp6$$Register,
10168                       $result$$Register, StrIntrinsicNode::UU);
10169   %}
10170   ins_pipe(pipe_class_memory);
10171 %}
10172 
10173 instruct string_indexofLL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
10174                           iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
10175                           iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
10176 %{
10177   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
10178   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
10179   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP_DEF result,
10180          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
10181 
10182   format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}
10183   ins_encode %{
10184     __ string_indexof($str1$$Register, $str2$$Register,
10185                       $cnt1$$Register, $cnt2$$Register,
10186                       $tmp1$$Register, $tmp2$$Register,
10187                       $tmp3$$Register, $tmp4$$Register,
10188                       $tmp5$$Register, $tmp6$$Register,
10189                       $result$$Register, StrIntrinsicNode::LL);
10190   %}
10191   ins_pipe(pipe_class_memory);
10192 %}
10193 
10194 instruct string_indexofUL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
10195                           iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
10196                           iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
10197 %{
10198   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
10199   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
10200   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);

  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}
  ins_encode %{
10205     __ string_indexof($str1$$Register, $str2$$Register,
10206                       $cnt1$$Register, $cnt2$$Register,
10207                       $tmp1$$Register, $tmp2$$Register,
10208                       $tmp3$$Register, $tmp4$$Register,
10209                       $tmp5$$Register, $tmp6$$Register,
10210                       $result$$Register, StrIntrinsicNode::UL);
10211   %}
10212   ins_pipe(pipe_class_memory);
10213 %}
10214 
10215 instruct string_indexof_conUU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2,
10216                               immI_le_4 int_cnt2, iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
10217                               iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
10218 %{
10219   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
10220   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
10221   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, TEMP_DEF result,
10222          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
10223 
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}
  ins_encode %{
10227     int icnt2 = (int)$int_cnt2$$constant;
10228     __ string_indexof_linearscan($str1$$Register, $str2$$Register,
10229                                  $cnt1$$Register, zr,
10230                                  $tmp1$$Register, $tmp2$$Register,
10231                                  $tmp3$$Register, $tmp4$$Register,
10232                                  icnt2, $result$$Register, StrIntrinsicNode::UU);
10233   %}
10234   ins_pipe(pipe_class_memory);
10235 %}
10236 
10237 instruct string_indexof_conLL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2,
10238                               immI_le_4 int_cnt2, iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
10239                               iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
10240 %{
10241   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
10242   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
10243   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, TEMP_DEF result,
10244          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
10245 
10246   format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}
10247   ins_encode %{
10248     int icnt2 = (int)$int_cnt2$$constant;
10249     __ string_indexof_linearscan($str1$$Register, $str2$$Register,
10250                                  $cnt1$$Register, zr,
10251                                  $tmp1$$Register, $tmp2$$Register,
10252                                  $tmp3$$Register, $tmp4$$Register,
10253                                  icnt2, $result$$Register, StrIntrinsicNode::LL);
10254   %}
10255   ins_pipe(pipe_class_memory);
10256 %}
10257 
10258 instruct string_indexof_conUL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2,
10259                               immI_1 int_cnt2, iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
10260                               iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
10261 %{
10262   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
10263   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
10264   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, TEMP_DEF result,
10265          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
10266 
10267   format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}
10268   ins_encode %{
10269     int icnt2 = (int)$int_cnt2$$constant;
10270     __ string_indexof_linearscan($str1$$Register, $str2$$Register,
10271                                  $cnt1$$Register, zr,
10272                                  $tmp1$$Register, $tmp2$$Register,
10273                                  $tmp3$$Register, $tmp4$$Register,
10274                                  icnt2, $result$$Register, StrIntrinsicNode::UL);
10275   %}
10276   ins_pipe(pipe_class_memory);
10277 %}
10278 
10279 instruct stringU_indexof_char(iRegP_R11 str1, iRegI_R12 cnt1, iRegI_R13 ch,
10280                               iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
10281                               iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
10282 %{
10283   match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
10284   predicate(!UseRVV && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U));
10285   effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch, TEMP_DEF result,
10286          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
10287 
10288   format %{ "StringUTF16 IndexOf char[] $str1, $cnt1, $ch -> $result" %}
10289   ins_encode %{
10290     __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
10291                            $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register, $tmp4$$Register, false /* isL */);
10293   %}
10294   ins_pipe(pipe_class_memory);
10295 %}
10296 
10297 
10298 instruct stringL_indexof_char(iRegP_R11 str1, iRegI_R12 cnt1, iRegI_R13 ch,
10299                               iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
10300                               iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
10301 %{
10302   match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
10303   predicate(!UseRVV && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L));
10304   effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch, TEMP_DEF result,
10305          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
10306 
10307   format %{ "StringLatin1 IndexOf char[] $str1, $cnt1, $ch -> $result" %}
10308   ins_encode %{
10309     __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
10310                            $result$$Register, $tmp1$$Register, $tmp2$$Register,
10311                            $tmp3$$Register, $tmp4$$Register, true /* isL */);
10312   %}
10313   ins_pipe(pipe_class_memory);
10314 %}
10315 
10316 // clearing of an array
10317 instruct clearArray_reg_reg(iRegL_R29 cnt, iRegP_R28 base, iRegP_R30 tmp1,
10318                             iRegP_R31 tmp2, rFlagsReg cr, Universe dummy)
10319 %{
  // temp registers must match the ones used in StubGenerator::generate_zero_blocks()
10321   predicate(UseBlockZeroing || !UseRVV);
10322   match(Set dummy (ClearArray cnt base));
10323   effect(USE_KILL cnt, USE_KILL base, TEMP tmp1, TEMP tmp2, KILL cr);
10324 
10325   ins_cost(4 * DEFAULT_COST);
10326   format %{ "ClearArray $cnt, $base\t#@clearArray_reg_reg" %}
10327 
10328   ins_encode %{
10329     address tpc = __ zero_words($base$$Register, $cnt$$Register);
10330     if (tpc == nullptr) {
10331       ciEnv::current()->record_failure("CodeCache is full");
10332       return;
10333     }
10334   %}
10335 
10336   ins_pipe(pipe_class_memory);
10337 %}
10338 
10339 instruct clearArray_imm_reg(immL cnt, iRegP_R28 base, Universe dummy, rFlagsReg cr)
10340 %{
10341   predicate(!UseRVV && (uint64_t)n->in(2)->get_long()
10342             < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
10343   match(Set dummy (ClearArray cnt base));
10344   effect(USE_KILL base, KILL cr);
10345 
10346   ins_cost(4 * DEFAULT_COST);
10347   format %{ "ClearArray $cnt, $base\t#@clearArray_imm_reg" %}
10348 
10349   ins_encode %{
10350     __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
10351   %}
10352 
10353   ins_pipe(pipe_class_memory);
10354 %}
10355 
10356 instruct string_equalsL(iRegP_R11 str1, iRegP_R13 str2, iRegI_R14 cnt,
10357                         iRegI_R10 result, rFlagsReg cr)
10358 %{
10359   predicate(!UseRVV && ((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
10360   match(Set result (StrEquals (Binary str1 str2) cnt));
10361   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);
10362 
10363   format %{ "String Equals $str1, $str2, $cnt -> $result\t#@string_equalsL" %}
10364   ins_encode %{
10365     // Count is in 8-bit bytes; non-Compact chars are 16 bits.
10366     __ string_equals($str1$$Register, $str2$$Register,
10367                      $result$$Register, $cnt$$Register);
10368   %}
10369   ins_pipe(pipe_class_memory);
10370 %}
10371 
10372 instruct array_equalsB(iRegP_R11 ary1, iRegP_R12 ary2, iRegI_R10 result,
10373                        iRegP_R13 tmp1, iRegP_R14 tmp2, iRegP_R15 tmp3)
10374 %{
10375   predicate(!UseRVV && ((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
10376   match(Set result (AryEq ary1 ary2));
10377   effect(USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3);
10378 
10379   format %{ "Array Equals $ary1, $ary2 -> $result\t#@array_equalsB // KILL all" %}
10380   ins_encode %{
10381     __ arrays_equals($ary1$$Register, $ary2$$Register,
10382                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
10383                      $result$$Register, 1);
10384   %}
10385   ins_pipe(pipe_class_memory);
10386 %}
10387 
10388 instruct array_equalsC(iRegP_R11 ary1, iRegP_R12 ary2, iRegI_R10 result,
10389                        iRegP_R13 tmp1, iRegP_R14 tmp2, iRegP_R15 tmp3)
10390 %{
10391   predicate(!UseRVV && ((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
10392   match(Set result (AryEq ary1 ary2));
10393   effect(USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3);
10394 
10395   format %{ "Array Equals $ary1, $ary2 -> $result\t#@array_equalsC // KILL all" %}
10396   ins_encode %{
10397     __ arrays_equals($ary1$$Register, $ary2$$Register,
10398                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
10399                      $result$$Register, 2);
10400   %}
10401   ins_pipe(pipe_class_memory);
10402 %}
10403 
10404 // fast ArraysSupport.vectorizedHashCode
10405 instruct arrays_hashcode(iRegP_R11 ary, iRegI_R12 cnt, iRegI_R10 result, immI basic_type,
10406                          iRegLNoSp tmp1, iRegLNoSp tmp2,
10407                          iRegLNoSp tmp3, iRegLNoSp tmp4,
10408                          iRegLNoSp tmp5, iRegLNoSp tmp6, rFlagsReg cr)
10409 %{
10410   match(Set result (VectorizedHashCode (Binary ary cnt) (Binary result basic_type)));
10411   effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
10412          USE_KILL ary, USE_KILL cnt, USE basic_type, KILL cr);
10413 
10414   format %{ "Array HashCode array[] $ary,$cnt,$result,$basic_type -> $result   // KILL all" %}
10415   ins_encode %{
10416     __ arrays_hashcode($ary$$Register, $cnt$$Register, $result$$Register,
10417                        $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
10418                        $tmp4$$Register, $tmp5$$Register, $tmp6$$Register,
10419                        (BasicType)$basic_type$$constant);
10420   %}
10421   ins_pipe(pipe_class_memory);
10422 %}
10423 
10424 // ============================================================================
10425 // Safepoint Instructions
10426 
10427 instruct safePoint(iRegP poll)
10428 %{
10429   match(SafePoint poll);
10430 
10431   ins_cost(2 * LOAD_COST);
10432   format %{
10433     "lwu zr, [$poll]\t# Safepoint: poll for GC, #@safePoint"
10434   %}
10435   ins_encode %{
10436     __ read_polling_page(as_Register($poll$$reg), 0, relocInfo::poll_type);
10437   %}
10438   ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
10439 %}
10440 
10441 // ============================================================================
// This name is KNOWN to the ADLC and cannot be changed.
// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
// for this node.
10445 instruct tlsLoadP(javaThread_RegP dst)
10446 %{
10447   match(Set dst (ThreadLocal));
10448 
10449   ins_cost(0);
10450 
10451   format %{ " -- \t// $dst=Thread::current(), empty, #@tlsLoadP" %}
10452 
10453   size(0);
10454 
10455   ins_encode( /*empty*/ );
10456 
10457   ins_pipe(pipe_class_empty);
10458 %}
10459 
10460 // inlined locking and unlocking
10461 // using t1 as the 'flag' register to bridge the BoolNode producers and consumers
10462 instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box,
10463                      iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegPNoSp tmp4)
10464 %{
10465   predicate(LockingMode != LM_LIGHTWEIGHT);
10466   match(Set cr (FastLock object box));
10467   effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4);
10468 
10469   ins_cost(10 * DEFAULT_COST);
10470   format %{ "fastlock $object,$box\t! kills $tmp1,$tmp2,$tmp3,$tmp4 #@cmpFastLock" %}
10471 
10472   ins_encode %{
10473     __ fast_lock($object$$Register, $box$$Register,
10474                  $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register);
10475   %}
10476 
10477   ins_pipe(pipe_serial);
10478 %}
10479 
10480 // using t1 as the 'flag' register to bridge the BoolNode producers and consumers
10481 instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp1, iRegPNoSp tmp2)
10482 %{
10483   predicate(LockingMode != LM_LIGHTWEIGHT);
10484   match(Set cr (FastUnlock object box));
10485   effect(TEMP tmp1, TEMP tmp2);
10486 
10487   ins_cost(10 * DEFAULT_COST);
10488   format %{ "fastunlock $object,$box\t! kills $tmp1, $tmp2, #@cmpFastUnlock" %}
10489 
10490   ins_encode %{
10491     __ fast_unlock($object$$Register, $box$$Register, $tmp1$$Register, $tmp2$$Register);
10492   %}
10493 
10494   ins_pipe(pipe_serial);
10495 %}
10496 
10497 instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box,
10498                                 iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegPNoSp tmp4)
10499 %{
10500   predicate(LockingMode == LM_LIGHTWEIGHT);
10501   match(Set cr (FastLock object box));
10502   effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4);
10503 
10504   ins_cost(10 * DEFAULT_COST);
10505   format %{ "fastlock $object,$box\t! kills $tmp1,$tmp2,$tmp3,$tmp4 #@cmpFastLockLightweight" %}
10506 
10507   ins_encode %{
10508     __ fast_lock_lightweight($object$$Register, $box$$Register,
10509                              $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register);
10510   %}
10511 
10512   ins_pipe(pipe_serial);
10513 %}
10514 
10515 instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP box,
10516                                   iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3)
10517 %{
10518   predicate(LockingMode == LM_LIGHTWEIGHT);
10519   match(Set cr (FastUnlock object box));
10520   effect(TEMP tmp1, TEMP tmp2, TEMP tmp3);
10521 
10522   ins_cost(10 * DEFAULT_COST);
10523   format %{ "fastunlock $object,$box\t! kills $tmp1,$tmp2,$tmp3 #@cmpFastUnlockLightweight" %}
10524 
10525   ins_encode %{
10526     __ fast_unlock_lightweight($object$$Register, $box$$Register,
10527                                $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
10528   %}
10529 
10530   ins_pipe(pipe_serial);
10531 %}
10532 
10533 // Tail Call; Jump from runtime stub to Java code.
10534 // Also known as an 'interprocedural jump'.
10535 // Target of jump will eventually return to caller.
10536 // TailJump below removes the return address.
10537 // Don't use fp for 'jump_target' because a MachEpilogNode has already been
10538 // emitted just above the TailCall which has reset fp to the caller state.
10539 instruct TailCalljmpInd(iRegPNoSpNoFp jump_target, inline_cache_RegP method_oop)
10540 %{
10541   match(TailCall jump_target method_oop);
10542 
10543   ins_cost(BRANCH_COST);
10544 
10545   format %{ "jalr $jump_target\t# $method_oop holds method oop, #@TailCalljmpInd." %}
10546 
10547   ins_encode(riscv_enc_tail_call(jump_target));
10548 
10549   ins_pipe(pipe_class_call);
10550 %}
10551 
10552 instruct TailjmpInd(iRegPNoSpNoFp jump_target, iRegP_R10 ex_oop)
10553 %{
10554   match(TailJump jump_target ex_oop);
10555 
10556   ins_cost(ALU_COST + BRANCH_COST);
10557 
10558   format %{ "jalr $jump_target\t# $ex_oop holds exception oop, #@TailjmpInd." %}
10559 
10560   ins_encode(riscv_enc_tail_jmp(jump_target));
10561 
10562   ins_pipe(pipe_class_call);
10563 %}
10564 
10565 // Forward exception.
10566 instruct ForwardExceptionjmp()
10567 %{
10568   match(ForwardException);
10569 
10570   ins_cost(BRANCH_COST);
10571 
10572   format %{ "j forward_exception_stub\t#@ForwardException" %}
10573 
10574   ins_encode %{
10575     __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
10576   %}
10577 
10578   ins_pipe(pipe_class_call);
10579 %}
10580 
10581 // Create exception oop: created by stack-crawling runtime code.
// The created exception is available to this handler and is set up
// just prior to jumping to this handler. No code is emitted.
10584 instruct CreateException(iRegP_R10 ex_oop)
10585 %{
10586   match(Set ex_oop (CreateEx));
10587 
10588   ins_cost(0);
10589   format %{ " -- \t// exception oop; no code emitted, #@CreateException" %}
10590 
10591   size(0);
10592 
10593   ins_encode( /*empty*/ );
10594 
10595   ins_pipe(pipe_class_empty);
10596 %}
10597 
10598 // Rethrow exception: The exception oop will come in the first
10599 // argument position. Then JUMP (not call) to the rethrow stub code.
10600 instruct RethrowException()
10601 %{
10602   match(Rethrow);
10603 
10604   ins_cost(BRANCH_COST);
10605 
10606   format %{ "j rethrow_stub\t#@RethrowException" %}
10607 
10608   ins_encode(riscv_enc_rethrow());
10609 
10610   ins_pipe(pipe_class_call);
10611 %}
10612 
10613 // Return Instruction
// The epilog node loads the return address into ra as part of the frame pop.
10615 instruct Ret()
10616 %{
10617   match(Return);
10618 
10619   ins_cost(BRANCH_COST);
10620   format %{ "ret\t// return register, #@Ret" %}
10621 
10622   ins_encode(riscv_enc_ret());
10623 
10624   ins_pipe(pipe_branch);
10625 %}
10626 
10627 // Die now.
10628 instruct ShouldNotReachHere() %{
10629   match(Halt);
10630 
10631   ins_cost(BRANCH_COST);
10632 
10633   format %{ "#@ShouldNotReachHere" %}
10634 
10635   ins_encode %{
10636     if (is_reachable()) {
10637       __ stop(_halt_reason);
10638     }
10639   %}
10640 
10641   ins_pipe(pipe_class_default);
10642 %}
10643 
10644 
10645 //----------PEEPHOLE RULES-----------------------------------------------------
10646 // These must follow all instruction definitions as they use the names
10647 // defined in the instructions definitions.
10648 //
10649 // peepmatch ( root_instr_name [preceding_instruction]* );
10650 //
10651 // peepconstraint %{
10652 // (instruction_number.operand_name relational_op instruction_number.operand_name
10653 //  [, ...] );
10654 // // instruction numbers are zero-based using left to right order in peepmatch
10655 //
10656 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
10657 // // provide an instruction_number.operand_name for each operand that appears
10658 // // in the replacement instruction's match rule
10659 //
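// ---------EXAMPLE----------------------------------------------------------
//
// A minimal, commented-out sketch of the syntax described above; it is
// illustrative only and is not an enabled rule. The instruct names used here
// (incI_iReg, movI, leaI_iReg_immI) are placeholders and are not necessarily
// defined in this file; a real rule must reference instructs defined above.
//
// peephole %{
//   peepmatch ( incI_iReg movI );
//   peepconstraint ( 0.dst == 1.dst );
//   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
// %}
//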
10660 // ---------VM FLAGS---------------------------------------------------------
10661 //
10662 // All peephole optimizations can be turned off using -XX:-OptoPeephole
10663 //
10664 // Each peephole rule is given an identifying number starting with zero and
10665 // increasing by one in the order seen by the parser.  An individual peephole
10666 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
10667 // on the command-line.
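// For example (illustrative command lines; the flag names are those described
// above, everything else is elided):
//
//   java -XX:-OptoPeephole ...        disable all peephole rules
//   java -XX:OptoPeepholeAt=3 ...     apply only peephole rule number 3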
10668 //
10669 // ---------CURRENT LIMITATIONS----------------------------------------------
10670 //
10671 // Only match adjacent instructions in same basic block
10672 // Only equality constraints
10673 // Only constraints between operands, not (0.dest_reg == RAX_enc)
10674 // Only one replacement instruction
10675 //
10676 //----------SMARTSPILL RULES---------------------------------------------------
10677 // These must follow all instruction definitions as they use the names
10678 // defined in the instructions definitions.
10679 
10680 // Local Variables:
10681 // mode: c++
10682 // End: